diff --git a/CHANGES b/CHANGES index a745a467c3b..a1edfdac527 100644 --- a/CHANGES +++ b/CHANGES @@ -1,7 +1,7 @@ Apache CloudStack (Incubating) CHANGES ====================================== -Full release notes for each release are located in the project's documentation website: http://incubator.apache.org/cloudstack/docs +Full release notes for each release are located in the project's documentation website: http://cloudstack.apache.org/docs Version 4.0.0-incubating ------------------------ diff --git a/INSTALL.md b/INSTALL.md index a2137fdf69d..4f93900ddfe 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -1,7 +1,7 @@ This document describes how to develop, build, package and install Apache CloudStack (Incubating). For more information please refer to the project's website: - http://incubator.apache.org/cloudstack + http://cloudstack.apache.org Apache CloudStack developers use various platforms for development, this guide was tested against a CentOS 6.2 x86_64 setup. @@ -77,7 +77,7 @@ Start the MySQL service: You may get the source code from the repository hosted on Apache: - $ git clone https://git-wip-us.apache.org/repos/asf/incubator-cloudstack.git + $ git clone https://git-wip-us.apache.org/repos/asf/cloudstack.git Or, you may fork a repository from the official Apache CloudStack mirror by Apache on [Github](https://github.com/apache/incubator-cloudstack) diff --git a/LICENSE b/LICENSE index 00afdb41fd6..6b81a9140e9 100644 --- a/LICENSE +++ b/LICENSE @@ -370,7 +370,7 @@ Within the scripts/vm/hypervisor/xenserver directory from OpenStack, LLC http://www.openstack.org swift -Within the tools/appliance/definitions/systemvmtemplate and tools/appliance/definitions/systemvmtemplate64 directory +Within the tools/appliance/definitions/{devcloud,systemvmtemplate,systemvmtemplate64} directories licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) Copyright (c) 2010-2012 Patrick Debois diff --git a/README.md b/README.md index 
7b4d973666e..7fb9b57c53f 100644 --- a/README.md +++ b/README.md @@ -17,12 +17,14 @@ Apache CloudStack offers three methods for managing cloud computing environments: an easy to use Web interface, command line tools, and a full-featured RESTful API. -Visit us at [cloudstack.org](http://incubator.apache.org/cloudstack). +Visit us at [Apache CloudStack](http://cloudstack.apache.org). ## Mailing lists -[Development Mailing List](mailto:cloudstack-dev-subscribe@incubator.apache.org) -[Users Mailing list](mailto:cloudstack-users-subscribe@incubator.apache.org) -[Commits mailing list](mailto:cloudstack-commits-subscribe@incubator.apache.org) +[Development Mailing List](mailto:dev-subscribe@cloudstack.apache.org) +[Users Mailing List](mailto:users-subscribe@cloudstack.apache.org) +[Commits Mailing List](mailto:commits-subscribe@cloudstack.apache.org) +[Issues Mailing List](mailto:issues-subscribe@cloudstack.apache.org) +[Marketing Mailing List](mailto:marketing-subscribe@cloudstack.apache.org) # License diff --git a/README.tools.md b/README.tools.md index f743c8927c6..069c3426921 100644 --- a/README.tools.md +++ b/README.tools.md @@ -61,7 +61,7 @@ Once installed per the Vagrant installation process, run: $ vagrant box add devcloud [path to devcloud.box] Then, either go into the devcloudbox folder of your checked out version of the -CloudStack code (incubator-cloudstack/tools/devcloud/devcloudbox), or copy the +CloudStack code (cloudstack/tools/devcloud/devcloudbox), or copy the contents of that folder to another location. 
Assuming the patched Vagrant installation is working, you then diff --git a/api/src/com/cloud/agent/api/routing/HealthCheckLBConfigAnswer.java b/api/src/com/cloud/agent/api/routing/HealthCheckLBConfigAnswer.java new file mode 100644 index 00000000000..dfca4ab5908 --- /dev/null +++ b/api/src/com/cloud/agent/api/routing/HealthCheckLBConfigAnswer.java @@ -0,0 +1,42 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api.routing; + +import java.util.List; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.LoadBalancerTO; +import com.cloud.agent.api.to.NicTO; + +/** + * LoadBalancerConfigCommand sends the load balancer configuration + */ +public class HealthCheckLBConfigAnswer extends Answer { + List loadBalancers; + + protected HealthCheckLBConfigAnswer() { + } + + public HealthCheckLBConfigAnswer(List loadBalancers) { + this.loadBalancers = loadBalancers; + } + + public List getLoadBalancers() { + return loadBalancers; + } + +} diff --git a/api/src/com/cloud/agent/api/routing/HealthCheckLBConfigCommand.java b/api/src/com/cloud/agent/api/routing/HealthCheckLBConfigCommand.java new file mode 100644 index 00000000000..f705f6c9707 --- /dev/null +++ b/api/src/com/cloud/agent/api/routing/HealthCheckLBConfigCommand.java @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api.routing; + +import com.cloud.agent.api.to.LoadBalancerTO; +import com.cloud.agent.api.to.NicTO; + +/** + * LoadBalancerConfigCommand sends the load balancer configuration + */ +public class HealthCheckLBConfigCommand extends NetworkElementCommand { + LoadBalancerTO[] loadBalancers; + + protected HealthCheckLBConfigCommand() { + } + + public HealthCheckLBConfigCommand(LoadBalancerTO[] loadBalancers) { + this.loadBalancers = loadBalancers; + } + + public LoadBalancerTO[] getLoadBalancers() { + return loadBalancers; + } + +} diff --git a/api/src/com/cloud/agent/api/to/LoadBalancerTO.java b/api/src/com/cloud/agent/api/to/LoadBalancerTO.java index 2d166ea1e1e..df2f8a87490 100644 --- a/api/src/com/cloud/agent/api/to/LoadBalancerTO.java +++ b/api/src/com/cloud/agent/api/to/LoadBalancerTO.java @@ -31,6 +31,7 @@ import com.cloud.network.lb.LoadBalancingRule.LbAutoScaleVmGroup; import com.cloud.network.lb.LoadBalancingRule.LbAutoScaleVmProfile; import com.cloud.network.lb.LoadBalancingRule.LbCondition; import com.cloud.network.lb.LoadBalancingRule.LbDestination; +import com.cloud.network.lb.LoadBalancingRule.LbHealthCheckPolicy; import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; import com.cloud.utils.Pair; @@ -46,8 +47,10 @@ public class LoadBalancerTO { boolean inline; DestinationTO[] destinations; private StickinessPolicyTO[] stickinessPolicies; + private HealthCheckPolicyTO[] healthCheckPolicies; private AutoScaleVmGroupTO autoScaleVmGroupTO; final static int MAX_STICKINESS_POLICIES = 1; + final static int MAX_HEALTHCHECK_POLICIES = 1; public LoadBalancerTO(String uuid, String srcIp, int srcPort, String protocol, String algorithm, boolean revoked, boolean alreadyAdded, boolean inline, List destinations) { if (destinations == null) { // for autoscaleconfig destinations will be null; @@ -69,23 +72,52 @@ public class LoadBalancerTO { } } - public LoadBalancerTO(String id, String srcIp, int srcPort, String protocol, String 
algorithm, boolean revoked, boolean alreadyAdded, boolean inline, List arg_destinations, List stickinessPolicies) { + public LoadBalancerTO(String id, String srcIp, int srcPort, String protocol, String algorithm, boolean revoked, + boolean alreadyAdded, boolean inline, List arg_destinations, + List stickinessPolicies) { + + this(id, srcIp, srcPort, protocol, algorithm, revoked, alreadyAdded, inline, arg_destinations, + stickinessPolicies, null); + } + + public LoadBalancerTO(String id, String srcIp, int srcPort, String protocol, String algorithm, boolean revoked, + boolean alreadyAdded, boolean inline, List arg_destinations, + List stickinessPolicies, List healthCheckPolicies) { this(id, srcIp, srcPort, protocol, algorithm, revoked, alreadyAdded, inline, arg_destinations); this.stickinessPolicies = null; + this.healthCheckPolicies = null; if (stickinessPolicies != null && stickinessPolicies.size() > 0) { this.stickinessPolicies = new StickinessPolicyTO[MAX_STICKINESS_POLICIES]; int index = 0; for (LbStickinessPolicy stickinesspolicy : stickinessPolicies) { if (!stickinesspolicy.isRevoked()) { - this.stickinessPolicies[index] = new StickinessPolicyTO(stickinesspolicy.getMethodName(), stickinesspolicy.getParams()); + this.stickinessPolicies[index] = new StickinessPolicyTO(stickinesspolicy.getMethodName(), + stickinesspolicy.getParams()); index++; - if (index == MAX_STICKINESS_POLICIES) break; - } + if (index == MAX_STICKINESS_POLICIES) + break; } - if (index == 0) this.stickinessPolicies = null; } + if (index == 0) + this.stickinessPolicies = null; + } + + if (healthCheckPolicies != null && healthCheckPolicies.size() > 0) { + this.healthCheckPolicies = new HealthCheckPolicyTO[MAX_HEALTHCHECK_POLICIES]; + int index = 0; + for (LbHealthCheckPolicy hcp : healthCheckPolicies) { + this.healthCheckPolicies[0] = new HealthCheckPolicyTO(hcp.getpingpath(), hcp.getDescription(), + hcp.getResponseTime(), hcp.getHealthcheckInterval(), hcp.getHealthcheckThresshold(), + 
hcp.getUnhealthThresshold(), hcp.isRevoked()); + index++; + if (index == MAX_HEALTHCHECK_POLICIES) + break; } + if (index == 0) + this.healthCheckPolicies = null; + } + } protected LoadBalancerTO() { } @@ -126,6 +158,10 @@ public class LoadBalancerTO { return stickinessPolicies; } + public HealthCheckPolicyTO[] getHealthCheckPolicies() { + return healthCheckPolicies; + } + public DestinationTO[] getDestinations() { return destinations; } @@ -158,6 +194,65 @@ public class LoadBalancerTO { this._methodName = methodName; this._paramsList = paramsList; } + } + + public static class HealthCheckPolicyTO { + private String pingPath; + private String description; + private int responseTime; + private int healthcheckInterval; + private int healthcheckThresshold; + private int unhealthThresshold; + private boolean revoke = false; + + public HealthCheckPolicyTO(String pingPath, String description, int responseTime, int healthcheckInterval, + int healthcheckThresshold, int unhealthThresshold, boolean revoke) { + + this.description = description; + this.pingPath = pingPath; + this.responseTime = responseTime; + this.healthcheckInterval = healthcheckInterval; + this.healthcheckThresshold = healthcheckThresshold; + this.unhealthThresshold = unhealthThresshold; + this.revoke = revoke; + } + + public HealthCheckPolicyTO() { + + } + + public String getpingPath() { + return pingPath; + } + + public String getDescription() { + return description; + } + + public int getResponseTime() { + return responseTime; + } + + public int getHealthcheckInterval() { + return healthcheckInterval; + } + + public int getHealthcheckThresshold() { + return healthcheckThresshold; + } + + public int getUnhealthThresshold() { + return unhealthThresshold; + } + + public void setRevoke(boolean revoke) { + this.revoke = revoke; + } + + public boolean isRevoked() { + return revoke; + } + } public static class DestinationTO { @@ -165,6 +260,7 @@ public class LoadBalancerTO { int destPort; boolean revoked; 
boolean alreadyAdded; + String monitorState; public DestinationTO(String destIp, int destPort, boolean revoked, boolean alreadyAdded) { this.destIp = destIp; this.destPort = destPort; @@ -190,6 +286,14 @@ public class LoadBalancerTO { public boolean isAlreadyAdded() { return alreadyAdded; } + + public void setMonitorState(String state) { + this.monitorState = state; + } + + public String getMonitorState() { + return monitorState; + } } public static class CounterTO implements Serializable { private final String name; diff --git a/api/src/com/cloud/event/EventTypes.java b/api/src/com/cloud/event/EventTypes.java index c8cc841ffaf..b8200d8c2f1 100755 --- a/api/src/com/cloud/event/EventTypes.java +++ b/api/src/com/cloud/event/EventTypes.java @@ -111,6 +111,8 @@ public class EventTypes { public static final String EVENT_LOAD_BALANCER_DELETE = "LB.DELETE"; public static final String EVENT_LB_STICKINESSPOLICY_CREATE = "LB.STICKINESSPOLICY.CREATE"; public static final String EVENT_LB_STICKINESSPOLICY_DELETE = "LB.STICKINESSPOLICY.DELETE"; + public static final String EVENT_LB_HEALTHCHECKPOLICY_CREATE = "LB.HEALTHCHECKPOLICY.CREATE"; + public static final String EVENT_LB_HEALTHCHECKPOLICY_DELETE = "LB.HEALTHCHECKPOLICY.DELETE"; public static final String EVENT_LOAD_BALANCER_UPDATE = "LB.UPDATE"; // Account events diff --git a/api/src/com/cloud/network/Network.java b/api/src/com/cloud/network/Network.java index efed5cd4f8b..c2ab655b103 100644 --- a/api/src/com/cloud/network/Network.java +++ b/api/src/com/cloud/network/Network.java @@ -188,6 +188,7 @@ public interface Network extends ControlledEntity, StateObject, I public static final Capability InlineMode = new Capability("InlineMode"); public static final Capability SupportedTrafficDirection = new Capability("SupportedTrafficDirection"); public static final Capability SupportedEgressProtocols = new Capability("SupportedEgressProtocols"); + public static final Capability HealthCheckPolicy = new 
Capability("HealthCheckPolicy"); private String name; @@ -235,6 +236,8 @@ public interface Network extends ControlledEntity, StateObject, I s_fsm.addTransition(State.Implemented, Event.DestroyNetwork, State.Shutdown); s_fsm.addTransition(State.Shutdown, Event.OperationSucceeded, State.Allocated); s_fsm.addTransition(State.Shutdown, Event.OperationFailed, State.Implemented); + s_fsm.addTransition(State.Setup, Event.DestroyNetwork, State.Destroy); + s_fsm.addTransition(State.Allocated, Event.DestroyNetwork, State.Destroy); } public static StateMachine2 getStateMachine() { diff --git a/api/src/com/cloud/network/NetworkModel.java b/api/src/com/cloud/network/NetworkModel.java index 60e1f7fd164..916f28a00f2 100644 --- a/api/src/com/cloud/network/NetworkModel.java +++ b/api/src/com/cloud/network/NetworkModel.java @@ -259,4 +259,6 @@ public interface NetworkModel { void checkRequestedIpAddresses(long networkId, String ip4, String ip6) throws InvalidParameterValueException; String getStartIpv6Address(long id); + + Nic getPlaceholderNic(Network network, Long podId); } \ No newline at end of file diff --git a/api/src/com/cloud/network/element/LoadBalancingServiceProvider.java b/api/src/com/cloud/network/element/LoadBalancingServiceProvider.java index 879ea0ed663..cb3155f9c05 100644 --- a/api/src/com/cloud/network/element/LoadBalancingServiceProvider.java +++ b/api/src/com/cloud/network/element/LoadBalancingServiceProvider.java @@ -18,6 +18,7 @@ package com.cloud.network.element; import java.util.List; +import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.Network; import com.cloud.network.lb.LoadBalancingRule; @@ -25,6 +26,7 @@ import com.cloud.network.lb.LoadBalancingRule; public interface LoadBalancingServiceProvider extends NetworkElement, IpDeployingRequester { /** * Apply rules + * * @param network * @param rules * @return @@ -34,10 +36,14 @@ public interface LoadBalancingServiceProvider extends 
NetworkElement, IpDeployin /** * Validate rules + * * @param network * @param rule - * @return true/false. true should be return if there are no validations. false should be return if any oneof the validation fails. + * @return true/false. true should be return if there are no validations. + *false should be return if any oneof the validation fails. * @throws */ boolean validateLBRule(Network network, LoadBalancingRule rule); + + List updateHealthChecks(Network network, List lbrules); } diff --git a/api/src/com/cloud/network/lb/LoadBalancingRule.java b/api/src/com/cloud/network/lb/LoadBalancingRule.java index fb1d988a4de..3e11e8c7c2c 100644 --- a/api/src/com/cloud/network/lb/LoadBalancingRule.java +++ b/api/src/com/cloud/network/lb/LoadBalancingRule.java @@ -32,11 +32,14 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { private List destinations; private List stickinessPolicies; private LbAutoScaleVmGroup autoScaleVmGroup; + private List healthCheckPolicies; - public LoadBalancingRule(LoadBalancer lb, List destinations, List stickinessPolicies) { + public LoadBalancingRule(LoadBalancer lb, List destinations, + List stickinessPolicies, List healthCheckPolicies) { this.lb = lb; this.destinations = destinations; this.stickinessPolicies = stickinessPolicies; + this.healthCheckPolicies = healthCheckPolicies; } @Override @@ -128,6 +131,10 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { return lb; } + public void setDestinations(List destinations) { + this.destinations = destinations; + } + public List getDestinations() { return destinations; } @@ -136,11 +143,21 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { return stickinessPolicies; } + public void setHealthCheckPolicies(List healthCheckPolicies) { + this.healthCheckPolicies = healthCheckPolicies; + } + + public List getHealthCheckPolicies() { + return healthCheckPolicies; + } public interface Destination { String getIpAddress(); + int 
getDestinationPortStart(); + int getDestinationPortEnd(); + boolean isRevoked(); } @@ -174,6 +191,64 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { } } + public static class LbHealthCheckPolicy { + private String pingpath; + private String description; + private int responseTime; + private int healthcheckInterval; + private int healthcheckThresshold; + private int unhealthThresshold; + private boolean _revoke; + + public LbHealthCheckPolicy(String pingpath, String description, int responseTime, int healthcheckInterval, + int healthcheckThresshold, int unhealthThresshold) { + this(pingpath, description, responseTime, healthcheckInterval, healthcheckThresshold, unhealthThresshold, false); + } + + public LbHealthCheckPolicy(String pingpath, String description, int responseTime, int healthcheckInterval, + int healthcheckThresshold, int unhealthThresshold, boolean revoke) { + this.pingpath = pingpath; + this.description = description; + this.responseTime = responseTime; + this.healthcheckInterval = healthcheckInterval; + this.healthcheckThresshold = healthcheckThresshold; + this.unhealthThresshold = unhealthThresshold; + this._revoke = revoke; + } + + public LbHealthCheckPolicy() { + } + + public String getpingpath() { + return pingpath; + } + + public String getDescription() { + return description; + } + + public int getResponseTime() { + return responseTime; + } + + public int getHealthcheckInterval() { + return healthcheckInterval; + } + + public int getHealthcheckThresshold() { + return healthcheckThresshold; + } + + public int getUnhealthThresshold() { + return unhealthThresshold; + } + + public boolean isRevoked() { + return _revoke; + } + + } + public static class LbDestination implements Destination { private int portStart; private int portEnd; @@ -191,10 +266,12 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { public String getIpAddress() { return ip; } + @Override public int getDestinationPortStart() { return 
portStart; } + @Override public int getDestinationPortEnd() { return portEnd; @@ -230,15 +307,16 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { return null; } - @Override public TrafficType getTrafficType() { return null; } + @Override public FirewallRuleType getType() { return FirewallRuleType.User; } + public LbAutoScaleVmGroup getAutoScaleVmGroup() { return autoScaleVmGroup; } @@ -274,8 +352,7 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { private final AutoScalePolicy policy; private boolean revoked; - public LbAutoScalePolicy(AutoScalePolicy policy, List conditions) - { + public LbAutoScalePolicy(AutoScalePolicy policy, List conditions) { this.policy = policy; this.conditions = conditions; } @@ -309,7 +386,9 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { private final String networkId; private final String vmName; - public LbAutoScaleVmProfile(AutoScaleVmProfile profile, String autoScaleUserApiKey, String autoScaleUserSecretKey, String csUrl, String zoneId, String domainId, String serviceOfferingId, String templateId, String vmName, String networkId) { + public LbAutoScaleVmProfile(AutoScaleVmProfile profile, String autoScaleUserApiKey, + String autoScaleUserSecretKey, String csUrl, String zoneId, String domainId, String serviceOfferingId, + String templateId, String vmName, String networkId) { this.profile = profile; this.autoScaleUserApiKey = autoScaleUserApiKey; this.autoScaleUserSecretKey = autoScaleUserSecretKey; @@ -369,7 +448,8 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { private final LbAutoScaleVmProfile profile; private final String currentState; - public LbAutoScaleVmGroup(AutoScaleVmGroup vmGroup, List policies, LbAutoScaleVmProfile profile, String currentState) { + public LbAutoScaleVmGroup(AutoScaleVmGroup vmGroup, List policies, + LbAutoScaleVmProfile profile, String currentState) { this.vmGroup = vmGroup; this.policies = policies; 
this.profile = profile; diff --git a/api/src/com/cloud/network/lb/LoadBalancingRulesService.java b/api/src/com/cloud/network/lb/LoadBalancingRulesService.java index 3743aae4bf8..ed39bedaa6f 100644 --- a/api/src/com/cloud/network/lb/LoadBalancingRulesService.java +++ b/api/src/com/cloud/network/lb/LoadBalancingRulesService.java @@ -18,8 +18,10 @@ package com.cloud.network.lb; import java.util.List; +import org.apache.cloudstack.api.command.user.loadbalancer.CreateLBHealthCheckPolicyCmd; import org.apache.cloudstack.api.command.user.loadbalancer.CreateLBStickinessPolicyCmd; import org.apache.cloudstack.api.command.user.loadbalancer.CreateLoadBalancerRuleCmd; +import org.apache.cloudstack.api.command.user.loadbalancer.ListLBHealthCheckPoliciesCmd; import org.apache.cloudstack.api.command.user.loadbalancer.ListLBStickinessPoliciesCmd; import org.apache.cloudstack.api.command.user.loadbalancer.ListLoadBalancerRuleInstancesCmd; import org.apache.cloudstack.api.command.user.loadbalancer.ListLoadBalancerRulesCmd; @@ -28,6 +30,8 @@ import org.apache.cloudstack.api.command.user.loadbalancer.UpdateLoadBalancerRul import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; +import com.cloud.network.rules.HealthCheckPolicy; import com.cloud.network.rules.LoadBalancer; import com.cloud.network.rules.StickinessPolicy; import com.cloud.uservm.UserVm; @@ -66,6 +70,22 @@ public interface LoadBalancingRulesService { public boolean applyLBStickinessPolicy(CreateLBStickinessPolicyCmd cmd) throws ResourceUnavailableException; boolean deleteLBStickinessPolicy(long stickinessPolicyId, boolean apply); + + /** + * Create a healthcheck policy to a load balancer from the given healthcheck + * parameters in (name,value) pairs. 
+ * + * @param cmd + * the command specifying the stickiness method name, params + * (name,value pairs), policy name and description. + * @return the newly created stickiness policy if successfull, null + * otherwise + * @thows NetworkRuleConflictException + */ + public HealthCheckPolicy createLBHealthCheckPolicy(CreateLBHealthCheckPolicyCmd cmd); + public boolean applyLBHealthCheckPolicy(CreateLBHealthCheckPolicyCmd cmd) throws ResourceUnavailableException; + boolean deleteLBHealthCheckPolicy(long healthCheckPolicyId, boolean apply); + /** * Assign a virtual machine, or list of virtual machines, to a load balancer. */ @@ -104,8 +124,18 @@ public interface LoadBalancingRulesService { */ List searchForLBStickinessPolicies(ListLBStickinessPoliciesCmd cmd); + /** + * List healthcheck policies based on the given criteria + * + * @param cmd + * the command specifies the load balancing rule id. + * @return list of healthcheck policies that match the criteria. + */ + + List searchForLBHealthCheckPolicies(ListLBHealthCheckPoliciesCmd cmd); + List listByNetworkId(long networkId); LoadBalancer findById(long LoadBalancer); - + public void updateLBHealthChecks() throws ResourceUnavailableException; } diff --git a/api/src/com/cloud/network/rules/HealthCheckPolicy.java b/api/src/com/cloud/network/rules/HealthCheckPolicy.java new file mode 100644 index 00000000000..96bb28204a2 --- /dev/null +++ b/api/src/com/cloud/network/rules/HealthCheckPolicy.java @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.network.rules; + +import java.util.List; + +import com.cloud.utils.Pair; +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +/** + */ +public interface HealthCheckPolicy extends InternalIdentity, Identity { + + public long getLoadBalancerId(); + + public String getpingpath(); + + public String getDescription(); + + public int getResponseTime(); + + public int getHealthcheckInterval(); + + public int getHealthcheckThresshold(); + + public int getUnhealthThresshold(); + + public boolean isRevoke(); + +} diff --git a/api/src/com/cloud/storage/DataStoreProviderApiService.java b/api/src/com/cloud/storage/DataStoreProviderApiService.java new file mode 100644 index 00000000000..f81a9960be1 --- /dev/null +++ b/api/src/com/cloud/storage/DataStoreProviderApiService.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.storage; + +import java.util.List; + +import org.apache.cloudstack.api.response.StorageProviderResponse; + +public interface DataStoreProviderApiService { + public List getDataStoreProviders(String type); + +} diff --git a/api/src/com/cloud/storage/StoragePool.java b/api/src/com/cloud/storage/StoragePool.java index 091eef182cc..8b95383c537 100644 --- a/api/src/com/cloud/storage/StoragePool.java +++ b/api/src/com/cloud/storage/StoragePool.java @@ -99,7 +99,7 @@ public interface StoragePool extends Identity, InternalIdentity { /** * @return */ - Long getStorageProviderId(); + String getStorageProviderName(); boolean isInMaintenance(); } diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java index b40b26ce57c..f4c6c527d1a 100755 --- a/api/src/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/org/apache/cloudstack/api/ApiConstants.java @@ -460,6 +460,11 @@ public class ApiConstants { public static final String UCS_BLADE_ID = "bladeid"; public static final String VM_GUEST_IP = "vmguestip"; public static final String OLDER_THAN = "olderthan"; + public static final String HEALTHCHECK_RESPONSE_TIMEOUT = "responsetimeout"; + public static final String HEALTHCHECK_INTERVAL_TIME = "intervaltime"; + public static final String HEALTHCHECK_HEALTHY_THRESHOLD = "healthythreshold"; + public static final String HEALTHCHECK_UNHEALTHY_THRESHOLD = "unhealthythreshold"; + public static final String HEALTHCHECK_PINGPATH = "pingpath"; public enum HostDetails { all, capacity, events, stats, min; diff --git a/api/src/org/apache/cloudstack/api/BaseCmd.java b/api/src/org/apache/cloudstack/api/BaseCmd.java index 816b6deed77..8b7f43fd104 100644 --- a/api/src/org/apache/cloudstack/api/BaseCmd.java +++ b/api/src/org/apache/cloudstack/api/BaseCmd.java @@ -61,6 +61,7 @@ import 
com.cloud.projects.ProjectService; import com.cloud.resource.ResourceService; import com.cloud.server.ManagementService; import com.cloud.server.TaggedResourceService; +import com.cloud.storage.DataStoreProviderApiService; import com.cloud.storage.StorageService; import com.cloud.storage.VolumeApiService; import com.cloud.storage.snapshot.SnapshotService; @@ -131,6 +132,7 @@ public abstract class BaseCmd { @Inject public UsageService _usageService; @Inject public NetworkUsageService _networkUsageService; @Inject public VMSnapshotService _vmSnapshotService; + @Inject public DataStoreProviderApiService dataStoreProviderApiService; public abstract void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException; diff --git a/api/src/org/apache/cloudstack/api/ResponseGenerator.java b/api/src/org/apache/cloudstack/api/ResponseGenerator.java index a6025149846..628a185e93d 100644 --- a/api/src/org/apache/cloudstack/api/ResponseGenerator.java +++ b/api/src/org/apache/cloudstack/api/ResponseGenerator.java @@ -47,6 +47,7 @@ import org.apache.cloudstack.api.response.HypervisorCapabilitiesResponse; import org.apache.cloudstack.api.response.IPAddressResponse; import org.apache.cloudstack.api.response.InstanceGroupResponse; import org.apache.cloudstack.api.response.IpForwardingRuleResponse; +import org.apache.cloudstack.api.response.LBHealthCheckResponse; import org.apache.cloudstack.api.response.LBStickinessResponse; import org.apache.cloudstack.api.response.LDAPConfigResponse; import org.apache.cloudstack.api.response.LoadBalancerResponse; @@ -133,6 +134,7 @@ import com.cloud.network.as.Condition; import com.cloud.network.as.Counter; import com.cloud.network.router.VirtualRouter; import com.cloud.network.rules.FirewallRule; +import com.cloud.network.rules.HealthCheckPolicy; import com.cloud.network.rules.LoadBalancer; import 
com.cloud.network.rules.PortForwardingRule; import com.cloud.network.rules.StaticNatRule; @@ -213,6 +215,11 @@ public interface ResponseGenerator { LBStickinessResponse createLBStickinessPolicyResponse(StickinessPolicy stickinessPolicy, LoadBalancer lb); + LBHealthCheckResponse createLBHealthCheckPolicyResponse(List healthcheckPolicies, + LoadBalancer lb); + + LBHealthCheckResponse createLBHealthCheckPolicyResponse(HealthCheckPolicy healthcheckPolicy, LoadBalancer lb); + PodResponse createPodResponse(Pod pod, Boolean showCapacities); ZoneResponse createZoneResponse(DataCenter dataCenter, Boolean showCapacities); diff --git a/api/src/org/apache/cloudstack/api/command/admin/region/RemoveRegionCmd.java b/api/src/org/apache/cloudstack/api/command/admin/region/RemoveRegionCmd.java index 79c34d0690f..d2b696d2b6b 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/region/RemoveRegionCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/region/RemoveRegionCmd.java @@ -33,7 +33,7 @@ import com.cloud.user.Account; @APICommand(name = "removeRegion", description="Removes specified region", responseObject=SuccessResponse.class) public class RemoveRegionCmd extends BaseCmd { public static final Logger s_logger = Logger.getLogger(RemoveRegionCmd.class.getName()); - private static final String s_name = "updateregionresponse"; + private static final String s_name = "removeregionresponse"; ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java index b86784ed0b0..5178d685889 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java @@ -73,8 +73,8 @@ public class CreateStoragePoolCmd extends BaseCmd { private Long 
zoneId; @Parameter(name=ApiConstants.PROVIDER, type=CommandType.STRING, - required=false, description="the storage provider uuid") - private String storageProviderUuid; + required=false, description="the storage provider name") + private String storageProviderName; @Parameter(name=ApiConstants.SCOPE, type=CommandType.STRING, required=false, description="the scope of the storage: cluster or zone") @@ -112,8 +112,8 @@ public class CreateStoragePoolCmd extends BaseCmd { return zoneId; } - public String getStorageProviderUuid() { - return this.storageProviderUuid; + public String getStorageProviderName() { + return this.storageProviderName; } public String getScope() { diff --git a/api/src/org/apache/cloudstack/api/command/admin/storage/ListStorageProvidersCmd.java b/api/src/org/apache/cloudstack/api/command/admin/storage/ListStorageProvidersCmd.java new file mode 100644 index 00000000000..0dfc6633c6f --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/admin/storage/ListStorageProvidersCmd.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
package org.apache.cloudstack.api.command.admin.storage;

import java.util.List;

import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseListCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.StorageProviderResponse;
import org.apache.log4j.Logger;

import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;

/**
 * Lists the storage providers registered with the management server,
 * filtered by provider type (primary or image).
 */
@APICommand(name = "listStorageProviders", description = "Lists storage providers.", responseObject = StorageProviderResponse.class)
public class ListStorageProvidersCmd extends BaseListCmd {
    public static final Logger s_logger = Logger.getLogger(ListStorageProvidersCmd.class.getName());
    private static final String s_name = "liststorageprovidersresponse";

    /** Provider type to list: either "primary" or "image". */
    @Parameter(name = ApiConstants.TYPE, type = CommandType.STRING, description = "the type of storage provider: either primary or image", required = true)
    private String type;

    @Override
    public String getCommandName() {
        return s_name;
    }

    public String getType() {
        return this.type;
    }

    @Override
    public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException,
            ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
        // The parameter is declared required=true, but keep the defensive check so a
        // misbehaving dispatcher still yields a clear API error rather than an NPE.
        if (getType() == null) {
            throw new ServerApiException(ApiErrorCode.MALFORMED_PARAMETER_ERROR, "need to specify type: either primary or image");
        }

        List<StorageProviderResponse> providers = this.dataStoreProviderApiService.getDataStoreProviders(getType());
        // Tag each entry so the serializer emits it under "dataStoreProvider".
        for (StorageProviderResponse provider : providers) {
            provider.setObjectName("dataStoreProvider");
        }

        ListResponse<StorageProviderResponse> responses = new ListResponse<StorageProviderResponse>();
        responses.setResponses(providers);
        responses.setResponseName(this.getCommandName());
        this.setResponseObject(responses);
    }
}
The VM is restricted to move between accounts under same domain.", responseObject=UserVmResponse.class, since="3.0.0") public class AssignVMCmd extends BaseCmd { public static final Logger s_logger = Logger.getLogger(AssignVMCmd.class.getName()); @@ -46,7 +46,7 @@ public class AssignVMCmd extends BaseCmd { ///////////////////////////////////////////////////// @Parameter(name=ApiConstants.VIRTUAL_MACHINE_ID, type=CommandType.UUID, entityType=UserVmResponse.class, - required=true, description="the vm ID of the user VM to be moved") + required=true, description="id of the VM to be moved") private Long virtualMachineId; @Parameter(name=ApiConstants.ACCOUNT, type=CommandType.STRING, required=true, description="account name of the new VM owner.") @@ -58,11 +58,11 @@ public class AssignVMCmd extends BaseCmd { //Network information @Parameter(name=ApiConstants.NETWORK_IDS, type=CommandType.LIST, collectionType=CommandType.UUID, entityType=NetworkResponse.class, - description="list of network ids that will be part of VM network after move in advanced network setting.") + description="list of new network ids in which the moved VM will participate. In case no network ids are provided the VM will be part of the default network for that zone. In case there is no network yet created for the new account the default network will be created.") private List networkIds; @Parameter(name=ApiConstants.SECURITY_GROUP_IDS, type=CommandType.LIST, collectionType=CommandType.UUID, entityType=SecurityGroupResponse.class, - description="comma separated list of security groups id that going to be applied to the virtual machine. Should be passed only when vm is moved in a zone with Basic Network support.") + description="list of security group ids to be applied on the virtual machine. 
In case no security groups are provided the VM is part of the default security group.") private List securityGroupIdList; ///////////////////////////////////////////////////// diff --git a/api/src/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBHealthCheckPolicyCmd.java b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBHealthCheckPolicyCmd.java new file mode 100644 index 00000000000..ac0ec3a9dab --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBHealthCheckPolicyCmd.java @@ -0,0 +1,168 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.loadbalancer; + + +import org.apache.cloudstack.api.response.FirewallRuleResponse; +import org.apache.log4j.Logger; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCreateCmd; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.rules.HealthCheckPolicy; +import org.apache.cloudstack.api.response.LBHealthCheckResponse; +import com.cloud.network.rules.LoadBalancer; +import com.cloud.user.Account; +import com.cloud.user.UserContext; + + +@APICommand(name = "createLBHealthCheckPolicy", description = "Creates a Load Balancer healthcheck policy ", responseObject = LBHealthCheckResponse.class, since="4.2.0") +@SuppressWarnings("rawtypes") +public class CreateLBHealthCheckPolicyCmd extends BaseAsyncCreateCmd { + public static final Logger s_logger = Logger + .getLogger(CreateLBHealthCheckPolicyCmd.class.getName()); + + private static final String s_name = "createlbhealthcheckpolicyresponse"; + + // /////////////////////////////////////////////////// + // ////////////// API parameters ///////////////////// + // /////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.LBID, type = CommandType.UUID, entityType = FirewallRuleResponse.class, required = true, description = "the ID of the load balancer rule") + private Long lbRuleId; + + @Parameter(name = ApiConstants.DESCRIPTION, type = CommandType.STRING, description = "the description of the load balancer HealthCheck policy") + private String description; + + @Parameter(name = ApiConstants.HEALTHCHECK_PINGPATH, type = CommandType.STRING, 
required = false, description = "HTTP Ping Path") + private String pingPath; + + @Parameter(name = ApiConstants.HEALTHCHECK_RESPONSE_TIMEOUT, type = CommandType.INTEGER, required = false, description = "Time to wait when receiving a response from the health check (2sec - 60 sec)") + private int responsTimeOut; + + @Parameter(name = ApiConstants.HEALTHCHECK_INTERVAL_TIME, type = CommandType.INTEGER, required = false, description = "Amount of time between health checks (1 sec - 20940 sec)") + private int healthCheckInterval; + + @Parameter(name = ApiConstants.HEALTHCHECK_HEALTHY_THRESHOLD, type = CommandType.INTEGER, required = false, description = "Number of consecutive health check success before declaring an instance healthy") + private int healthyThreshold; + + @Parameter(name = ApiConstants.HEALTHCHECK_UNHEALTHY_THRESHOLD, type = CommandType.INTEGER, required = false, description = "Number of consecutive health check failures before declaring an instance unhealthy") + private int unhealthyThreshold; + + // /////////////////////////////////////////////////// + // ///////////////// Accessors /////////////////////// + // /////////////////////////////////////////////////// + + public Long getLbRuleId() { + return lbRuleId; + } + + public String getDescription() { + return description; + } + + public String getPingPath() { + return pingPath; + } + + // /////////////////////////////////////////////////// + // ///////////// API Implementation/////////////////// + // /////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + Account account = UserContext.current().getCaller(); + if (account != null) { + return account.getId(); + } + + return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked + } + + public int getResponsTimeOut() { + return responsTimeOut; + } + + public int getHealthCheckInterval() 
{ + return healthCheckInterval; + } + + public int getHealthyThreshold() { + return healthyThreshold; + } + + public int getUnhealthyThreshold() { + return unhealthyThreshold; + } + + @Override + public void execute() throws ResourceAllocationException, ResourceUnavailableException { + HealthCheckPolicy policy = null; + boolean success = false; + + try { + UserContext.current().setEventDetails("Load balancer healthcheck policy Id : " + getEntityId()); + success = _lbService.applyLBHealthCheckPolicy(this); + if (success) { + // State might be different after the rule is applied, so get new object here + policy = _entityMgr.findById(HealthCheckPolicy.class, getEntityId()); + LoadBalancer lb = _lbService.findById(policy.getLoadBalancerId()); + LBHealthCheckResponse hcResponse = _responseGenerator.createLBHealthCheckPolicyResponse(policy, lb); + setResponseObject(hcResponse); + hcResponse.setResponseName(getCommandName()); + } + } finally { + if (!success || (policy == null)) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create healthcheck policy "); + } + } + } + + @Override + public void create() { + try { + HealthCheckPolicy result = _lbService.createLBHealthCheckPolicy(this); + this.setEntityId(result.getId()); + this.setEntityUuid(result.getUuid()); + } catch (InvalidParameterValueException e) { + s_logger.warn("Exception: ", e); + throw new ServerApiException(ApiErrorCode.MALFORMED_PARAMETER_ERROR , e.getMessage()); + } + } + + @Override + public String getEventType() { + return EventTypes.EVENT_LB_HEALTHCHECKPOLICY_CREATE; + } + + @Override + public String getEventDescription() { + return "Create Load Balancer HealthCheck policy"; + } +} diff --git a/api/src/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLBHealthCheckPolicyCmd.java b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLBHealthCheckPolicyCmd.java new file mode 100644 index 00000000000..bf91da51d68 --- /dev/null +++ 
b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLBHealthCheckPolicyCmd.java @@ -0,0 +1,116 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.loadbalancer; + +import org.apache.cloudstack.api.response.LBHealthCheckResponse; +import org.apache.log4j.Logger; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.SuccessResponse; +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.network.rules.HealthCheckPolicy; +import com.cloud.network.rules.LoadBalancer; +import com.cloud.user.Account; +import com.cloud.user.UserContext; + +@APICommand(name = "deleteLBHealthCheckPolicy", description = "Deletes a load balancer HealthCheck policy.", responseObject = SuccessResponse.class, since="4.2.0") +public class DeleteLBHealthCheckPolicyCmd extends BaseAsyncCmd { + public static final Logger s_logger = 
Logger.getLogger(DeleteLBHealthCheckPolicyCmd.class.getName()); + private static final String s_name = "deletelbhealthcheckpolicyresponse"; + // /////////////////////////////////////////////////// + // ////////////// API parameters ///////////////////// + // /////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = LBHealthCheckResponse.class, + required = true, description = "the ID of the load balancer HealthCheck policy") + private Long id; + + // /////////////////////////////////////////////////// + // ///////////////// Accessors /////////////////////// + // /////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + // /////////////////////////////////////////////////// + // ///////////// API Implementation/////////////////// + // /////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + Account account = UserContext.current().getCaller(); + if (account != null) { + return account.getId(); + } + + return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked + } + + @Override + public String getEventType() { + return EventTypes.EVENT_LB_HEALTHCHECKPOLICY_DELETE; + } + + @Override + public String getEventDescription() { + return "deleting load balancer HealthCheck policy: " + getId(); + } + + @Override + public void execute() { + UserContext.current().setEventDetails("Load balancer healthcheck policy Id: " + getId()); + boolean result = _lbService.deleteLBHealthCheckPolicy(getId() , true); + + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete load balancer healthcheck policy"); + } + } + + @Override + public String getSyncObjType() 
{ + return BaseAsyncCmd.networkSyncObject; + } + + @Override + public Long getSyncObjId() { + HealthCheckPolicy policy = _entityMgr.findById(HealthCheckPolicy.class, + getId()); + if (policy == null) { + throw new InvalidParameterValueException("Unable to find load balancer healthcheck rule: " + id); + } + LoadBalancer lb = _lbService.findById(policy.getLoadBalancerId()); + if (lb == null) { + throw new InvalidParameterValueException("Unable to find load balancer rule for healthcheck rule: " + id); + } + return lb.getNetworkId(); + } +} diff --git a/api/src/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java new file mode 100644 index 00000000000..cf5ea3238b8 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java @@ -0,0 +1,85 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
package org.apache.cloudstack.api.command.user.loadbalancer;

import java.util.ArrayList;
import java.util.List;

import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseListCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.response.FirewallRuleResponse;
import org.apache.cloudstack.api.response.LBHealthCheckResponse;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.log4j.Logger;

import com.cloud.network.rules.HealthCheckPolicy;
import com.cloud.network.rules.LoadBalancer;

/**
 * Lists the health check policies attached to a given load balancer rule.
 *
 * Cleanups over the original: removed unused imports (LBStickinessResponse,
 * Account, UserContext) and restored generic collection types.
 */
@APICommand(name = "listLBHealthCheckPolicies", description = "Lists load balancer HealthCheck policies.", responseObject = LBHealthCheckResponse.class, since = "4.2.0")
public class ListLBHealthCheckPoliciesCmd extends BaseListCmd {
    public static final Logger s_logger = Logger.getLogger(ListLBHealthCheckPoliciesCmd.class.getName());

    private static final String s_name = "listlbhealthcheckpoliciesresponse";

    // ///////////////////////////////////////////////////
    // ////////////// API parameters /////////////////////
    // ///////////////////////////////////////////////////
    @Parameter(name = ApiConstants.LBID, type = CommandType.UUID, entityType = FirewallRuleResponse.class,
            required = true, description = "the ID of the load balancer rule")
    private Long lbRuleId;

    // ///////////////////////////////////////////////////
    // ///////////////// Accessors ///////////////////////
    // ///////////////////////////////////////////////////
    public Long getLbRuleId() {
        return lbRuleId;
    }

    // ///////////////////////////////////////////////////
    // ///////////// API Implementation///////////////////
    // ///////////////////////////////////////////////////

    @Override
    public String getCommandName() {
        return s_name;
    }

    @Override
    public void execute() {
        List<LBHealthCheckResponse> hcpResponses = new ArrayList<LBHealthCheckResponse>();
        LoadBalancer lb = _lbService.findById(getLbRuleId());
        ListResponse<LBHealthCheckResponse> response = new ListResponse<LBHealthCheckResponse>();

        // An unknown LB rule id yields an empty (but well-formed) list response.
        if (lb != null) {
            List<? extends HealthCheckPolicy> healthCheckPolicies = _lbService.searchForLBHealthCheckPolicies(this);
            LBHealthCheckResponse spResponse = _responseGenerator.createLBHealthCheckPolicyResponse(healthCheckPolicies, lb);
            hcpResponses.add(spResponse);
            response.setResponses(hcpResponses);
        }

        response.setResponseName(getCommandName());
        this.setResponseObject(response);
    }
}
+package org.apache.cloudstack.api.response; + +import com.cloud.network.rules.HealthCheckPolicy; +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.api.BaseResponse; + +public class LBHealthCheckPolicyResponse extends BaseResponse { + @SerializedName("id") + @Param(description = "the LB HealthCheck policy ID") + private String id; + + @SerializedName("pingpath") + @Param(description = "the pingpath of the healthcheck policy") + private String pingpath; + + @SerializedName("description") + @Param(description = "the description of the healthcheck policy") + private String description; + + @SerializedName("state") + @Param(description = "the state of the policy") + private String state; + + @SerializedName("responsetime") + @Param(description = "Time to wait when receiving a response from the health check") + private int responseTime; + + @SerializedName("healthcheckinterval") + @Param(description = "Amount of time between health checks") + private int healthcheckInterval; + + @SerializedName("healthcheckthresshold") + @Param(description = "Number of consecutive health check success before declaring an instance healthy") + private int healthcheckthresshold; + + @SerializedName("unhealthcheckthresshold") + @Param(description = "Number of consecutive health check failures before declaring an instance unhealthy.") + private int unhealthcheckthresshold; + + public void setId(String id) { + this.id = id; + } + + public String getpingpath() { + return pingpath; + } + + public void setpingpath(String pingpath) { + this.pingpath = pingpath; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public String getState() { + return state; + } + + public void setState(String state) { + this.state = state; + } + + public LBHealthCheckPolicyResponse(HealthCheckPolicy healthcheckpolicy) { + if 
(healthcheckpolicy.isRevoke()) { + this.setState("Revoked"); + } + if (healthcheckpolicy.getUuid() != null) + setId(healthcheckpolicy.getUuid()); + this.pingpath = healthcheckpolicy.getpingpath(); + this.healthcheckInterval = healthcheckpolicy.getHealthcheckInterval(); + this.responseTime = healthcheckpolicy.getResponseTime(); + this.healthcheckthresshold = healthcheckpolicy.getHealthcheckThresshold(); + this.unhealthcheckthresshold = healthcheckpolicy.getUnhealthThresshold(); + setObjectName("healthcheckpolicy"); + } +} diff --git a/api/src/org/apache/cloudstack/api/response/LBHealthCheckResponse.java b/api/src/org/apache/cloudstack/api/response/LBHealthCheckResponse.java new file mode 100644 index 00000000000..182013ffef0 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/response/LBHealthCheckResponse.java @@ -0,0 +1,102 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
package org.apache.cloudstack.api.response;

import java.util.List;

import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponse;
import org.apache.cloudstack.api.EntityReference;

import com.cloud.network.rules.HealthCheckPolicy;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;

/**
 * Top-level API response grouping the health check policies of one LB rule,
 * together with the owning account/domain/zone context.
 *
 * Cleanups over the original: removed the unused java.util.UUID import, fixed
 * the mis-indented first annotation, and restored generic list types.
 */
@EntityReference(value = HealthCheckPolicy.class)
public class LBHealthCheckResponse extends BaseResponse {
    @SerializedName("lbruleid")
    @Param(description = "the LB rule ID")
    private String lbRuleId;

    @SerializedName("account")
    @Param(description = "the account of the HealthCheck policy")
    private String accountName;

    @SerializedName(ApiConstants.DOMAIN_ID)
    @Param(description = "the domain ID of the HealthCheck policy")
    private String domainId;

    @SerializedName("domain")
    @Param(description = "the domain of the HealthCheck policy")
    private String domainName;

    @SerializedName(ApiConstants.ZONE_ID)
    @Param(description = "the id of the zone the HealthCheck policy belongs to")
    private String zoneId;

    @SerializedName("healthcheckpolicy")
    @Param(description = "the list of healthcheckpolicies", responseObject = LBHealthCheckPolicyResponse.class)
    private List<LBHealthCheckPolicyResponse> healthCheckPolicies;

    public LBHealthCheckResponse() {
    }

    // NOTE(review): the argument is currently unused beyond tagging the object
    // name; kept for source compatibility with existing callers.
    public LBHealthCheckResponse(HealthCheckPolicy healthcheckpolicy) {
        setObjectName("healthcheckpolicy");
    }

    public void setlbRuleId(String lbRuleId) {
        this.lbRuleId = lbRuleId;
    }

    /** Alias of {@link #setHealthCheckPolicies(List)} retained for existing callers. */
    public void setRules(List<LBHealthCheckPolicyResponse> policies) {
        this.healthCheckPolicies = policies;
    }

    public List<LBHealthCheckPolicyResponse> getHealthCheckPolicies() {
        return healthCheckPolicies;
    }

    public void setHealthCheckPolicies(List<LBHealthCheckPolicyResponse> healthCheckPolicies) {
        this.healthCheckPolicies = healthCheckPolicies;
    }

    public String getAccountName() {
        return accountName;
    }

    public void setAccountName(String accountName) {
        this.accountName = accountName;
    }

    public void setDomainId(String domainId) {
        this.domainId = domainId;
    }

    public void setZoneId(String zoneId) {
        this.zoneId = zoneId;
    }

    public String getDomainName() {
        return domainName;
    }

    public void setDomainName(String domainName) {
        this.domainName = domainName;
    }
}
+ */ +package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.BaseResponse; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +public class StorageProviderResponse extends BaseResponse { + @SerializedName("name") @Param(description="the name of the storage provider") + private String name; + + @SerializedName("type") @Param(description="the type of the storage provider: primary or image provider") + private String type; + + /** + * @return the type + */ + public String getType() { + return type; + } + + /** + * @param type the type to set + */ + public void setType(String type) { + this.type = type; + } + + /** + * @return the name + */ + public String getName() { + return name; + } + + /** + * @param name the name to set + */ + public void setName(String name) { + this.name = name; + } + + +} diff --git a/api/src/org/apache/cloudstack/api/response/VpnUsersResponse.java b/api/src/org/apache/cloudstack/api/response/VpnUsersResponse.java index e654e8a522a..c29a37e7a48 100644 --- a/api/src/org/apache/cloudstack/api/response/VpnUsersResponse.java +++ b/api/src/org/apache/cloudstack/api/response/VpnUsersResponse.java @@ -48,6 +48,8 @@ public class VpnUsersResponse extends BaseResponse implements ControlledEntityRe @SerializedName(ApiConstants.PROJECT) @Param(description="the project name of the vpn") private String projectName; + @SerializedName(ApiConstants.STATE) @Param(description="the state of the Vpn User") + private String state; public void setId(String id) { this.id = id; @@ -80,4 +82,12 @@ public class VpnUsersResponse extends BaseResponse implements ControlledEntityRe this.projectName = projectName; } + public String getState() { + return state; + } + + public void setState(String state) { + this.state = state; + } + } diff --git a/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java b/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java index 
06697c4384f..44d53aaa175 100644 --- a/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java +++ b/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java @@ -133,7 +133,7 @@ public class BackupSnapshotCommandTest { } @Override - public Long getStorageProviderId() { + public String getStorageProviderName() { // TODO Auto-generated method stub return null; } diff --git a/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java b/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java index 767d7c37c5e..c2d69c0b0fd 100644 --- a/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java +++ b/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java @@ -115,7 +115,7 @@ public class SnapshotCommandTest { } @Override - public Long getStorageProviderId() { + public String getStorageProviderName() { // TODO Auto-generated method stub return null; } diff --git a/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java b/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java index 852e52b1b86..02085f577b6 100644 --- a/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java +++ b/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java @@ -134,7 +134,7 @@ public class ResizeVolumeCommandTest { } @Override - public Long getStorageProviderId() { + public String getStorageProviderName() { // TODO Auto-generated method stub return null; } diff --git a/awsapi-setup/setup/cloud-setup-bridge b/awsapi-setup/setup/cloud-setup-bridge index d25072da1eb..d456387b414 100644 --- a/awsapi-setup/setup/cloud-setup-bridge +++ b/awsapi-setup/setup/cloud-setup-bridge @@ -20,7 +20,7 @@ # Cloud.com Bridge setup script. 
# -settingsFile="/usr/share/cloud/bridge/conf/ec2-service.properties" +settingsFile="/etc/cloudstack/management/ec2-service.properties" function readCurrentSettings () { readVar managementServer diff --git a/awsapi/src/com/cloud/bridge/util/EncryptionSecretKeyCheckerUtil.java b/awsapi/src/com/cloud/bridge/util/EncryptionSecretKeyCheckerUtil.java index 0a652212b3e..6f0049f24c4 100644 --- a/awsapi/src/com/cloud/bridge/util/EncryptionSecretKeyCheckerUtil.java +++ b/awsapi/src/com/cloud/bridge/util/EncryptionSecretKeyCheckerUtil.java @@ -36,7 +36,7 @@ import org.jasypt.encryption.pbe.config.SimpleStringPBEConfig; public class EncryptionSecretKeyCheckerUtil { private static final Logger s_logger = Logger.getLogger(EncryptionSecretKeyCheckerUtil.class); - private static final String s_keyFile = "/etc/cloud/management/key"; + private static final String s_keyFile = "/etc/cloudstack/management/key"; private static final String s_envKey = "CLOUD_SECRET_KEY"; private static StandardPBEStringEncryptor s_encryptor = new StandardPBEStringEncryptor(); private static boolean s_useEncryption = false; diff --git a/client/WEB-INF/classes/resources/messages.properties b/client/WEB-INF/classes/resources/messages.properties index f0b6b363bc9..66b32acfc6c 100644 --- a/client/WEB-INF/classes/resources/messages.properties +++ b/client/WEB-INF/classes/resources/messages.properties @@ -17,6 +17,7 @@ #new labels (begin) ********************************************************************************************** +message.redirecting.region=Redirecting to region... 
label.use.vm.ip=Use VM IP: label.menu.regions=Regions label.region=Region diff --git a/client/pom.xml b/client/pom.xml index cda6ab8b4e7..7ad2eff4cd9 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -65,6 +65,11 @@ cloud-plugin-network-nvp ${project.version} + + org.apache.cloudstack + cloud-plugin-snmp-alerts + ${project.version} + org.apache.cloudstack cloud-plugin-network-ovs @@ -214,6 +219,16 @@ cloud-engine-storage-volume ${project.version} + + org.apache.cloudstack + cloud-plugin-hypervisor-simulator + ${project.version} + + + org.apache.cloudstack + cloud-plugin-storage-volume-default + ${project.version} + install @@ -238,7 +253,6 @@ 60000 - -XX:MaxPermSize=512m -Xmx2g ${project.build.directory}/${project.build.finalName}/WEB-INF/web.xml ${project.build.directory}/${project.build.finalName} @@ -476,21 +490,6 @@ - - simulator - - - simulator - - - - - org.apache.cloudstack - cloud-plugin-hypervisor-simulator - ${project.version} - - - netapp diff --git a/client/tomcatconf/commands.properties.in b/client/tomcatconf/commands.properties.in index 1c8fa4c24eb..4e7548756f3 100644 --- a/client/tomcatconf/commands.properties.in +++ b/client/tomcatconf/commands.properties.in @@ -152,6 +152,9 @@ createLBStickinessPolicy=15 deleteLBStickinessPolicy=15 listLoadBalancerRules=15 listLBStickinessPolicies=15 +listLBHealthCheckPolicies=15 +createLBHealthCheckPolicy=15 +deleteLBHealthCheckPolicy=15 listLoadBalancerRuleInstances=15 updateLoadBalancerRule=15 @@ -276,6 +279,7 @@ listAsyncJobs=15 #### storage pools commands listStoragePools=3 +listStorageProviders=3 createStoragePool=1 updateStoragePool=1 deleteStoragePool=1 diff --git a/client/tomcatconf/componentContext.xml.in b/client/tomcatconf/componentContext.xml.in index 7b64f49ee20..a98a41ffd5a 100644 --- a/client/tomcatconf/componentContext.xml.in +++ b/client/tomcatconf/componentContext.xml.in @@ -36,9 +36,25 @@ --> - - + + + + + + + + + + + + + + + + + + @@ -191,6 +207,12 @@ + + @@ -199,11 +221,9 @@ - @@ -302,14 
+322,15 @@ - + + + diff --git a/client/tomcatconf/log4j-cloud.xml.in b/client/tomcatconf/log4j-cloud.xml.in index 086669376aa..ce4079f9c96 100755 --- a/client/tomcatconf/log4j-cloud.xml.in +++ b/client/tomcatconf/log4j-cloud.xml.in @@ -74,6 +74,20 @@ under the License. + + + + + + + + + + + + + + @@ -142,6 +156,17 @@ under the License. + + + + + + + + + + + diff --git a/client/tomcatconf/nonossComponentContext.xml.in b/client/tomcatconf/nonossComponentContext.xml.in index 7e3552db67e..1cc1722cff5 100644 --- a/client/tomcatconf/nonossComponentContext.xml.in +++ b/client/tomcatconf/nonossComponentContext.xml.in @@ -37,8 +37,24 @@ --> - + + + + + + + + + + + + + + + + + @@ -310,6 +326,9 @@ + + + diff --git a/core/src/com/cloud/alert/AlertManager.java b/core/src/com/cloud/alert/AlertManager.java index a24e18c8373..b6d005a5f21 100755 --- a/core/src/com/cloud/alert/AlertManager.java +++ b/core/src/com/cloud/alert/AlertManager.java @@ -27,26 +27,27 @@ public interface AlertManager extends Manager { public static final short ALERT_TYPE_VIRTUAL_NETWORK_PUBLIC_IP = CapacityVO.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP; public static final short ALERT_TYPE_PRIVATE_IP = CapacityVO.CAPACITY_TYPE_PRIVATE_IP; public static final short ALERT_TYPE_SECONDARY_STORAGE = CapacityVO.CAPACITY_TYPE_SECONDARY_STORAGE; - public static final short ALERT_TYPE_HOST = 6; - public static final short ALERT_TYPE_USERVM = 7; - public static final short ALERT_TYPE_DOMAIN_ROUTER = 8; - public static final short ALERT_TYPE_CONSOLE_PROXY = 9; - public static final short ALERT_TYPE_ROUTING = 10; // lost connection to default route (to the gateway) - public static final short ALERT_TYPE_STORAGE_MISC = 11; // lost connection to default route (to the gateway) - public static final short ALERT_TYPE_USAGE_SERVER = 12; // lost connection to default route (to the gateway) - public static final short ALERT_TYPE_MANAGMENT_NODE = 13; // lost connection to default route (to the gateway) - public static final short 
ALERT_TYPE_DOMAIN_ROUTER_MIGRATE = 14; - public static final short ALERT_TYPE_CONSOLE_PROXY_MIGRATE = 15; - public static final short ALERT_TYPE_USERVM_MIGRATE = 16; - public static final short ALERT_TYPE_VLAN = 17; - public static final short ALERT_TYPE_SSVM = 18; - public static final short ALERT_TYPE_USAGE_SERVER_RESULT = 19; // Usage job result - public static final short ALERT_TYPE_STORAGE_DELETE = 20; - public static final short ALERT_TYPE_UPDATE_RESOURCE_COUNT = 21; // Generated when we fail to update the resource count - public static final short ALERT_TYPE_USAGE_SANITY_RESULT = 22; - public static final short ALERT_TYPE_DIRECT_ATTACHED_PUBLIC_IP = 23; - public static final short ALERT_TYPE_LOCAL_STORAGE = 24; - public static final short ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED = 25; // Generated when the resource limit exceeds the limit. Currently used for recurring snapshots only + public static final short ALERT_TYPE_HOST = 7; + public static final short ALERT_TYPE_USERVM = 8; + public static final short ALERT_TYPE_DOMAIN_ROUTER = 9; + public static final short ALERT_TYPE_CONSOLE_PROXY = 10; + public static final short ALERT_TYPE_ROUTING = 11; // lost connection to default route (to the gateway) + public static final short ALERT_TYPE_STORAGE_MISC = 12; // lost connection to default route (to the gateway) + public static final short ALERT_TYPE_USAGE_SERVER = 13; // lost connection to default route (to the gateway) + public static final short ALERT_TYPE_MANAGMENT_NODE = 14; // lost connection to default route (to the gateway) + public static final short ALERT_TYPE_DOMAIN_ROUTER_MIGRATE = 15; + public static final short ALERT_TYPE_CONSOLE_PROXY_MIGRATE = 16; + public static final short ALERT_TYPE_USERVM_MIGRATE = 17; + public static final short ALERT_TYPE_VLAN = 18; + public static final short ALERT_TYPE_SSVM = 19; + public static final short ALERT_TYPE_USAGE_SERVER_RESULT = 20; // Usage job result + public static final short ALERT_TYPE_STORAGE_DELETE = 21; + 
public static final short ALERT_TYPE_UPDATE_RESOURCE_COUNT = 22; // Generated when we fail to update the resource + // count + public static final short ALERT_TYPE_USAGE_SANITY_RESULT = 23; + public static final short ALERT_TYPE_DIRECT_ATTACHED_PUBLIC_IP = 24; + public static final short ALERT_TYPE_LOCAL_STORAGE = 25; + public static final short ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED = 26; // Generated when the resource limit exceeds the limit. Currently used for recurring snapshots only void clearAlert(short alertType, long dataCenterId, long podId); diff --git a/debian/cloudstack-agent.install b/debian/cloudstack-agent.install index b1425717584..02501855354 100644 --- a/debian/cloudstack-agent.install +++ b/debian/cloudstack-agent.install @@ -19,8 +19,8 @@ /etc/cloudstack/agent/environment.properties /etc/cloudstack/agent/log4j-cloud.xml /etc/init.d/cloudstack-agent -/usr/bin/cloud-setup-agent +/usr/bin/cloudstack-setup-agent /usr/bin/cloud-ssh /var/log/cloudstack/agent /usr/share/cloudstack-agent/lib/* -/usr/share/cloudstack-agent/plugins \ No newline at end of file +/usr/share/cloudstack-agent/plugins diff --git a/debian/cloudstack-management.install b/debian/cloudstack-management.install index cecc31181a9..e80701d0a78 100644 --- a/debian/cloudstack-management.install +++ b/debian/cloudstack-management.install @@ -31,3 +31,5 @@ /usr/bin/cloud-setup-databases /usr/bin/cloud-migrate-databases /usr/share/cloudstack-management/* +/usr/share/java/* +/usr/share/tomcat6/lib/* diff --git a/debian/cloudstack-management.postinst b/debian/cloudstack-management.postinst index 7b6a1ed6d71..4e9b046caff 100644 --- a/debian/cloudstack-management.postinst +++ b/debian/cloudstack-management.postinst @@ -22,5 +22,20 @@ if [ "$1" = configure ]; then else usermod -m -d /var/lib/cloudstack/management cloud fi - chown cloud /var/log/cloudstack/management -fi \ No newline at end of file + + for i in /var/cache/cloudstack/management \ + /var/cache/cloudstack/management/work \ + 
/var/cache/cloudstack/management/temp \ + /var/log/cloudstack/management \ + /etc/cloudstack/management/Catalina \ + /etc/cloudstack/management/Catalina/localhost \ + /var/lib/cloudstack/management \ + /etc/cloudstack/management/Catalina/localhost/client + do + chmod 0770 $i + chgrp cloud $i + done + + chmod 0640 /etc/cloudstack/management/db.properties + chgrp cloud /etc/cloudstack/management/db.properties +fi diff --git a/debian/rules b/debian/rules index 613d76a63f6..3804d8d49e9 100755 --- a/debian/rules +++ b/debian/rules @@ -68,7 +68,7 @@ install: install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/ install -D plugins/hypervisors/kvm/target/dependencies/* $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/ install -D packaging/debian/init/cloud-agent $(DESTDIR)/$(SYSCONFDIR)/init.d/$(PACKAGE)-agent - install -D agent/bindir/cloud-setup-agent.in $(DESTDIR)/usr/bin/cloud-setup-agent + install -D agent/bindir/cloud-setup-agent.in $(DESTDIR)/usr/bin/cloudstack-setup-agent install -D agent/bindir/cloud-ssh.in $(DESTDIR)/usr/bin/cloud-ssh install -D agent/target/transformed/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/agent @@ -99,12 +99,15 @@ install: ln -s tomcat6-nonssl.conf $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/tomcat6.conf mkdir -p $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/Catalina/localhost/client + mkdir -p ${DESTDIR}/usr/share/tomcat6/lib + mkdir -p ${DESTDIR}/usr/share/java install -D packaging/debian/init/cloud-management $(DESTDIR)/$(SYSCONFDIR)/init.d/$(PACKAGE)-management install -D client/bindir/cloud-update-xenserver-licenses.in $(DESTDIR)/usr/bin/cloud-update-xenserver-licenses - install -D server/target/cloud-server-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/$(PACKAGE)-server.jar + install -D server/target/cloud-server-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/java/$(PACKAGE)-server.jar ln -s /usr/share/tomcat6/bin 
$(DESTDIR)/usr/share/$(PACKAGE)-management/bin ln -s ../../..$(SYSCONFDIR)/$(PACKAGE)/management $(DESTDIR)/usr/share/$(PACKAGE)-management/conf - ln -s /usr/share/tomcat6/lib $(DESTDIR)/usr/share/$(PACKAGE)-management/lib + ln -s ../../../usr/share/tomcat6/lib $(DESTDIR)/usr/share/$(PACKAGE)-management/lib + ln -s ../../java/$(PACKAGE)-server.jar $(DESTDIR)/usr/share/tomcat6/lib/$(PACKAGE)-server.jar ln -s ../../../var/log/$(PACKAGE)/management $(DESTDIR)/usr/share/$(PACKAGE)-management/logs ln -s ../../../var/cache/$(PACKAGE)/management/temp $(DESTDIR)/usr/share/$(PACKAGE)-management/temp ln -s ../../../var/cache/$(PACKAGE)/management/work $(DESTDIR)/usr/share/$(PACKAGE)-management/work diff --git a/developer/pom.xml b/developer/pom.xml index ff47b143093..3dc276adc23 100644 --- a/developer/pom.xml +++ b/developer/pom.xml @@ -10,7 +10,7 @@ language governing permissions and limitations under the License. --> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 cloud-developer Apache CloudStack Developer Tools @@ -21,25 +21,98 @@ 4.2.0-SNAPSHOT + mysql mysql-connector-java - 5.1.21 - runtime + ${cs.mysql.version} + + + commons-dbcp + commons-dbcp + ${cs.dbcp.version} + + + commons-pool + commons-pool + ${cs.pool.version} + + + org.jasypt + jasypt + ${cs.jasypt.version} + + + org.apache.cloudstack + cloud-utils + ${project.version} + + + org.apache.cloudstack + cloud-server + ${project.version} + + + org.apache.cloudstack + cloud-plugin-hypervisor-simulator + ${project.version} + compile - - org.apache.cloudstack - cloud-plugin-hypervisor-simulator - ${project.version} - compile - install + + + org.codehaus.mojo + properties-maven-plugin + 1.0-alpha-2 + + + initialize + + read-project-properties + + + + ${basedir}/../utils/conf/db.properties + ${basedir}/../utils/conf/db.properties.override + + true + + + + + + maven-antrun-plugin + 1.7 + + + generate-resources + + run + + + + + + + + + + + + + + + + + + - + 
deploydb @@ -48,91 +121,10 @@ - - org.codehaus.mojo - properties-maven-plugin - 1.0-alpha-2 - - - initialize - - read-project-properties - - - - ${project.parent.basedir}/utils/conf/db.properties - ${project.parent.basedir}/utils/conf/db.properties.override - - true - - - - - - maven-antrun-plugin - 1.7 - - - generate-resources - - run - - - - - - - - - - - - - - - - - - - org.codehaus.mojo exec-maven-plugin 1.2.1 - - - - mysql - mysql-connector-java - ${cs.mysql.version} - - - commons-dbcp - commons-dbcp - ${cs.dbcp.version} - - - commons-pool - commons-pool - ${cs.pool.version} - - - org.jasypt - jasypt - ${cs.jasypt.version} - - - org.apache.cloudstack - cloud-utils - ${project.version} - - - org.apache.cloudstack - cloud-server - ${project.version} - - process-resources @@ -143,17 +135,11 @@ - false - true - - org.apache.cloudstack - cloud-server - com.cloud.upgrade.DatabaseCreator - ${project.parent.basedir}/utils/conf/db.properties - ${project.parent.basedir}/utils/conf/db.properties.override + ${basedir}/../utils/conf/db.properties + ${basedir}/../utils/conf/db.properties.override ${basedir}/target/db/create-schema.sql ${basedir}/target/db/create-schema-premium.sql @@ -181,7 +167,59 @@ catalina.home - ${project.parent.basedir}/utils + ${basedir}/../utils + + + paths.script + ${basedir}/target/db + + + + + + + + + + deploydb-simulator + + + deploydb-simulator + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + process-resources + create-schema-simulator + + java + + + + + com.cloud.upgrade.DatabaseCreator + + + ${basedir}/../utils/conf/db.properties + ${basedir}/../utils/conf/db.properties.override + + ${basedir}/target/db/create-schema-simulator.sql + ${basedir}/target/db/templates.simulator.sql + + com.cloud.upgrade.DatabaseUpgradeChecker + --database=simulator + --rootpassword=${db.root.password} + + + + catalina.home + ${basedir}/../utils paths.script @@ -194,4 +232,4 @@ - + \ No newline at end of file diff --git 
a/docs/en-US/Installation_Guide.xml b/docs/en-US/Installation_Guide.xml index 2f60acac984..f2f27ad9621 100644 --- a/docs/en-US/Installation_Guide.xml +++ b/docs/en-US/Installation_Guide.xml @@ -57,5 +57,6 @@ + diff --git a/docs/en-US/Release_Notes.xml b/docs/en-US/Release_Notes.xml index 4d1f62fb713..2cedb90a763 100644 --- a/docs/en-US/Release_Notes.xml +++ b/docs/en-US/Release_Notes.xml @@ -92,11 +92,11 @@ Name - systemvm-vmware-3.0.0 + systemvm-vmware-4.0 Description - systemvm-vmware-3.0.0 + systemvm-vmware-4.0 URL diff --git a/docs/en-US/about-password-encryption.xml b/docs/en-US/about-password-encryption.xml index 3cd84d19508..6c11c579ed2 100644 --- a/docs/en-US/about-password-encryption.xml +++ b/docs/en-US/about-password-encryption.xml @@ -52,7 +52,7 @@ Of course, the database secret key itself can not be stored in the open – it must be encrypted. How then does &PRODUCT; read it? A second secret key must be provided from an external source during Management Server startup. This key can be provided in one of two ways: - loaded from a file or provided by the &PRODUCT; administrator. The &PRODUCT; database has a new + loaded from a file or provided by the &PRODUCT; administrator. The &PRODUCT; database has a configuration setting that lets it know which of these methods will be used. If the encryption type is set to "file," the key must be in a file in a known location. If the encryption type is set to "web," the administrator runs the utility diff --git a/docs/en-US/about-regions.xml b/docs/en-US/about-regions.xml new file mode 100644 index 00000000000..432faeb6c5e --- /dev/null +++ b/docs/en-US/about-regions.xml @@ -0,0 +1,49 @@ + + +%BOOK_ENTITIES; +]> + + +
+ About Regions + To increase reliability of the cloud, you can optionally group resources into multiple geographic regions. + A region is the largest available organizational unit within a &PRODUCT; deployment. + A region is made up of several availability zones, where each zone is roughly equivalent to a datacenter. + Each region is controlled by its own cluster of Management Servers, running in one of the zones. + The zones in a region are typically located in close geographical proximity. + Regions are a useful technique for providing fault tolerance and disaster recovery. + By grouping zones into regions, the cloud can achieve higher availability and scalability. + User accounts can span regions, so that users can deploy VMs in multiple, widely-dispersed regions. + Even if one of the regions becomes unavailable, the services are still available to the end-user through VMs deployed in another region. + And by grouping communities of zones under their own nearby Management Servers, the latency of communications within the cloud is reduced + compared to managing widely-dispersed zones from a single central Management Server. + + + Usage records can also be consolidated and tracked at the region level, creating reports or invoices for each geographic region. + + + + + + region-overview.png: Nested structure of a region. + + Regions are visible to the end user. When a user starts a guest VM, the user must select a region for their guest. + Users might also be required to copy their private templates to additional regions to enable creation of guest VMs using their templates in those regions. +
\ No newline at end of file diff --git a/docs/en-US/about-zones.xml b/docs/en-US/about-zones.xml index 5385df05088..8f6cd06e6d9 100644 --- a/docs/en-US/about-zones.xml +++ b/docs/en-US/about-zones.xml @@ -23,7 +23,12 @@ -->
About Zones - A zone is the largest organizational unit within a &PRODUCT; deployment. A zone typically corresponds to a single datacenter, although it is permissible to have multiple zones in a datacenter. The benefit of organizing infrastructure into zones is to provide physical isolation and redundancy. For example, each zone can have its own power supply and network uplink, and the zones can be widely separated geographically (though this is not required). + A zone is the second largest organizational unit within a &PRODUCT; deployment. A zone + typically corresponds to a single datacenter, although it is permissible to have multiple + zones in a datacenter. The benefit of organizing infrastructure into zones is to provide + physical isolation and redundancy. For example, each zone can have its own power supply and + network uplink, and the zones can be widely separated geographically (though this is not + required). A zone consists of: One or more pods. Each pod contains one or more clusters of hosts and one or more primary storage servers. @@ -46,5 +51,6 @@ How many primary storage servers to place in each cluster and total capacity for the storage servers. How much secondary storage to deploy in a zone. - When you add a new zone, you will be prompted to configure the zone’s physical network and add the first pod, cluster, host, primary storage, and secondary storage. + When you add a new zone using the &PRODUCT; UI, you will be prompted to configure the zone’s physical network + and add the first pod, cluster, host, primary storage, and secondary storage.
diff --git a/docs/en-US/added-API-commands-4-1.xml b/docs/en-US/added-API-commands-4-1.xml index aa5529e41ff..0beb104cfe0 100644 --- a/docs/en-US/added-API-commands-4-1.xml +++ b/docs/en-US/added-API-commands-4-1.xml @@ -19,7 +19,7 @@ under the License. -->
- Added API Commands in 4.1-incubating + Added API Commands in 4.1 createEgressFirewallRules (creates an egress firewall rule on the guest network.) @@ -65,5 +65,10 @@ getUser (This API can only be used by the Admin. Get user details by using the API Key.) + addRegion (Add a region) + removeRegion (Delete a region) + updateRegion (Modify attributes of a region) + listRegions (List regions) +
diff --git a/docs/en-US/api-calls.xml b/docs/en-US/api-calls.xml index 3b97893d81d..af4073ac60b 100644 --- a/docs/en-US/api-calls.xml +++ b/docs/en-US/api-calls.xml @@ -25,8 +25,9 @@ Calling the &PRODUCT; API - + + diff --git a/docs/en-US/api-reference.xml b/docs/en-US/api-reference.xml index 983e76f2aa5..9a1acc145bd 100644 --- a/docs/en-US/api-reference.xml +++ b/docs/en-US/api-reference.xml @@ -19,6 +19,6 @@
API Reference Documentation You can find all the API reference documentation at the below site: - http://incubator.apache.org/cloudstack/docs/api/ + http://cloudstack.apache.org/docs/api/
diff --git a/docs/en-US/api-throttling.xml b/docs/en-US/api-throttling.xml new file mode 100644 index 00000000000..908e22389a8 --- /dev/null +++ b/docs/en-US/api-throttling.xml @@ -0,0 +1,67 @@ + + +%BOOK_ENTITIES; +]> + + + +
+ Limiting the Rate of API Requests + You can limit the rate at which API requests can be placed for each + account. This is useful to avoid malicious attacks on the Management Server, prevent + performance degradation, and provide fairness to all accounts. + If the number of API calls exceeds the threshold, an error message is returned for any additional API calls. + The caller will have to retry these API calls at another time. +
+ Configuring the API Request Rate + To control the API request rate, use the following global configuration + settings: + + api.throttling.enabled - Enable/Disable API throttling. By default, this setting is false, so + API throttling is not enabled. + api.throttling.interval (in seconds) - Time interval during which the number of API requests is to be counted. + When the interval has passed, the API count is reset to 0. + api.throttling.max - Maximum number of APIs that can be placed within the api.throttling.interval period. + api.throttling.cachesize - Cache size for storing API counters. + Use a value higher than the total number of accounts managed by the cloud. + One cache entry is needed for each account, to store the running API total for that account. + + +
+
+ Limitations on API Throttling + The following limitations exist in the current implementation of this feature. + Even with these limitations, &PRODUCT; is still able to effectively use API throttling to + avoid malicious attacks causing denial of service. + + + In a deployment with multiple Management Servers, + the cache is not synchronized across them. + In this case, &PRODUCT; might not be able to + ensure that only the exact desired number of API requests are allowed. + In the worst case, the number of API calls that might be allowed is + (number of Management Servers) * (api.throttling.max). + + The API commands resetApiLimit and getApiLimit are limited to the + Management Server where the API is invoked. + + +
+
\ No newline at end of file diff --git a/docs/en-US/aws-ec2-user-setup.xml b/docs/en-US/aws-ec2-user-setup.xml index f41eaa158d7..a2d89187feb 100644 --- a/docs/en-US/aws-ec2-user-setup.xml +++ b/docs/en-US/aws-ec2-user-setup.xml @@ -66,7 +66,7 @@ cloudstack-aws-api-register. If you do not have the source then download the script using the following command.
- wget -O cloudstack-aws-api-register "https://git-wip-us.apache.org/repos/asf?p=incubator-cloudstack.git;a=blob_plain;f=awsapi-setup/setup/cloudstack-aws-api-register;hb=HEAD" + wget -O cloudstack-aws-api-register "https://git-wip-us.apache.org/repos/asf?p=cloudstack.git;a=blob_plain;f=awsapi-setup/setup/cloudstack-aws-api-register;hb=4.1" Then execute it, using the access and secret keys that were obtained in step . An example is shown below. diff --git a/docs/en-US/best-practices.xml b/docs/en-US/best-practices.xml new file mode 100644 index 00000000000..41d7cde9036 --- /dev/null +++ b/docs/en-US/best-practices.xml @@ -0,0 +1,82 @@ + + +%BOOK_ENTITIES; +]> + + + + + Best Practices + Deploying a cloud is challenging. There are many different technology choices to make, and &PRODUCT; is flexible enough in its configuration that there are many possible ways to combine and configure the chosen technology. This section contains suggestions and requirements about cloud deployments. + These should be treated as suggestions and not absolutes. However, we do encourage anyone planning to build a cloud outside of these guidelines to seek guidance and advice on the project mailing lists. +
+ Process Best Practices + + + A staging system that models the production environment is strongly advised. It is critical if customizations have been applied to &PRODUCT;. + + + Allow adequate time for installation, a beta, and learning the system. Installs with basic networking can be done in hours. Installs with advanced networking usually take several days for the first attempt, with complicated installations taking longer. For a full production system, allow at least 4-8 weeks for a beta to work through all of the integration issues. You can get help from fellow users on the cloudstack-users mailing list. + + +
+
+ Setup Best Practices + + + Each host should be configured to accept connections only from well-known entities such as the &PRODUCT; Management Server or your network monitoring software. + + + Use multiple clusters per pod if you need to achieve a certain switch density. + + + Primary storage mountpoints or LUNs should not exceed 6 TB in size. It is better to have multiple smaller primary storage elements per cluster than one large one. + + + When exporting shares on primary storage, avoid data loss by restricting the range of IP addresses that can access the storage. See "Linux NFS on Local Disks and DAS" or "Linux NFS on iSCSI". + + + NIC bonding is straightforward to implement and provides increased reliability. + + + 10G networks are generally recommended for storage access when larger servers that can support relatively more VMs are used. + + + Host capacity should generally be modeled in terms of RAM for the guests. Storage and CPU may be overprovisioned. RAM may not. RAM is usually the limiting factor in capacity designs. + + + (XenServer) Configure the XenServer dom0 settings to allocate more memory to dom0. This can enable XenServer to handle larger numbers of virtual machines. We recommend 2940 MB of RAM for XenServer dom0. For instructions on how to do this, see http://support.citrix.com/article/CTX126531. The article refers to XenServer 5.6, but the same information applies to XenServer 6.0. + + +
+
+ Maintenance Best Practices + + + Monitor host disk space. Many host failures occur because the host's root disk fills up from logs that were not rotated adequately. + + + Monitor the total number of VM instances in each cluster, and disable allocation to the cluster if the total is approaching the maximum that the hypervisor can handle. Be sure to leave a safety margin to allow for the possibility of one or more hosts failing, which would increase the VM load on the other hosts as the VMs are redeployed. Consult the documentation for your chosen hypervisor to find the maximum permitted number of VMs per host, then use &PRODUCT; global configuration settings to set this as the default limit. Monitor the VM activity in each cluster and keep the total number of VMs below a safe level that allows for the occasional host failure. For example, if there are N hosts in the cluster, and you want to allow for one host in the cluster to be down at any given time, the total number of VM instances you can permit in the cluster is at most (N-1) * (per-host-limit). Once a cluster reaches this number of VMs, use the &PRODUCT; UI to disable allocation to the cluster. + + + The lack of up-to-date hotfixes can lead to data corruption and lost VMs. + Be sure all the hotfixes provided by the hypervisor vendor are applied. Track the release of hypervisor patches through your hypervisor vendor’s support channel, and apply patches as soon as possible after they are released. &PRODUCT; will not track or notify you of required hypervisor patches. It is essential that your hosts are completely up to date with the provided hypervisor patches. The hypervisor vendor is likely to refuse to support any system that is not up to date with patches.
+
diff --git a/docs/en-US/building-with-maven-steps.xml b/docs/en-US/building-with-maven-steps.xml index 319654fb290..1c15bfa96e1 100644 --- a/docs/en-US/building-with-maven-steps.xml +++ b/docs/en-US/building-with-maven-steps.xml @@ -25,7 +25,7 @@
Building Steps &PRODUCT; uses git for source version control, first make sure you have the source code by pulling it: - git clone https://git-wip-us.apache.org/repos/asf/incubator-cloudstack.git + git clone https://git-wip-us.apache.org/repos/asf/cloudstack.git Several Project Object Models (POM) are defined to deal with the various build targets of &PRODUCT;. Certain features require some packages that are not compatible with the Apache license and therefore need to be downloaded on your own. Check the wiki for additional information https://cwiki.apache.org/CLOUDSTACK/building-with-maven.html. In order to build all the open source targets of &PRODUCT; do: mvn clean install The resulting jar files will be in the target directory of the subdirectory of the compiled module. diff --git a/docs/en-US/castor-with-cs.xml b/docs/en-US/castor-with-cs.xml index 5049d33d638..7bf676b9c62 100644 --- a/docs/en-US/castor-with-cs.xml +++ b/docs/en-US/castor-with-cs.xml @@ -34,8 +34,7 @@ To configure CAStor: - Install &PRODUCT; 4.0.0-incubating by following the instructions given in the INSTALL.txt - file. + Install &PRODUCT; by following the instructions given in the INSTALL.txt file. You can use the S3 storage system in &PRODUCT; without setting up and installing the compute components. diff --git a/docs/en-US/changed-apicommands-4.1.xml b/docs/en-US/changed-apicommands-4.1.xml index 42bd088afb3..1667aafaa22 100644 --- a/docs/en-US/changed-apicommands-4.1.xml +++ b/docs/en-US/changed-apicommands-4.1.xml @@ -19,7 +19,7 @@ under the License. -->
- Changed API Commands in 4.1-incubating + Changed API Commands in 4.1 @@ -34,13 +34,148 @@ createNetworkOffering + + + The following request parameters have been added: + + + isPersistent + + + startipv6 + + + endipv6 + + + ip6gateway + + + ip6cidr + + + + + + listNetworkOfferings listNetworks - The following request parameters is added: isPersistent. - This parameter determines if the network or network offering created or listed by - using this offering are persistent or not. + The following request parameters have been added: + + + isPersistent + This parameter determines if the network or network offering listed are + persistent or not. + + + ip6gateway + + + ip6cidr + + + + + + + createVlanIpRange + + + The following request parameters have been added: + + + startipv6 + + + endipv6 + + + ip6gateway + + + ip6cidr + + + + + + + deployVirtualMachine + + + The following parameter has been added: ip6Address. + The following parameter is updated to accept the IPv6 address: + iptonetworklist. + + + + + CreateZoneCmd + + + The following parameter have been added: ip6dns1, ip6dns2. + + + + + listRouters + listVirtualMachines + + + For nic responses, the following fields have been added. + + + ip6address + + + ip6gateway + + + ip6cidr + + + + + + + listVlanIpRanges + + + For nic responses, the following fields have been added. + + + startipv6 + + + endipv6 + + + ip6gateway + + + ip6cidr + + + + + + + listRouters + listZones + + + For DomainRouter and DataCenter response, the following fields have been + added. 
+ + + ip6dns1 + + + ip6dns2 + + @@ -97,9 +232,21 @@ listZones - The following request parameters is added: securitygroupenabled + The following request parameter is added: securitygroupenabled + + createAccount + The following new request parameters are added: accountid, userid + + + createUser + The following new request parameter is added: userid + + + createDomain + The following new request parameter is added: domainid + diff --git a/docs/en-US/cloud-infrastructure-concepts.xml b/docs/en-US/cloud-infrastructure-concepts.xml index 7a7098a4b47..2ba228aa4dd 100644 --- a/docs/en-US/cloud-infrastructure-concepts.xml +++ b/docs/en-US/cloud-infrastructure-concepts.xml @@ -23,7 +23,8 @@ --> - Cloud Infrastructure Concepts + Cloud Infrastructure Concepts + diff --git a/docs/en-US/cloudmonkey.xml b/docs/en-US/cloudmonkey.xml index 0057562cca2..be4d17c3aa1 100644 --- a/docs/en-US/cloudmonkey.xml +++ b/docs/en-US/cloudmonkey.xml @@ -24,7 +24,7 @@
CloudMonkey - CloudMonkey is the &PRODUCT; Command Line Interface (CLI). It is written in Python and leverages Marvin. CloudMonkey can be used both as an interactive shell and as a command line tool which simplifies &PRODUCT; configuration and management. + CloudMonkey is the &PRODUCT; Command Line Interface (CLI). It is written in Python. CloudMonkey can be used both as an interactive shell and as a command line tool which simplifies &PRODUCT; configuration and management. It can be used with &PRODUCT; releases since the 4.0.x branch. CloudMonkey is still under development and should be considered a Work In Progress (WIP), the wiki is the most up to date documentation: https://cwiki.apache.org/CLOUDSTACK/cloudstack-cloudmonkey-cli.html @@ -32,13 +32,15 @@
Installing CloudMonkey - There are two ways to get CloudMonkey: - + CloudMonkey is dependent on readline, pygments, prettytable, when installing from source you will need to resolve those dependencies. Using the cheese shop, the dependencies will be automatically installed. + There are three ways to get CloudMonkey. Via the official &PRODUCT; source releases or via a community maintained distribution at the cheese shop. Developers can also get it directly from the git repository in tools/cli/. + + - Via the official Apache &PRODUCT; releases (starting with 4.1). + Via the official Apache &PRODUCT; releases as well as the git repository. Configuration - To configure CloudMonkey you can edit the .cloudmonkey_config file in the user's home directory as shown below. The values can also be set interactively at the cloudmonkey prompt + To configure CloudMonkey you can edit the ~/.cloudmonkey/config file in the user's home directory as shown below. The values can also be set interactively at the cloudmonkey prompt. Logs are kept in ~/.cloudmonkey/log, and history is stored in ~/.cloudmonkey/history. Discovered apis are listed in ~/.cloudmonkey/cache. 
Only the log and history files can be custom paths and can be configured by setting appropriate file paths in ~/.cloudmonkey/config -$ cat .cloudmonkey_config -[CLI] -protocol = http +$ cat ~/.cloudmonkey/config +[core] +log_file = /Users/sebastiengoasguen/.cloudmonkey/log asyncblock = true +paramcompletion = false +history_file = /Users/sebastiengoasguen/.cloudmonkey/history + +[ui] color = true -prompt = cloudmonkey> -history_file = /Users/sebastiengoasguen/.cloudmonkey_history -host = localhost +prompt = > +tabularize = false + +[user] +secretkey =VDaACYb0LV9eNjTetIOElcVQkvJck_J_QljX_FcHRj87ZKiy0z0ty0ZsYBkoXkY9b7eq1EhwJaw7FF3akA3KBQ +apikey = plgWJfZK4gyS3mOMTVmjUVg-X-jlWlnfaUJ9GAbBbf9EdMkAYMmAiLqzzq1ElZLYq_u38zCm0bewzGUdP66mg + +[server] path = /client/api +host = localhost +protocol = http port = 8080 -apikey = plgWJfZK4gyS3mOMTVmjUVg-X-jlWlnfaUJ9GAbBbf9EdM-kAYMmAiLqzzq1ElZLYq_u38zCm0bewzGUdP66mg -secretkey = VDaACYb0LV9eNjTetIOElcVQkvJck_J_QljX_FcHRj87ZKiy0z0ty0ZsYBkoXkY9b7eq1EhwJaw7FF3akA3KBQ -timeout = 600 -log_file = /Users/sebastiengoasguen/.cloudmonkey_log +timeout = 3600 - The values can also be set at the cloudmonkey prompt. The API and secret keys are obtained via the &PRODUCT; UI or via a raw api call. + The values can also be set at the CloudMonkey prompt. The API and secret keys are obtained via the &PRODUCT; UI or via a raw api call. set prompt myprompt> +☁ Apache CloudStack cloudmonkey 4.1.0-snapshot. Type help or ? to list commands. + +> set prompt myprompt> myprompt> set host localhost myprompt> set port 8080 myprompt> set apikey myprompt> set secretkey ]]> - You can use cloudmonkey to interact with a local cloud, and even with a remote public cloud. You just need to set the host value properly and obtain the keys from the cloud administrator. + You can use CloudMonkey to interact with a local cloud, and even with a remote public cloud. You just need to set the host value properly and obtain the keys from the cloud administrator. +
+ +
+ API Discovery + + In &PRODUCT; 4.0.* releases, the list of API calls available is pre-cached, while starting with &PRODUCT; 4.1 releases and above an API discovery service is enabled. CloudMonkey will automatically discover the API calls available on the management server. The sync command in CloudMonkey pulls a list of APIs which are accessible to your user role, along with help docs etc., and stores them in ~/.cloudmonkey/cache. This allows CloudMonkey to adapt to changes on the management server, so if the sysadmin enables a plugin such as Nicira NVP for that user role, users will pick up those changes. New verbs and grammar (DSL) rules are created on the fly. + + To discover the APIs available, do: + + > sync +324 APIs discovered and cached +
+ +
+ Tabular Output + The number of key/value pairs returned by the api calls can be large resulting in a very long output. To enable easier viewing of the output, a tabular formatting can be setup. You may enable tabular listing and even choose set of column fields, this allows you to create your own field using the filter param which takes in comma separated argument. If argument has a space, put them under double quotes. The create table will have the same sequence of field filters provided + To enable it, use the set function and create filters like so: + +> set tabularize true +> list users filter=id,domain,account +count = 1 +user: ++--------------------------------------+--------+---------+ +| id | domain | account | ++--------------------------------------+--------+---------+ +| 7ed6d5da-93b2-4545-a502-23d20b48ef2a | ROOT | admin | ++--------------------------------------+--------+---------+ +
Interactive Shell Usage - To start learning cloudmonkey, the best is to use the interactive shell. Simply type cloudmonkey at the prompt and you should get the interactive shell. - At the cloudmonkey prompt press the tab key twice, you will see all potential verbs available. Pick on, enter a space and then press tab twice. You will see all actions available for that verb + To start learning CloudMonkey, the best is to use the interactive shell. Simply type CloudMonkey at the prompt and you should get the interactive shell. + At the CloudMonkey prompt press the tab key twice, you will see all potential verbs available. Pick on, enter a space and then press tab twice. You will see all actions available for that verb diff --git a/docs/en-US/configure-package-repository.xml b/docs/en-US/configure-package-repository.xml index 9ef2307b105..3d102c697ad 100644 --- a/docs/en-US/configure-package-repository.xml +++ b/docs/en-US/configure-package-repository.xml @@ -34,7 +34,7 @@ in the sections for or you may find pre-built DEB and RPM packages for your convience linked from the - downloads + downloads page. diff --git a/docs/en-US/extracting-source.xml b/docs/en-US/extracting-source.xml index 97a40b66213..d1690401229 100644 --- a/docs/en-US/extracting-source.xml +++ b/docs/en-US/extracting-source.xml @@ -27,10 +27,10 @@ Extracting the &PRODUCT; release is relatively simple and can be done with a single command as follows: - $ tar -jxvf apache-cloudstack-4.0.0-incubating-src.tar.bz2 + $ tar -jxvf apache-cloudstack-4.1.0.src.tar.bz2 You can now move into the directory: - $ cd ./apache-cloudstack-4.0.0-incubating-src + $ cd ./apache-cloudstack-4.1.0-src
diff --git a/docs/en-US/feature-overview.xml b/docs/en-US/feature-overview.xml index a05078f8606..57b6d84973d 100644 --- a/docs/en-US/feature-overview.xml +++ b/docs/en-US/feature-overview.xml @@ -56,9 +56,9 @@ documented. This API enables the creation of command line tools and new user interfaces to suit particular needs. See the Developer’s Guide and API Reference, both available at - Apache CloudStack Guides + Apache CloudStack Guides and - Apache CloudStack API Reference + Apache CloudStack API Reference respectively. diff --git a/docs/en-US/getting-release.xml b/docs/en-US/getting-release.xml index b9e97c9b03d..ee08a941b96 100644 --- a/docs/en-US/getting-release.xml +++ b/docs/en-US/getting-release.xml @@ -29,8 +29,7 @@ Apache CloudStack project download page. - Prior releases are available via archive.apache.org at http://archive.apache.org/dist/incubator/cloudstack/releases/. - + Prior releases are available via archive.apache.org as well. See the downloads page for more information on archived releases. You'll notice several links under the 'Latest release' section. A link to a file ending in tar.bz2, as well as a PGP/GPG signature, MD5, and SHA512 file. The tar.bz2 file contains the Bzip2-compressed tarball with the source code. diff --git a/docs/en-US/guest-nw-usage-with-traffic-sentinel.xml b/docs/en-US/guest-nw-usage-with-traffic-sentinel.xml index eb42d641c4b..d6fc10bca52 100644 --- a/docs/en-US/guest-nw-usage-with-traffic-sentinel.xml +++ b/docs/en-US/guest-nw-usage-with-traffic-sentinel.xml @@ -54,12 +54,11 @@ addTrafficMonitor. Pass in the URL of the Traffic Sentinel as protocol + host + port (optional); for example, http://10.147.28.100:8080. For the addTrafficMonitor command syntax, see the API Reference at API + url="http://cloudstack.apache.org/docs/api/index.html">API Documentation. For information about how to call the &PRODUCT; API, see the Developer’s Guide at - CloudStack API Developer's Guide. + + &PRODUCT; API Developer's Guide. 
Log in to the &PRODUCT; UI as administrator. diff --git a/docs/en-US/images/region-overview.png b/docs/en-US/images/region-overview.png new file mode 100644 index 00000000000..528445c9d89 Binary files /dev/null and b/docs/en-US/images/region-overview.png differ diff --git a/docs/en-US/ip-forwarding-firewalling.xml b/docs/en-US/ip-forwarding-firewalling.xml index 54e18b7cfbc..d7a24571429 100644 --- a/docs/en-US/ip-forwarding-firewalling.xml +++ b/docs/en-US/ip-forwarding-firewalling.xml @@ -20,13 +20,15 @@ -->
IP Forwarding and Firewalling - By default, all incoming traffic to the public IP address is rejected. All outgoing traffic - from the guests is translated via NAT to the public IP address and is allowed. + By default, all incoming traffic to the public IP address is rejected. + All outgoing traffic from the guests is also blocked by default. + To allow outgoing traffic, follow the procedure in . To allow incoming traffic, users may set up firewall rules and/or port forwarding rules. For example, you can use a firewall rule to open a range of ports on the public IP address, such as 33 through 44. Then use port forwarding rules to direct traffic from individual ports within that range to specific ports on user VMs. For example, one port forwarding rule could route incoming traffic on the public IP's port 33 to port 100 on one user VM's private IP. - + +
diff --git a/docs/en-US/ipv6-support.xml b/docs/en-US/ipv6-support.xml new file mode 100644 index 00000000000..7367ec9ad80 --- /dev/null +++ b/docs/en-US/ipv6-support.xml @@ -0,0 +1,191 @@ + + +%BOOK_ENTITIES; +]> + + +
+ IPv6 Support in &PRODUCT; + &PRODUCT; supports Internet Protocol version 6 (IPv6), the recent version of the Internet + Protocol (IP) that defines routing the network traffic. IPv6 uses a 128-bit address that + exponentially expands the current address space that is available to the users. IPv6 addresses + consist of eight groups of four hexadecimal digits separated by colons, for example, + 5001:0db8:83a3:1012:1000:8a2e:0870:7454. &PRODUCT; supports IPv6 for public IPs in shared + networks. With IPv6 support, VMs in shared networks can obtain both IPv4 and IPv6 addresses from + the DHCP server. You can deploy VMs either in an IPv6 or IPv4 network, or in a dual network + environment. If an IPv6 network is used, the VM generates a link-local IPv6 address by itself, and + receives a stateful IPv6 address from the DHCPv6 server. + IPv6 is supported only on KVM and XenServer hypervisors. The IPv6 support is only an + experimental feature. + Here's the sequence of events when IPv6 is used: + + + The administrator creates an IPv6 shared network in an advanced zone. + + + The user deploys a VM in an IPv6 shared network. + + + The user VM generates an IPv6 link local address by itself, and gets an IPv6 global or + site local address through DHCPv6. + For information on API changes, see . + + +
+ Prerequisites and Guidelines + Consider the following: + + + CIDR size must be 64 for IPv6 networks. + + + The DHCP client of the guest VMs should support generating DUID based on Link-layer + Address (DUID-LL). DUID-LL derives from the MAC address of guest VMs, and therefore the + user VM can be identified by using DUID. See Dynamic Host Configuration Protocol for IPv6 + for more information. + + + The gateway of the guest network generates Router Advertisement and Response messages to + Router Solicitation. The M (Managed Address Configuration) flag of Router Advertisement + should enable stateful IP address configuration. Set the M flag so that the end nodes + receive their IPv6 addresses from the DHCPv6 server as opposed to the router or + switch. + + The M flag is the 1-bit Managed Address Configuration flag for Router Advertisement. + When set, Dynamic Host Configuration Protocol (DHCPv6) is available for address + configuration in addition to any IPs set by using stateless address + auto-configuration. + + + + Use the System VM template exclusively designed to support IPv6. Download the System + VM template from http://nfs1.lab.vmops.com/templates/routing/debian/ipv6/. + + + The concept of Default Network applies to IPv6 networks. However, unlike IPv4 + &PRODUCT; does not control the routing information of IPv6 in shared network; the choice + of Default Network will not affect the routing in the user VM. + + + In a multiple shared network, the default route is set by the rack router, rather than + the DHCP server, which is out of &PRODUCT; control. Therefore, in order for the user VM to + get only the default route from the default NIC, modify the configuration of the user VM, + and set non-default NIC's accept_ra to 0 explicitly. The + accept_ra parameter accepts Router Advertisements and auto-configures + /proc/sys/net/ipv6/conf/interface with received data. + +
+
+ Limitations of IPv6 in &PRODUCT; + The following are not yet supported: + + + Security groups + + + Userdata and metadata + + + Passwords + + +
+
+ Guest VM Configuration for DHCPv6 + For the guest VMs to get IPv6 address, run dhclient command manually on each of the VMs. + Use DUID-LL to set up dhclient. + The IPv6 address is lost when a VM is stopped and started. Therefore, use the same procedure + to get an IPv6 address when a VM is stopped and started. + + + Set up dhclient by using DUID-LL. + Perform the following for DHCP Client 4.2 and above: + + + Run the following command on the selected VM to get the dhcpv6 offer from + VR: + dhclient -6 -D LL <dev> + + + Perform the following for DHCP Client 4.1: + + + Open the following to the dhclient configuration file: + vi /etc/dhcp/dhclient.conf + + + Add the following to the dhclient configuration file: + send dhcp6.client-id = concat(00:03:00, hardware); + + + + + Get IPv6 address from DHCP server as part of the system or network restart. + Based on the operating systems, perform the following: + On CentOS 6.2: + + + Open the Ethernet interface configuration file: + vi /etc/sysconfig/network-scripts/ifcfg-eth0 + The ifcfg-eth0 file controls the first NIC in a system. + + + Make the necessary configuration changes, as given below: + DEVICE=eth0 +HWADDR=06:A0:F0:00:00:38 +NM_CONTROLLED=no +ONBOOT=yes +BOOTPROTO=dhcp6 +TYPE=Ethernet +USERCTL=no +PEERDNS=yes +IPV6INIT=yes +DHCPV6C=yes + + + Open the following: + vi /etc/sysconfig/network + + + Make the necessary configuration changes, as given below: + NETWORKING=yes +HOSTNAME=centos62mgmt.lab.vmops.com +NETWORKING_IPV6=yes +IPV6_AUTOCONF=no + + + On Ubuntu 12.10 + + + Open the following: + etc/network/interfaces: + + + Make the necessary configuration changes, as given below: + iface eth0 inet6 dhcp +autoconf 0 +accept_ra 1 + + + + +
+
diff --git a/docs/en-US/management-server-install-multi-node.xml b/docs/en-US/management-server-install-multi-node.xml index 3f011b83b87..21cf28fc719 100644 --- a/docs/en-US/management-server-install-multi-node.xml +++ b/docs/en-US/management-server-install-multi-node.xml @@ -65,8 +65,7 @@ linkend="sect-source-buildrpm"/> or as Repeat these steps on each additional Management Server.
- Be sure to configure a load balancer for the Management Servers. See Management Server - Load Balancing. + Be sure to configure a load balancer for the Management Servers. See .
diff --git a/docs/en-US/management-server-lb.xml b/docs/en-US/management-server-lb.xml index 85a86221c80..9aee1548026 100644 --- a/docs/en-US/management-server-lb.xml +++ b/docs/en-US/management-server-lb.xml @@ -19,7 +19,7 @@ under the License. -->
- Setting Zone VLAN and Running VM Maximums + Management Server Load Balancing &PRODUCT; can use a load balancer to provide a virtual IP for multiple Management Servers. The administrator is responsible for creating the load balancer rules for the Management Servers. The application requires persistence or stickiness across multiple sessions. @@ -58,4 +58,9 @@ + In addition to the above settings, the administrator is responsible for setting the 'host' global + config value from the management server IP to the load balancer virtual IP address. + If the 'host' value is not set to the VIP for Port 8250 and one of your management servers crashes, + the UI is still available but the system VMs will not be able to contact the management server. +
diff --git a/docs/en-US/provisioning-steps-overview.xml b/docs/en-US/provisioning-steps-overview.xml index daf2cfc9d9b..5fb61963b4b 100644 --- a/docs/en-US/provisioning-steps-overview.xml +++ b/docs/en-US/provisioning-steps-overview.xml @@ -26,12 +26,13 @@ After the Management Server is installed and running, you can add the compute resources for it to manage. For an overview of how a &PRODUCT; cloud infrastructure is organized, see . To provision the cloud infrastructure, or to scale it up at any time, follow these procedures: - Add a zone. See . - Add more pods (optional). See . - Add more clusters (optional). See . - Add more hosts (optional). See . - Add primary storage. See . - Add secondary storage. See . + Define regions (optional). See . + Add a zone to the region. See . + Add more pods to the zone (optional). See . + Add more clusters to the pod (optional). See . + Add more hosts to the cluster (optional). See . + Add primary storage to the cluster. See . + Add secondary storage to the zone. See . Initialize and test the new cloud. See . When you have finished these steps, you will have a deployment with the following basic structure: diff --git a/docs/en-US/provisioning-steps.xml b/docs/en-US/provisioning-steps.xml index 8777b02df13..04ece13938e 100644 --- a/docs/en-US/provisioning-steps.xml +++ b/docs/en-US/provisioning-steps.xml @@ -23,8 +23,9 @@ --> Steps to Provisioning Your Cloud Infrastructure - This section tells how to add zones, pods, clusters, hosts, storage, and networks to your cloud. If you are unfamiliar with these entities, please begin by looking through . + This section tells how to add regions, zones, pods, clusters, hosts, storage, and networks to your cloud. If you are unfamiliar with these entities, please begin by looking through . 
+ diff --git a/docs/en-US/region-add.xml b/docs/en-US/region-add.xml new file mode 100644 index 00000000000..960777c0a2e --- /dev/null +++ b/docs/en-US/region-add.xml @@ -0,0 +1,128 @@ + + +%BOOK_ENTITIES; +]> + + + +
+ Adding Regions (optional) + Grouping your cloud resources into geographic regions is an optional step when provisioning the cloud. + For an overview of regions, see . +
+ The First Region: The Default Region + If you do not take action to define regions, then all the zones in your cloud will be + automatically grouped into a single default region. This region is assigned the region + ID of 1. + You can change the name or URL of the default region by using the API command updateRegion. For example: + http://<IP_of_Management_Server>:8080/client/api?command=updateRegion&id=1&name=Northern&endpoint=http://<region_1_IP_address_here>:8080/client&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D +
+
+ Adding a Region + Use these steps to add a second region in addition to the default region. + + Each region has its own &PRODUCT; instance. Therefore, the first step of creating a new region + is to install the Management Server software, on one or more nodes, in the + geographic area where you want to set up the new region. Use the steps in the + Installation guide. When you come to the step where you set up the database, use + the additional command-line flag -r <region_id> to set a + region ID for the new region. The default region is automatically assigned a + region ID of 1, so your first additional region might be region 2. + cloud-setup-databases cloud:<dbpassword>@localhost --deploy-as=root:<password> -e <encryption_type> -m <management_server_key> -k <database_key> -r <region_id> + + By the end of the installation procedure, the Management Server should have been started. Be sure that the Management Server installation was successful and complete. + Add region 2 to region 1. Use the API command addRegion. (For information about how to make an API call, see the Developer's Guide.) + http://<IP_of_region_1_Management_Server>:8080/client/api?command=addRegion&id=2&name=Western&endpoint=http://<region_2_IP_address_here>:8080/client&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D + + Now perform the same command in reverse, adding region 1 to region 2. + http://<IP_of_region_2_Management_Server>:8080/client/api?command=addRegion&id=1&name=Northern&endpoint=http://<region_1_IP_address_here>:8080/client&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D + + Copy the account, user, and domain tables from the region 1 database to the region 2 database. 
+ In the following commands, it is assumed that you have set the root password on the + database, which is a &PRODUCT; recommended best practice. Substitute your own MySQL + root password. + + First, run this command to copy the contents of the database: + # mysqldump -u root -p<mysql_password> -h <region1_db_host> cloud account user domain > region1.sql + + Then run this command to put the data onto the region 2 database: + # mysql -u root -p<mysql_password> -h <region2_db_host> cloud < region1.sql + + + + Remove project accounts. Run these commands on the region 2 database: + mysql> delete from account where type = 5; + + Set the default zone as null: + mysql> update account set default_zone_id = null; + + Restart the Management Servers in region 2. + +
+
+ Adding Third and Subsequent Regions + To add the third region, and subsequent additional regions, the steps are similar to those for adding the second region. + However, you must repeat certain steps additional times for each additional region: + + Install &PRODUCT; in each additional region. Set the region ID for each region during the database setup step. + cloud-setup-databases cloud:<dbpassword>@localhost --deploy-as=root:<password> -e <encryption_type> -m <management_server_key> -k <database_key> -r <region_id> + Once the Management Server is running, add your new region to all existing regions by + repeatedly calling the API command addRegion. For example, if you were adding + region 3: + http://<IP_of_region_1_Management_Server>:8080/client/api?command=addRegion&id=3&name=Eastern&endpoint=http://<region_3_IP_address_here>:8080/client&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D + +http://<IP_of_region_2_Management_Server>:8080/client/api?command=addRegion&id=3&name=Eastern&endpoint=http://<region_3_IP_address_here>:8080/client&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D + Repeat the procedure in reverse to add all existing regions to the new region. 
For example, + for the third region, add the other two existing regions: + http://<IP_of_region_3_Management_Server>:8080/client/api?command=addRegion&id=1&name=Northern&endpoint=http://<region_1_IP_address_here>:8080/client&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D + +http://<IP_of_region_3_Management_Server>:8080/client/api?command=addRegion&id=2&name=Western&endpoint=http://<region_2_IP_address_here>:8080/client&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D + Copy the account, user, and domain tables from any existing region's database to the new + region's database. + In the following commands, it is assumed that you have set the root password on the + database, which is a &PRODUCT; recommended best practice. Substitute your own MySQL + root password. + + First, run this command to copy the contents of the database: + # mysqldump -u root -p<mysql_password> -h <region1_db_host> cloud account user domain > region1.sql + + Then run this command to put the data onto the new region's database. For example, for region + 3: + # mysql -u root -p<mysql_password> -h <region3_db_host> cloud < region1.sql + + + + Remove project accounts. Run these commands on the new region's database: + mysql> delete from account where type = 5; + + Set the default zone as null: + mysql> update account set default_zone_id = null; + + Restart the Management Servers in the new region. + +
+
+ Deleting a Region + To delete a region, use the API command removeRegion. Repeat the call to remove the region from all other regions. For example, to remove the 3rd region in a three-region cloud: + http://<IP_of_region_1_Management_Server>:8080/client/api?command=removeRegion&id=3&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D + +http://<IP_of_region_2_Management_Server>:8080/client/api?command=removeRegion&id=3&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D +
+
\ No newline at end of file diff --git a/docs/en-US/response-formats.xml b/docs/en-US/response-formats.xml index 7b3f93ac636..b21f4ab668b 100644 --- a/docs/en-US/response-formats.xml +++ b/docs/en-US/response-formats.xml @@ -25,6 +25,8 @@
Response Formats: XML and JSON CloudStack supports two formats as the response to an API call. The default response is XML. If you would like the response to be in JSON, add &response=json to the Command String. + The two response formats differ in how they handle blank fields. In JSON, if there is no value for a response field, it will not appear in the response. If all the fields were empty, there might be no response at all. + In XML, even if there is no value to be returned, an empty field will be returned as a placeholder XML element. Sample XML Response: <listipaddressesresponse> diff --git a/docs/en-US/sys-reliability-and-ha.xml b/docs/en-US/sys-reliability-and-ha.xml index 94385ff683d..e3c1cd9026f 100644 --- a/docs/en-US/sys-reliability-and-ha.xml +++ b/docs/en-US/sys-reliability-and-ha.xml @@ -25,8 +25,10 @@ System Reliability and High Availability + - + + diff --git a/docs/en-US/whats-new.xml b/docs/en-US/whats-new.xml index 761d7a2eb37..252f87d0543 100644 --- a/docs/en-US/whats-new.xml +++ b/docs/en-US/whats-new.xml @@ -1,5 +1,5 @@ - %BOOK_ENTITIES; ]> @@ -25,6 +25,7 @@
What's New in the API for 4.1 + diff --git a/engine/api/src/org/apache/cloudstack/engine/datacenter/entity/api/StorageEntity.java b/engine/api/src/org/apache/cloudstack/engine/datacenter/entity/api/StorageEntity.java index 2c7f443e567..872931b1c8b 100755 --- a/engine/api/src/org/apache/cloudstack/engine/datacenter/entity/api/StorageEntity.java +++ b/engine/api/src/org/apache/cloudstack/engine/datacenter/entity/api/StorageEntity.java @@ -21,4 +21,5 @@ package org.apache.cloudstack.engine.datacenter.entity.api; import com.cloud.storage.StoragePool; public interface StorageEntity extends DataCenterResourceEntity, StoragePool { + } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/AbstractScope.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/AbstractScope.java new file mode 100644 index 00000000000..c94db66b202 --- /dev/null +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/AbstractScope.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.engine.subsystem.api.storage; + +public abstract class AbstractScope implements Scope { + @Override + public boolean isSameScope(Scope scope) { + if (this.getScopeType() == scope.getScopeType() && this.getScopeId() == scope.getScopeId()) { + return true; + } else { + return false; + } + } +} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ClusterScope.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ClusterScope.java index fce7d82cb99..0f0e9581523 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ClusterScope.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ClusterScope.java @@ -19,7 +19,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage; -public class ClusterScope implements Scope { +public class ClusterScope extends AbstractScope { private ScopeType type = ScopeType.CLUSTER; private Long clusterId; private Long podId; diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java index 95e3d0b2ef8..280e02e2a32 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java @@ -34,9 +34,9 @@ public interface DataStoreLifeCycle { public boolean unmanaged(); - public boolean maintain(long storeId); + public boolean maintain(DataStore store); - public boolean cancelMaintain(long storeId); + public boolean cancelMaintain(DataStore store); - public boolean deleteDataStore(long storeId); + public boolean deleteDataStore(DataStore store); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProvider.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProvider.java index 
d29c4828713..115a52f92ac 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProvider.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProvider.java @@ -19,12 +19,19 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import java.util.Map; +import java.util.Set; public interface DataStoreProvider { - public DataStoreLifeCycle getLifeCycle(); + public static enum DataStoreProviderType { + PRIMARY, + IMAGE + } + public DataStoreLifeCycle getDataStoreLifeCycle(); + public DataStoreDriver getDataStoreDriver(); + public HypervisorHostListener getHostListener(); public String getName(); - public String getUuid(); - public long getId(); public boolean configure(Map params); + public Set getTypes(); + } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProviderManager.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProviderManager.java index 94998133196..906720a1f41 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProviderManager.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProviderManager.java @@ -20,12 +20,12 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import java.util.List; +import com.cloud.storage.DataStoreProviderApiService; import com.cloud.utils.component.Manager; -public interface DataStoreProviderManager extends Manager { - public DataStoreProvider getDataStoreProviderByUuid(String uuid); - public DataStoreProvider getDataStoreProviderById(long id); +public interface DataStoreProviderManager extends Manager, DataStoreProviderApiService { public DataStoreProvider getDataStoreProvider(String name); public DataStoreProvider getDefaultPrimaryDataStoreProvider(); public List getDataStoreProviders(); + } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HostScope.java 
b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HostScope.java index 71d1952c625..c5e90ac894c 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HostScope.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HostScope.java @@ -19,7 +19,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage; -public class HostScope implements Scope { +public class HostScope extends AbstractScope { private ScopeType type = ScopeType.HOST; private Long hostId; public HostScope(Long hostId) { diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ImageDataStoreProvider.java similarity index 86% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java rename to engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ImageDataStoreProvider.java index d44a40e971f..1fb987e81cd 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ImageDataStoreProvider.java @@ -16,9 +16,8 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.cloudstack.storage.datastore.provider; +package org.apache.cloudstack.engine.subsystem.api.storage; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; public interface ImageDataStoreProvider extends DataStoreProvider { diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java new file mode 100644 index 00000000000..b2b787cc133 --- /dev/null +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java @@ -0,0 +1,220 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.engine.subsystem.api.storage; + +import java.util.Map; + +import com.cloud.storage.Storage.StoragePoolType; + +public class PrimaryDataStoreParameters { + private Long zoneId; + private Long podId; + private Long clusterId; + private String providerName; + private Map details; + private String tags; + private StoragePoolType type; + private String host; + private String path; + private int port; + private String uuid; + private String name; + private String userInfo; + /** + * @return the userInfo + */ + public String getUserInfo() { + return userInfo; + } + + /** + * @param userInfo the userInfo to set + */ + public void setUserInfo(String userInfo) { + this.userInfo = userInfo; + } + + /** + * @return the name + */ + public String getName() { + return name; + } + + /** + * @param name the name to set + */ + public void setName(String name) { + this.name = name; + } + + /** + * @return the uuid + */ + public String getUuid() { + return uuid; + } + + /** + * @param uuid the uuid to set + */ + public void setUuid(String uuid) { + this.uuid = uuid; + } + + /** + * @return the port + */ + public int getPort() { + return port; + } + + /** + * @param port the port to set + */ + public void setPort(int port) { + this.port = port; + } + + /** + * @return the path + */ + public String getPath() { + return path; + } + + /** + * @param path the path to set + */ + public void setPath(String path) { + this.path = path; + } + + /** + * @return the host + */ + public String getHost() { + return host; + } + + /** + * @param host the host to set + */ + public void setHost(String host) { + this.host = host; + } + + /** + * @return the type + */ + public StoragePoolType getType() { + return type; + } + + /** + * @param type the type to set + */ + public void setType(StoragePoolType type) { + this.type = type; + } + + /** + * @return the tags + */ + public String getTags() { + return tags; + } + + /** + * @param tags the tags to set + */ + public 
void setTags(String tags) { + this.tags = tags; + } + + /** + * @return the details + */ + public Map getDetails() { + return details; + } + + /** + * @param details the details to set + */ + public void setDetails(Map details) { + this.details = details; + } + + /** + * @return the providerName + */ + public String getProviderName() { + return providerName; + } + + /** + * @param providerName the providerName to set + */ + public void setProviderName(String providerName) { + this.providerName = providerName; + } + + /** + * @return the clusterId + */ + public Long getClusterId() { + return clusterId; + } + + /** + * @param clusterId the clusterId to set + */ + public void setClusterId(Long clusterId) { + this.clusterId = clusterId; + } + + /** + * @return the podId + */ + public Long getPodId() { + return podId; + } + + /** + * @param podId the podId to set + */ + public void setPodId(Long podId) { + this.podId = podId; + } + + /** + * @return the zoneId + */ + public Long getZoneId() { + return zoneId; + } + + /** + * @param zoneId the zoneId to set + */ + public void setZoneId(Long zoneId) { + this.zoneId = zoneId; + } +} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreProvider.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreProvider.java index b248758bc12..b349ac9ad71 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreProvider.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreProvider.java @@ -14,3 +14,7 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
+package org.apache.cloudstack.engine.subsystem.api.storage; + +public interface PrimaryDataStoreProvider extends DataStoreProvider { +} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/Scope.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/Scope.java index c1596d4f5f7..91d4734ef15 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/Scope.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/Scope.java @@ -20,5 +20,6 @@ package org.apache.cloudstack.engine.subsystem.api.storage; public interface Scope { public ScopeType getScopeType(); + public boolean isSameScope(Scope scope); public Long getScopeId(); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ZoneScope.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ZoneScope.java index ac277af36de..2d3d41f22b5 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ZoneScope.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ZoneScope.java @@ -19,7 +19,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage; -public class ZoneScope implements Scope { +public class ZoneScope extends AbstractScope { private ScopeType type = ScopeType.ZONE; private Long zoneId; diff --git a/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java index 579eaefe329..55b2314f0fd 100644 --- a/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java +++ b/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java @@ -80,8 +80,8 @@ public class StoragePoolVO implements StoragePool{ @Enumerated(value = EnumType.STRING) private StoragePoolStatus status; - @Column(name = "storage_provider_id", updatable = true, nullable = false) - private Long storageProviderId; + @Column(name = 
"storage_provider_name", updatable = true, nullable = false) + private String storageProviderName; @Column(name = "host_address") private String hostAddress; @@ -180,12 +180,12 @@ public class StoragePoolVO implements StoragePool{ return availableBytes; } - public Long getStorageProviderId() { - return storageProviderId; + public String getStorageProviderName() { + return storageProviderName; } - public void setStorageProviderId(Long provider) { - storageProviderId = provider; + public void setStorageProviderName(String providerName) { + storageProviderName = providerName; } public long getCapacityBytes() { diff --git a/engine/api/test/org/apache/cloudstack/engine/subsystem/api/storage/ScopeTest.java b/engine/api/test/org/apache/cloudstack/engine/subsystem/api/storage/ScopeTest.java new file mode 100644 index 00000000000..e3ec48c74f0 --- /dev/null +++ b/engine/api/test/org/apache/cloudstack/engine/subsystem/api/storage/ScopeTest.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.engine.subsystem.api.storage; + +import static org.junit.Assert.*; +import junit.framework.Assert; + +import org.junit.Test; + +public class ScopeTest { + + @Test + public void testZoneScope() { + ZoneScope zoneScope = new ZoneScope(1L); + ZoneScope zoneScope2 = new ZoneScope(1L); + Assert.assertTrue(zoneScope.isSameScope(zoneScope2)); + + ZoneScope zoneScope3 = new ZoneScope(2L); + Assert.assertFalse(zoneScope.isSameScope(zoneScope3)); + } + + @Test + public void testClusterScope() { + ClusterScope clusterScope = new ClusterScope(1L, 1L, 1L); + ClusterScope clusterScope2 = new ClusterScope(1L, 1L, 1L); + + Assert.assertTrue(clusterScope.isSameScope(clusterScope2)); + + ClusterScope clusterScope3 = new ClusterScope(2L, 2L, 1L); + Assert.assertFalse(clusterScope.isSameScope(clusterScope3)); + } + + @Test + public void testHostScope() { + HostScope hostScope = new HostScope(1L); + HostScope hostScope2 = new HostScope(1L); + HostScope hostScope3 = new HostScope(2L); + + Assert.assertTrue(hostScope.isSameScope(hostScope2)); + Assert.assertFalse(hostScope.isSameScope(hostScope3)); + } + +} diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java index 2771f78e381..bc546f8d0c1 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java @@ -28,7 +28,7 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; -import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider; import 
org.apache.cloudstack.storage.image.ImageDataStoreDriver; import org.apache.cloudstack.storage.image.datastore.ImageDataStore; import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager; @@ -57,21 +57,21 @@ public class ImageDataStoreManagerImpl implements ImageDataStoreManager { @Override public ImageDataStore getImageDataStore(long dataStoreId) { ImageDataStoreVO dataStore = dataStoreDao.findById(dataStoreId); - long providerId = dataStore.getProvider(); - ImageDataStoreProvider provider = (ImageDataStoreProvider)providerManager.getDataStoreProviderById(providerId); + String providerName = dataStore.getProviderName(); + ImageDataStoreProvider provider = (ImageDataStoreProvider)providerManager.getDataStoreProvider(providerName); ImageDataStore imgStore = DefaultImageDataStoreImpl.getDataStore(dataStore, - driverMaps.get(provider.getUuid()), provider + driverMaps.get(provider.getName()), provider ); // TODO Auto-generated method stub return imgStore; } @Override - public boolean registerDriver(String uuid, ImageDataStoreDriver driver) { - if (driverMaps.containsKey(uuid)) { + public boolean registerDriver(String providerName, ImageDataStoreDriver driver) { + if (driverMaps.containsKey(providerName)) { return false; } - driverMaps.put(uuid, driver); + driverMaps.put(providerName, driver); return true; } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/AncientImageDataStoreProvider.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/AncientImageDataStoreProvider.java index b2ee9ab853d..2715dc7e0e9 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/AncientImageDataStoreProvider.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/AncientImageDataStoreProvider.java @@ -19,14 +19,18 @@ package org.apache.cloudstack.storage.image.store; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import 
java.util.UUID; import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; -import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider; import org.apache.cloudstack.storage.image.ImageDataStoreDriver; import org.apache.cloudstack.storage.image.datastore.ImageDataStoreHelper; import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager; @@ -47,10 +51,9 @@ public class AncientImageDataStoreProvider implements ImageDataStoreProvider { ImageDataStoreManager storeMgr; @Inject ImageDataStoreHelper helper; - long id; - String uuid; + @Override - public DataStoreLifeCycle getLifeCycle() { + public DataStoreLifeCycle getDataStoreLifeCycle() { return lifeCycle; } @@ -59,23 +62,12 @@ public class AncientImageDataStoreProvider implements ImageDataStoreProvider { return this.name; } - @Override - public String getUuid() { - return this.uuid; - } - - @Override - public long getId() { - return this.id; - } - @Override public boolean configure(Map params) { lifeCycle = ComponentContext.inject(DefaultImageDataStoreLifeCycle.class); driver = ComponentContext.inject(AncientImageDataStoreDriverImpl.class); - uuid = (String)params.get("uuid"); - id = (Long)params.get("id"); - storeMgr.registerDriver(uuid, driver); + + storeMgr.registerDriver(this.getName(), driver); Map infos = new HashMap(); String dataStoreName = UUID.nameUUIDFromBytes(this.name.getBytes()).toString(); @@ -83,10 +75,27 @@ public class AncientImageDataStoreProvider implements ImageDataStoreProvider { infos.put("uuid", dataStoreName); infos.put("protocol", "http"); infos.put("scope", ScopeType.GLOBAL); - infos.put("provider", 
this.getId()); - DataStoreLifeCycle lifeCycle = this.getLifeCycle(); + infos.put("providerName", this.getName()); + DataStoreLifeCycle lifeCycle = this.getDataStoreLifeCycle(); lifeCycle.initialize(infos); return true; } + @Override + public DataStoreDriver getDataStoreDriver() { + return this.driver; + } + + @Override + public HypervisorHostListener getHostListener() { + return null; + } + + @Override + public Set getTypes() { + Set types = new HashSet(); + types.add(DataStoreProviderType.IMAGE); + return types; + } + } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreImpl.java index a6e961a0a83..6eefc6f43f8 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreImpl.java @@ -25,13 +25,13 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider; import org.apache.cloudstack.storage.image.ImageDataStoreDriver; import org.apache.cloudstack.storage.image.datastore.ImageDataStore; import 
org.apache.cloudstack.storage.image.db.ImageDataStoreVO; diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreProvider.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreProvider.java index efbb999bdcf..0b5de858819 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreProvider.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreProvider.java @@ -18,12 +18,16 @@ */ package org.apache.cloudstack.storage.image.store; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; -import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider; import org.apache.cloudstack.storage.image.ImageDataStoreDriver; import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager; import org.apache.cloudstack.storage.image.driver.DefaultImageDataStoreDriverImpl; @@ -41,7 +45,7 @@ public class DefaultImageDataStoreProvider implements ImageDataStoreProvider { long id; String uuid; @Override - public DataStoreLifeCycle getLifeCycle() { + public DataStoreLifeCycle getDataStoreLifeCycle() { return lifeCycle; } @@ -50,24 +54,29 @@ public class DefaultImageDataStoreProvider implements ImageDataStoreProvider { return this.name; } - @Override - public String getUuid() { - return this.uuid; - } - - @Override - public long getId() { - return this.id; - } - @Override public boolean configure(Map params) { lifeCycle = ComponentContext.inject(DefaultImageDataStoreLifeCycle.class); driver = 
ComponentContext.inject(DefaultImageDataStoreDriverImpl.class); - uuid = (String)params.get("uuid"); - id = (Long)params.get("id"); - storeMgr.registerDriver(uuid, driver); + + storeMgr.registerDriver(this.getName(), driver); return true; } + @Override + public Set getTypes() { + Set types = new HashSet(); + types.add(DataStoreProviderType.IMAGE); + return types; + } + + @Override + public DataStoreDriver getDataStoreDriver() { + return this.driver; + } + + @Override + public HypervisorHostListener getHostListener() { + return null; + } } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java index 17aabca3921..ba29c1a14b0 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java @@ -86,27 +86,22 @@ public class DefaultImageDataStoreLifeCycle implements ImageDataStoreLifeCycle { @Override - public boolean maintain(long storeId) { + public boolean maintain(DataStore store) { // TODO Auto-generated method stub return false; } @Override - public boolean cancelMaintain(long storeId) { + public boolean cancelMaintain(DataStore store) { // TODO Auto-generated method stub return false; } @Override - public boolean deleteDataStore(long storeId) { + public boolean deleteDataStore(DataStore store) { // TODO Auto-generated method stub return false; } - - - - - } diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java index 414e2319465..9444fa5246e 100644 --- 
a/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java @@ -133,7 +133,7 @@ public class StorageAllocatorTest { storage.setCapacityBytes(20000); storage.setHostAddress(UUID.randomUUID().toString()); storage.setPath(UUID.randomUUID().toString()); - storage.setStorageProviderId(provider.getId()); + storage.setStorageProviderName(provider.getName()); storage = storagePoolDao.persist(storage); storagePoolId = storage.getId(); @@ -176,7 +176,7 @@ public class StorageAllocatorTest { storage.setCapacityBytes(20000); storage.setHostAddress(UUID.randomUUID().toString()); storage.setPath(UUID.randomUUID().toString()); - storage.setStorageProviderId(provider.getId()); + storage.setStorageProviderName(provider.getName()); StoragePoolVO newStorage = storagePoolDao.persist(storage); newStorageId = newStorage.getId(); diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java index d8d187c0ce5..35a1790a0a9 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java @@ -281,9 +281,9 @@ public class volumeServiceTest extends CloudStackTestNGBase { params.put("port", "1"); params.put("roles", DataStoreRole.Primary.toString()); params.put("uuid", UUID.nameUUIDFromBytes(this.getPrimaryStorageUrl().getBytes()).toString()); - params.put("providerId", String.valueOf(provider.getId())); + params.put("providerName", String.valueOf(provider.getName())); - DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); + DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); this.primaryStore = lifeCycle.initialize(params); ClusterScope scope = 
new ClusterScope(clusterId, podId, dcId); lifeCycle.attachCluster(this.primaryStore, scope); @@ -297,8 +297,8 @@ public class volumeServiceTest extends CloudStackTestNGBase { params.put("uuid", name); params.put("protocol", "http"); params.put("scope", ScopeType.GLOBAL.toString()); - params.put("provider", Long.toString(provider.getId())); - DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); + params.put("providerName", name); + DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); DataStore store = lifeCycle.initialize(params); return store; } @@ -323,9 +323,9 @@ public class volumeServiceTest extends CloudStackTestNGBase { params.put("port", "1"); params.put("roles", DataStoreRole.Primary.toString()); params.put("uuid", UUID.nameUUIDFromBytes(this.getPrimaryStorageUrl().getBytes()).toString()); - params.put("providerId", String.valueOf(provider.getId())); + params.put("providerName", String.valueOf(provider.getName())); - DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); + DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); DataStore store = lifeCycle.initialize(params); ClusterScope scope = new ClusterScope(clusterId, podId, dcId); lifeCycle.attachCluster(store, scope); diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java index e70f803ee81..2dc3e255b38 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java @@ -242,16 +242,16 @@ public class PrimaryDataStoreEntityImpl implements StorageEntity { } - @Override - public Long getStorageProviderId() { - // TODO Auto-generated method stub - return null; - } - @Override public boolean isInMaintenance() { // TODO Auto-generated method stub return false; } + @Override + public String 
getStorageProviderName() { + // TODO Auto-generated method stub + return null; + } + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java index d1c26e1a272..b3ed0aaab68 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java @@ -26,6 +26,6 @@ public interface PrimaryDataStoreProviderManager { public PrimaryDataStore getPrimaryDataStore(long dataStoreId); public PrimaryDataStore getPrimaryDataStore(String uuid); - boolean registerDriver(String uuid, PrimaryDataStoreDriver driver); - boolean registerHostListener(String uuid, HypervisorHostListener listener); + boolean registerDriver(String providerName, PrimaryDataStoreDriver driver); + boolean registerHostListener(String providerName, HypervisorHostListener listener); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java index 96d2da357f5..91b6c6329bb 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java @@ -18,21 +18,28 @@ */ package org.apache.cloudstack.storage.datastore.provider; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.UUID; +import java.util.Set; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.api.response.StorageProviderResponse; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import 
org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider.DataStoreProviderType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; import org.apache.cloudstack.storage.datastore.db.DataStoreProviderDao; -import org.apache.cloudstack.storage.datastore.db.DataStoreProviderVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.utils.component.ManagerBase; @Component @@ -44,15 +51,11 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto @Inject DataStoreProviderDao providerDao; protected Map providerMap = new HashMap(); - @Override - public DataStoreProvider getDataStoreProviderByUuid(String uuid) { - return providerMap.get(uuid); - } - + @Inject + PrimaryDataStoreProviderManager primaryDataStoreProviderMgr; @Override public DataStoreProvider getDataStoreProvider(String name) { - DataStoreProviderVO dspv = providerDao.findByName(name); - return providerMap.get(dspv.getUuid()); + return providerMap.get(name); } @Override @@ -60,59 +63,86 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto // TODO Auto-generated method stub return null; } + + public List getPrimayrDataStoreProviders() { + List providers = new ArrayList(); + for (DataStoreProvider provider : providerMap.values()) { + if (provider instanceof PrimaryDataStoreProvider) { + StorageProviderResponse response = new StorageProviderResponse(); + response.setName(provider.getName()); + response.setType(DataStoreProvider.DataStoreProviderType.PRIMARY.toString()); + 
providers.add(response); + } + } + return providers; + } + + public List getImageDataStoreProviders() { + List providers = new ArrayList(); + for (DataStoreProvider provider : providerMap.values()) { + if (provider instanceof ImageDataStoreProvider) { + StorageProviderResponse response = new StorageProviderResponse(); + response.setName(provider.getName()); + response.setType(DataStoreProvider.DataStoreProviderType.IMAGE.toString()); + providers.add(response); + } + } + return providers; + } @Override public boolean configure(String name, Map params) throws ConfigurationException { Map copyParams = new HashMap(params); - //TODO: hold global lock - List providerVos = providerDao.listAll(); for (DataStoreProvider provider : providers) { - boolean existingProvider = false; - DataStoreProviderVO providerVO = null; - for (DataStoreProviderVO prov : providerVos) { - if (prov.getName().equalsIgnoreCase(provider.getName())) { - existingProvider = true; - providerVO = prov; - break; - } + String providerName = provider.getName(); + if (providerMap.get(providerName) != null) { + s_logger.debug("Failed to register data store provider, provider name: " + providerName + " is not unique"); + return false; } - String uuid = null; - if (!existingProvider) { - uuid = UUID.nameUUIDFromBytes(provider.getName().getBytes()).toString(); - providerVO = new DataStoreProviderVO(); - providerVO.setName(provider.getName()); - providerVO.setUuid(uuid); - providerVO = providerDao.persist(providerVO); - } else { - uuid = providerVO.getUuid(); - } - copyParams.put("uuid", uuid); - copyParams.put("id", providerVO.getId()); - providerMap.put(uuid, provider); + + s_logger.debug("registering data store provider:" + provider.getName()); + + providerMap.put(providerName, provider); try { boolean registrationResult = provider.configure(copyParams); if (!registrationResult) { - providerMap.remove(uuid); + providerMap.remove(providerName); + s_logger.debug("Failed to register data store provider: " + 
providerName); + return false; + } + + Set types = provider.getTypes(); + if (types.contains(DataStoreProviderType.PRIMARY)) { + primaryDataStoreProviderMgr.registerDriver(provider.getName(), (PrimaryDataStoreDriver)provider.getDataStoreDriver()); + primaryDataStoreProviderMgr.registerHostListener(provider.getName(), provider.getHostListener()); } } catch(Exception e) { s_logger.debug("configure provider failed", e); - providerMap.remove(uuid); + providerMap.remove(providerName); } } return true; } - @Override - public DataStoreProvider getDataStoreProviderById(long id) { - DataStoreProviderVO provider = providerDao.findById(id); - return providerMap.get(provider.getUuid()); - } - @Override public DataStoreProvider getDefaultPrimaryDataStoreProvider() { return this.getDataStoreProvider("ancient primary data store provider"); } + + @Override + public List getDataStoreProviders(String type) { + if (type == null) { + throw new InvalidParameterValueException("Invalid parameter, need to specify type: either primary or image"); + } + if (type.equalsIgnoreCase(DataStoreProvider.DataStoreProviderType.PRIMARY.toString())) { + return this.getPrimayrDataStoreProviders(); + } else if (type.equalsIgnoreCase(DataStoreProvider.DataStoreProviderType.IMAGE.toString())) { + return this.getImageDataStoreProviders(); + } else { + throw new InvalidParameterValueException("Invalid parameter: " + type); + } + } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreHelper.java b/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreHelper.java index ba267af6984..3f1632cf13c 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreHelper.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreHelper.java @@ -34,14 +34,14 @@ public class ImageDataStoreHelper { @Inject ImageDataStoreDao imageStoreDao; public ImageDataStoreVO createImageDataStore(Map params) { 
- ImageDataStoreVO store = imageStoreDao.findByUuid((String)params.get("uuid")); + ImageDataStoreVO store = imageStoreDao.findByName((String)params.get("name")); if (store != null) { return store; } store = new ImageDataStoreVO(); store.setName((String)params.get("name")); store.setProtocol((String)params.get("protocol")); - store.setProvider((Long)params.get("provider")); + store.setProviderName((String)params.get("providerName")); store.setScope((ScopeType)params.get("scope")); store.setUuid((String)params.get("uuid")); store = imageStoreDao.persist(store); diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataStoreVO.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataStoreVO.java index c7b8e2d1228..4cb402a1271 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataStoreVO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataStoreVO.java @@ -45,8 +45,8 @@ public class ImageDataStoreVO { @Column(name = "protocol", nullable = false) private String protocol; - @Column(name = "image_provider_id", nullable = false) - private long provider; + @Column(name = "image_provider_name", nullable = false) + private String providerName; @Column(name = "data_center_id") private long dcId; @@ -64,16 +64,16 @@ public class ImageDataStoreVO { return this.name; } - public long getProvider() { - return this.provider; + public String getProviderName() { + return this.providerName; } public void setName(String name) { this.name = name; } - public void setProvider(long provider) { - this.provider = provider; + public void setProviderName(String provider) { + this.providerName = provider; } public void setProtocol(String protocol) { diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java index c6ca90d1641..5f8daf42bb3 100644 --- 
a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java @@ -18,57 +18,181 @@ */ package org.apache.cloudstack.storage.volume.datastore; +import java.util.List; import java.util.Map; import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; +import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.alert.AlertManager; +import com.cloud.capacity.Capacity; +import com.cloud.capacity.CapacityVO; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.StoragePoolStatus; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; @Component public class PrimaryDataStoreHelper { + private static final Logger s_logger = Logger + .getLogger(PrimaryDataStoreHelper.class); @Inject private PrimaryDataStoreDao dataStoreDao; - public StoragePoolVO 
createPrimaryDataStore(Map params) { - StoragePoolVO dataStoreVO = dataStoreDao.findPoolByUUID((String)params.get("uuid")); + @Inject + DataStoreManager dataStoreMgr; + @Inject + StorageManager storageMgr; + @Inject + protected CapacityDao _capacityDao; + @Inject + protected StoragePoolHostDao storagePoolHostDao; + public DataStore createPrimaryDataStore(PrimaryDataStoreParameters params) { + StoragePoolVO dataStoreVO = dataStoreDao.findPoolByUUID(params.getUuid()); if (dataStoreVO != null) { - throw new CloudRuntimeException("duplicate uuid: " + params.get("uuid")); + throw new CloudRuntimeException("duplicate uuid: " + params.getUuid()); } dataStoreVO = new StoragePoolVO(); - dataStoreVO.setStorageProviderId(Long.parseLong((String)params.get("providerId"))); - dataStoreVO.setHostAddress((String)params.get("server")); - dataStoreVO.setPath((String)params.get("path")); - dataStoreVO.setPoolType((StoragePoolType)params.get("protocol")); - dataStoreVO.setPort(Integer.parseInt((String)params.get("port"))); - dataStoreVO.setName((String)params.get("name")); - dataStoreVO.setUuid((String)params.get("uuid")); - dataStoreVO = dataStoreDao.persist(dataStoreVO); - return dataStoreVO; + dataStoreVO.setStorageProviderName(params.getProviderName()); + dataStoreVO.setHostAddress(params.getHost()); + dataStoreVO.setPath(params.getPath()); + dataStoreVO.setPoolType(params.getType()); + dataStoreVO.setPort(params.getPort()); + dataStoreVO.setName(params.getName()); + dataStoreVO.setUuid(params.getUuid()); + dataStoreVO.setDataCenterId(params.getZoneId()); + dataStoreVO.setPodId(params.getPodId()); + dataStoreVO.setClusterId(params.getClusterId()); + dataStoreVO.setStatus(StoragePoolStatus.Initialized); + dataStoreVO.setUserInfo(params.getUserInfo()); + + Map details = params.getDetails(); + String tags = params.getTags(); + if (tags != null) { + String[] tokens = tags.split(","); + + for (String tag : tokens) { + tag = tag.trim(); + if (tag.length() == 0) { + continue; + } + 
details.put(tag, "true"); + } + } + + dataStoreVO = dataStoreDao.persist(dataStoreVO, details); + + return dataStoreMgr.getDataStore(dataStoreVO.getId(), DataStoreRole.Primary); } - public boolean deletePrimaryDataStore(long id) { - StoragePoolVO dataStoreVO = dataStoreDao.findById(id); - if (dataStoreVO == null) { - throw new CloudRuntimeException("can't find store: " + id); + public DataStore attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { + StoragePoolHostVO poolHost = storagePoolHostDao.findByPoolHost(store.getId(), scope.getScopeId()); + if (poolHost == null) { + poolHost = new StoragePoolHostVO(store.getId(), scope.getScopeId(), existingInfo.getLocalPath()); + storagePoolHostDao.persist(poolHost); } - dataStoreDao.remove(id); + + StoragePoolVO pool = this.dataStoreDao.findById(store.getId()); + pool.setScope(scope.getScopeType()); + pool.setAvailableBytes(existingInfo.getAvailableBytes()); + pool.setCapacityBytes(existingInfo.getCapacityBytes()); + pool.setStatus(StoragePoolStatus.Up); + this.dataStoreDao.update(pool.getId(), pool); + this.storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getCapacityBytes() - pool.getAvailableBytes()); + return dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); + } + + public DataStore attachCluster(DataStore store) { + StoragePoolVO pool = this.dataStoreDao.findById(store.getId()); + + storageMgr.createCapacityEntry(pool.getId()); + + pool.setScope(ScopeType.CLUSTER); + pool.setStatus(StoragePoolStatus.Up); + this.dataStoreDao.update(pool.getId(), pool); + return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary); + } + + public DataStore attachZone(DataStore store) { + StoragePoolVO pool = this.dataStoreDao.findById(store.getId()); + pool.setScope(ScopeType.ZONE); + pool.setStatus(StoragePoolStatus.Up); + this.dataStoreDao.update(pool.getId(), pool); + return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary); + } + + public 
boolean maintain(DataStore store) { + StoragePoolVO pool = this.dataStoreDao.findById(store.getId()); + pool.setStatus(StoragePoolStatus.Maintenance); + this.dataStoreDao.update(pool.getId(), pool); return true; } - public void attachCluster(DataStore dataStore) { - //send down AttachPrimaryDataStoreCmd command to all the hosts in the cluster - AttachPrimaryDataStoreCmd cmd = new AttachPrimaryDataStoreCmd(dataStore.getUri()); - /*for (EndPoint ep : dataStore.getEndPoints()) { - ep.sendMessage(cmd); - } */ + public boolean cancelMaintain(DataStore store) { + StoragePoolVO pool = this.dataStoreDao.findById(store.getId()); + pool.setStatus(StoragePoolStatus.Up); + dataStoreDao.update(store.getId(), pool); + return true; } + + protected boolean deletePoolStats(Long poolId) { + CapacityVO capacity1 = _capacityDao.findByHostIdType(poolId, + CapacityVO.CAPACITY_TYPE_STORAGE); + CapacityVO capacity2 = _capacityDao.findByHostIdType(poolId, + CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED); + if (capacity1 != null) { + _capacityDao.remove(capacity1.getId()); + } + + if (capacity2 != null) { + _capacityDao.remove(capacity2.getId()); + } + + return true; + } + public boolean deletePrimaryDataStore(DataStore store) { + List hostPoolRecords = this.storagePoolHostDao + .listByPoolId(store.getId()); + StoragePoolVO poolVO = this.dataStoreDao.findById(store.getId()); + Transaction txn = Transaction.currentTxn(); + txn.start(); + for (StoragePoolHostVO host : hostPoolRecords) { + storagePoolHostDao.deleteStoragePoolHostDetails( + host.getHostId(), host.getPoolId()); + } + poolVO.setUuid(null); + this.dataStoreDao.update(poolVO.getId(), poolVO); + dataStoreDao.remove(poolVO.getId()); + deletePoolStats(poolVO.getId()); + // Delete op_host_capacity entries + this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, + null, null, null, poolVO.getId()); + txn.commit(); + + s_logger.debug("Storage pool id=" + poolVO.getId() + + " is removed successfully"); + return true; + } + } 
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java index fbfade6c6aa..31e6908e28f 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java @@ -29,6 +29,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; @@ -48,9 +49,11 @@ import org.apache.log4j.Logger; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.component.ComponentContext; @@ -74,6 +77,8 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { protected DataStoreProvider provider; @Inject VMTemplatePoolDao templatePoolDao; + @Inject + StoragePoolHostDao poolHostDao; private VolumeDao volumeDao; @@ -152,6 +157,12 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { vo.getDataCenterId()); } else if (vo.getScope() == ScopeType.ZONE) { 
return new ZoneScope(vo.getDataCenterId()); + } else if (vo.getScope() == ScopeType.HOST) { + List poolHosts = poolHostDao.listByPoolId(vo.getId()); + if (poolHosts.size() > 0) { + return new HostScope(poolHosts.get(0).getHostId()); + } + s_logger.debug("can't find a local storage in pool host table: " + vo.getId()); } return null; } @@ -320,13 +331,13 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { return this.pdsv.getPodId(); } - @Override - public Long getStorageProviderId() { - return this.pdsv.getStorageProviderId(); - } - @Override public boolean isInMaintenance() { return this.getStatus() == StoragePoolStatus.Maintenance ? true : false; } + + @Override + public String getStorageProviderName() { + return this.pdsv.getStorageProviderName(); + } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java deleted file mode 100644 index 6154a666b24..00000000000 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java +++ /dev/null @@ -1,963 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.datastore.lifecycle; - -import java.net.URI; -import java.net.URISyntaxException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import javax.inject.Inject; - -import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreStatus; -import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; -import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; -import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; - -import com.cloud.agent.AgentManager; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.CreateStoragePoolCommand; -import com.cloud.agent.api.DeleteStoragePoolCommand; -import com.cloud.agent.api.ModifyStoragePoolCommand; -import com.cloud.agent.api.StoragePoolInfo; -import com.cloud.alert.AlertManager; -import com.cloud.capacity.Capacity; -import com.cloud.capacity.CapacityVO; -import com.cloud.capacity.dao.CapacityDao; -import com.cloud.exception.DiscoveryException; -import com.cloud.exception.InvalidParameterValueException; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.Status; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import 
com.cloud.resource.ResourceManager; -import com.cloud.server.ManagementServer; -import com.cloud.storage.OCFS2Manager; -import com.cloud.storage.Storage.StoragePoolType; -import com.cloud.storage.StorageManager; -import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolDiscoverer; -import com.cloud.storage.StoragePoolHostVO; -import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.StoragePoolWorkVO; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.StoragePoolHostDao; -import com.cloud.storage.dao.StoragePoolWorkDao; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.user.Account; -import com.cloud.user.User; -import com.cloud.user.UserContext; -import com.cloud.user.dao.UserDao; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.UriUtils; -import com.cloud.utils.db.DB; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.exception.ExecutionException; -import com.cloud.vm.ConsoleProxyVO; -import com.cloud.vm.DomainRouterVO; -import com.cloud.vm.SecondaryStorageVmVO; -import com.cloud.vm.UserVmVO; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachine.State; -import com.cloud.vm.VirtualMachineManager; -import com.cloud.vm.dao.ConsoleProxyDao; -import com.cloud.vm.dao.DomainRouterDao; -import com.cloud.vm.dao.SecondaryStorageVmDao; -import com.cloud.vm.dao.UserVmDao; -import com.cloud.vm.dao.VMInstanceDao; - -public class AncientPrimaryDataStoreLifeCycleImpl implements - PrimaryDataStoreLifeCycle { - private static final Logger s_logger = Logger - .getLogger(AncientPrimaryDataStoreLifeCycleImpl.class); - @Inject - protected ResourceManager _resourceMgr; - protected List _discoverers; - @Inject - PrimaryDataStoreDao primaryDataStoreDao; - @Inject - protected OCFS2Manager _ocfs2Mgr; - @Inject - DataStoreManager dataStoreMgr; - @Inject - AgentManager agentMgr; - @Inject - StorageManager 
storageMgr; - @Inject - protected CapacityDao _capacityDao; - - @Inject - VolumeDao volumeDao; - @Inject - VMInstanceDao vmDao; - @Inject - ManagementServer server; - @Inject - protected VirtualMachineManager vmMgr; - @Inject - protected SecondaryStorageVmDao _secStrgDao; - @Inject - UserVmDao userVmDao; - @Inject - protected UserDao _userDao; - @Inject - protected DomainRouterDao _domrDao; - @Inject - protected StoragePoolHostDao _storagePoolHostDao; - @Inject - protected AlertManager _alertMgr; - @Inject - protected ConsoleProxyDao _consoleProxyDao; - - @Inject - protected StoragePoolWorkDao _storagePoolWorkDao; - - @Override - public DataStore initialize(Map dsInfos) { - Long clusterId = (Long) dsInfos.get("clusterId"); - Long podId = (Long) dsInfos.get("podId"); - Long zoneId = (Long) dsInfos.get("zoneId"); - String url = (String) dsInfos.get("url"); - Long providerId = (Long)dsInfos.get("providerId"); - if (clusterId != null && podId == null) { - throw new InvalidParameterValueException( - "Cluster id requires pod id"); - } - - URI uri = null; - try { - uri = new URI(UriUtils.encodeURIComponent(url)); - if (uri.getScheme() == null) { - throw new InvalidParameterValueException("scheme is null " - + url + ", add nfs:// as a prefix"); - } else if (uri.getScheme().equalsIgnoreCase("nfs")) { - String uriHost = uri.getHost(); - String uriPath = uri.getPath(); - if (uriHost == null || uriPath == null - || uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) { - throw new InvalidParameterValueException( - "host or path is null, should be nfs://hostname/path"); - } - } else if (uri.getScheme().equalsIgnoreCase("sharedMountPoint")) { - String uriPath = uri.getPath(); - if (uriPath == null) { - throw new InvalidParameterValueException( - "host or path is null, should be sharedmountpoint://localhost/path"); - } - } else if (uri.getScheme().equalsIgnoreCase("rbd")) { - String uriPath = uri.getPath(); - if (uriPath == null) { - throw new InvalidParameterValueException( - 
"host or path is null, should be rbd://hostname/pool"); - } - } - } catch (URISyntaxException e) { - throw new InvalidParameterValueException(url - + " is not a valid uri"); - } - - String tags = (String) dsInfos.get("tags"); - Map details = (Map) dsInfos - .get("details"); - if (tags != null) { - String[] tokens = tags.split(","); - - for (String tag : tokens) { - tag = tag.trim(); - if (tag.length() == 0) { - continue; - } - details.put(tag, "true"); - } - } - - String scheme = uri.getScheme(); - String storageHost = uri.getHost(); - String hostPath = uri.getPath(); - Object localStorage = dsInfos.get("localStorage"); - if (localStorage != null) { - hostPath = hostPath.replace("/", ""); - } - String userInfo = uri.getUserInfo(); - int port = uri.getPort(); - StoragePoolVO pool = null; - if (s_logger.isDebugEnabled()) { - s_logger.debug("createPool Params @ scheme - " + scheme - + " storageHost - " + storageHost + " hostPath - " - + hostPath + " port - " + port); - } - if (scheme.equalsIgnoreCase("nfs")) { - if (port == -1) { - port = 2049; - } - pool = new StoragePoolVO(StoragePoolType.NetworkFilesystem, - storageHost, port, hostPath); - } else if (scheme.equalsIgnoreCase("file")) { - if (port == -1) { - port = 0; - } - pool = new StoragePoolVO(StoragePoolType.Filesystem, - "localhost", 0, hostPath); - } else if (scheme.equalsIgnoreCase("sharedMountPoint")) { - pool = new StoragePoolVO(StoragePoolType.SharedMountPoint, - storageHost, 0, hostPath); - } else if (scheme.equalsIgnoreCase("clvm")) { - pool = new StoragePoolVO(StoragePoolType.CLVM, storageHost, 0, - hostPath.replaceFirst("/", "")); - } else if (scheme.equalsIgnoreCase("rbd")) { - if (port == -1) { - port = 6789; - } - pool = new StoragePoolVO(StoragePoolType.RBD, storageHost, - port, hostPath.replaceFirst("/", "")); - pool.setUserInfo(userInfo); - } else if (scheme.equalsIgnoreCase("PreSetup")) { - pool = new StoragePoolVO(StoragePoolType.PreSetup, - storageHost, 0, hostPath); - } else if 
(scheme.equalsIgnoreCase("iscsi")) { - String[] tokens = hostPath.split("/"); - int lun = NumbersUtil.parseInt(tokens[tokens.length - 1], -1); - if (port == -1) { - port = 3260; - } - if (lun != -1) { - if (clusterId == null) { - throw new IllegalArgumentException( - "IscsiLUN need to have clusters specified"); - } - hostPath.replaceFirst("/", ""); - pool = new StoragePoolVO(StoragePoolType.IscsiLUN, - storageHost, port, hostPath); - } else { - for (StoragePoolDiscoverer discoverer : _discoverers) { - Map> pools; - try { - pools = discoverer.find(zoneId, podId, uri, details); - } catch (DiscoveryException e) { - throw new IllegalArgumentException( - "Not enough information for discovery " + uri, - e); - } - if (pools != null) { - Map.Entry> entry = pools - .entrySet().iterator().next(); - pool = entry.getKey(); - details = entry.getValue(); - break; - } - } - } - } else if (scheme.equalsIgnoreCase("iso")) { - if (port == -1) { - port = 2049; - } - pool = new StoragePoolVO(StoragePoolType.ISO, storageHost, - port, hostPath); - } else if (scheme.equalsIgnoreCase("vmfs")) { - pool = new StoragePoolVO(StoragePoolType.VMFS, - "VMFS datastore: " + hostPath, 0, hostPath); - } else if (scheme.equalsIgnoreCase("ocfs2")) { - port = 7777; - pool = new StoragePoolVO(StoragePoolType.OCFS2, "clustered", - port, hostPath); - } else { - StoragePoolType type = Enum.valueOf(StoragePoolType.class, scheme); - - if (type != null) { - pool = new StoragePoolVO(type, storageHost, - 0, hostPath); - } else { - s_logger.warn("Unable to figure out the scheme for URI: " + uri); - throw new IllegalArgumentException( - "Unable to figure out the scheme for URI: " + uri); - } - } - - if (pool == null) { - s_logger.warn("Unable to figure out the scheme for URI: " + uri); - throw new IllegalArgumentException( - "Unable to figure out the scheme for URI: " + uri); - } - - if (localStorage == null) { - List pools = primaryDataStoreDao - .listPoolByHostPath(storageHost, hostPath); - if (!pools.isEmpty() 
&& !scheme.equalsIgnoreCase("sharedmountpoint")) { - Long oldPodId = pools.get(0).getPodId(); - throw new CloudRuntimeException("Storage pool " + uri - + " already in use by another pod (id=" + oldPodId + ")"); - } - } - - long poolId = primaryDataStoreDao.getNextInSequence(Long.class, "id"); - Object existingUuid = dsInfos.get("uuid"); - String uuid = null; - - if (existingUuid != null) { - uuid = (String)existingUuid; - } else if (scheme.equalsIgnoreCase("sharedmountpoint") - || scheme.equalsIgnoreCase("clvm")) { - uuid = UUID.randomUUID().toString(); - } else if (scheme.equalsIgnoreCase("PreSetup")) { - uuid = hostPath.replace("/", ""); - } else { - uuid = UUID.nameUUIDFromBytes( - new String(storageHost + hostPath).getBytes()).toString(); - } - - List spHandles = primaryDataStoreDao - .findIfDuplicatePoolsExistByUUID(uuid); - if ((spHandles != null) && (spHandles.size() > 0)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Another active pool with the same uuid already exists"); - } - throw new CloudRuntimeException( - "Another active pool with the same uuid already exists"); - } - - String poolName = (String) dsInfos.get("name"); - if (s_logger.isDebugEnabled()) { - s_logger.debug("In createPool Setting poolId - " + poolId - + " uuid - " + uuid + " zoneId - " + zoneId + " podId - " - + podId + " poolName - " + poolName); - } - - pool.setId(poolId); - pool.setUuid(uuid); - pool.setDataCenterId(zoneId); - pool.setPodId(podId); - pool.setName(poolName); - pool.setClusterId(clusterId); - pool.setStorageProviderId(providerId); - pool.setStatus(StoragePoolStatus.Initialized); - pool = primaryDataStoreDao.persist(pool, details); - - return dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); - } - - protected boolean createStoragePool(long hostId, StoragePool pool) { - s_logger.debug("creating pool " + pool.getName() + " on host " - + hostId); - if (pool.getPoolType() != StoragePoolType.NetworkFilesystem - && pool.getPoolType() != 
StoragePoolType.Filesystem - && pool.getPoolType() != StoragePoolType.IscsiLUN - && pool.getPoolType() != StoragePoolType.Iscsi - && pool.getPoolType() != StoragePoolType.VMFS - && pool.getPoolType() != StoragePoolType.SharedMountPoint - && pool.getPoolType() != StoragePoolType.PreSetup - && pool.getPoolType() != StoragePoolType.OCFS2 - && pool.getPoolType() != StoragePoolType.RBD - && pool.getPoolType() != StoragePoolType.CLVM) { - s_logger.warn(" Doesn't support storage pool type " - + pool.getPoolType()); - return false; - } - CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); - final Answer answer = agentMgr.easySend(hostId, cmd); - if (answer != null && answer.getResult()) { - return true; - } else { - primaryDataStoreDao.expunge(pool.getId()); - String msg = ""; - if (answer != null) { - msg = "Can not create storage pool through host " + hostId - + " due to " + answer.getDetails(); - s_logger.warn(msg); - } else { - msg = "Can not create storage pool through host " + hostId - + " due to CreateStoragePoolCommand returns null"; - s_logger.warn(msg); - } - throw new CloudRuntimeException(msg); - } - } - - @Override - public boolean attachCluster(DataStore store, ClusterScope scope) { - PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store; - // Check if there is host up in this cluster - List allHosts = _resourceMgr.listAllUpAndEnabledHosts( - Host.Type.Routing, primarystore.getClusterId(), - primarystore.getPodId(), primarystore.getDataCenterId()); - if (allHosts.isEmpty()) { - throw new CloudRuntimeException( - "No host up to associate a storage pool with in cluster " - + primarystore.getClusterId()); - } - - if (primarystore.getPoolType() == StoragePoolType.OCFS2 - && !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) { - s_logger.warn("Can not create storage pool " + primarystore - + " on cluster " + primarystore.getClusterId()); - primaryDataStoreDao.expunge(primarystore.getId()); - return false; - } - - boolean success = 
false; - for (HostVO h : allHosts) { - success = createStoragePool(h.getId(), primarystore); - if (success) { - break; - } - } - - s_logger.debug("In createPool Adding the pool to each of the hosts"); - List poolHosts = new ArrayList(); - for (HostVO h : allHosts) { - try { - this.storageMgr.connectHostToSharedPool(h.getId(), - primarystore.getId()); - poolHosts.add(h); - } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + h - + " and " + primarystore, e); - } - } - - if (poolHosts.isEmpty()) { - s_logger.warn("No host can access storage pool " + primarystore - + " on cluster " + primarystore.getClusterId()); - primaryDataStoreDao.expunge(primarystore.getId()); - return false; - } else { - storageMgr.createCapacityEntry(primarystore.getId()); - } - StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId()); - pool.setScope(ScopeType.CLUSTER); - pool.setStatus(StoragePoolStatus.Up); - this.primaryDataStoreDao.update(pool.getId(), pool); - return true; - } - - @Override - public boolean attachZone(DataStore dataStore, ZoneScope scope) { - List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId()); - for (HostVO host : hosts) { - try { - this.storageMgr.connectHostToSharedPool(host.getId(), - dataStore.getId()); - } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + host - + " and " + dataStore, e); - } - } - StoragePoolVO pool = this.primaryDataStoreDao.findById(dataStore.getId()); - - pool.setScope(ScopeType.ZONE); - pool.setStatus(StoragePoolStatus.Up); - this.primaryDataStoreDao.update(pool.getId(), pool); - return true; - } - - @Override - public boolean dettach() { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean unmanaged() { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean maintain(long storeId) { - Long userId = UserContext.current().getCallerUserId(); - 
User user = _userDao.findById(userId); - Account account = UserContext.current().getCaller(); - StoragePoolVO pool = this.primaryDataStoreDao.findById(storeId); - try { - StoragePool storagePool = (StoragePool) this.dataStoreMgr - .getDataStore(storeId, DataStoreRole.Primary); - List hosts = _resourceMgr.listHostsInClusterByStatus( - pool.getClusterId(), Status.Up); - if (hosts == null || hosts.size() == 0) { - pool.setStatus(StoragePoolStatus.Maintenance); - primaryDataStoreDao.update(pool.getId(), pool); - return true; - } else { - // set the pool state to prepare for maintenance - pool.setStatus(StoragePoolStatus.PrepareForMaintenance); - primaryDataStoreDao.update(pool.getId(), pool); - } - // remove heartbeat - for (HostVO host : hosts) { - ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand( - false, storagePool); - final Answer answer = agentMgr.easySend(host.getId(), cmd); - if (answer == null || !answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool false failed due to " - + ((answer == null) ? "answer null" : answer - .getDetails())); - } - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool false secceeded"); - } - } - } - // check to see if other ps exist - // if they do, then we can migrate over the system vms to them - // if they dont, then just stop all vms on this one - List upPools = primaryDataStoreDao - .listByStatusInZone(pool.getDataCenterId(), - StoragePoolStatus.Up); - boolean restart = true; - if (upPools == null || upPools.size() == 0) { - restart = false; - } - - // 2. Get a list of all the ROOT volumes within this storage pool - List allVolumes = this.volumeDao.findByPoolId(pool - .getId()); - - // 3. 
Enqueue to the work queue - for (VolumeVO volume : allVolumes) { - VMInstanceVO vmInstance = vmDao - .findById(volume.getInstanceId()); - - if (vmInstance == null) { - continue; - } - - // enqueue sp work - if (vmInstance.getState().equals(State.Running) - || vmInstance.getState().equals(State.Starting) - || vmInstance.getState().equals(State.Stopping)) { - - try { - StoragePoolWorkVO work = new StoragePoolWorkVO( - vmInstance.getId(), pool.getId(), false, false, - server.getId()); - _storagePoolWorkDao.persist(work); - } catch (Exception e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Work record already exists, re-using by re-setting values"); - } - StoragePoolWorkVO work = _storagePoolWorkDao - .findByPoolIdAndVmId(pool.getId(), - vmInstance.getId()); - work.setStartedAfterMaintenance(false); - work.setStoppedForMaintenance(false); - work.setManagementServerId(server.getId()); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } - - // 4. Process the queue - List pendingWork = _storagePoolWorkDao - .listPendingWorkForPrepareForMaintenanceByPoolId(pool - .getId()); - - for (StoragePoolWorkVO work : pendingWork) { - // shut down the running vms - VMInstanceVO vmInstance = vmDao.findById(work.getVmId()); - - if (vmInstance == null) { - continue; - } - - // if the instance is of type consoleproxy, call the console - // proxy - if (vmInstance.getType().equals( - VirtualMachine.Type.ConsoleProxy)) { - // call the consoleproxymanager - ConsoleProxyVO consoleProxy = _consoleProxyDao - .findById(vmInstance.getId()); - if (!vmMgr.advanceStop(consoleProxy, true, user, account)) { - String errorMsg = "There was an error stopping the console proxy id: " - + vmInstance.getId() - + " ,cannot enable storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - if (restart) { - - if 
(this.vmMgr.advanceStart(consoleProxy, null, user, - account) == null) { - String errorMsg = "There was an error starting the console proxy id: " - + vmInstance.getId() - + " on another storage pool, cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - } else { - // update work status - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } - - // if the instance is of type uservm, call the user vm manager - if (vmInstance.getType().equals(VirtualMachine.Type.User)) { - UserVmVO userVm = userVmDao.findById(vmInstance.getId()); - if (!vmMgr.advanceStop(userVm, true, user, account)) { - String errorMsg = "There was an error stopping the user vm id: " - + vmInstance.getId() - + " ,cannot enable storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type secondary storage vm, call the - // secondary storage vm manager - if (vmInstance.getType().equals( - VirtualMachine.Type.SecondaryStorageVm)) { - SecondaryStorageVmVO secStrgVm = _secStrgDao - .findById(vmInstance.getId()); - if (!vmMgr.advanceStop(secStrgVm, true, user, account)) { - String errorMsg = "There was an error stopping the ssvm id: " - + vmInstance.getId() - + " ,cannot enable storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - if (restart) { - if (vmMgr.advanceStart(secStrgVm, null, user, account) == null) { - String errorMsg = "There was an error starting the ssvm id: " - + vmInstance.getId() - + " on another storage pool, cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - } else { - // update work status - work.setStartedAfterMaintenance(true); - 
_storagePoolWorkDao.update(work.getId(), work); - } - } - } - - // if the instance is of type domain router vm, call the network - // manager - if (vmInstance.getType().equals( - VirtualMachine.Type.DomainRouter)) { - DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); - if (!vmMgr.advanceStop(domR, true, user, account)) { - String errorMsg = "There was an error stopping the domain router id: " - + vmInstance.getId() - + " ,cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - if (restart) { - if (vmMgr.advanceStart(domR, null, user, account) == null) { - String errorMsg = "There was an error starting the domain router id: " - + vmInstance.getId() - + " on another storage pool, cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - } else { - // update work status - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } - } - - // 5. 
Update the status - pool.setStatus(StoragePoolStatus.Maintenance); - this.primaryDataStoreDao.update(pool.getId(), pool); - - return true; - } catch (Exception e) { - s_logger.error( - "Exception in enabling primary storage maintenance:", e); - setPoolStateToError(pool); - throw new CloudRuntimeException(e.getMessage()); - } - } - - private void setPoolStateToError(StoragePoolVO primaryStorage) { - primaryStorage.setStatus(StoragePoolStatus.ErrorInMaintenance); - this.primaryDataStoreDao.update(primaryStorage.getId(), primaryStorage); - } - - @Override - public boolean cancelMaintain(long storageId) { - // Change the storage state back to up - Long userId = UserContext.current().getCallerUserId(); - User user = _userDao.findById(userId); - Account account = UserContext.current().getCaller(); - StoragePoolVO poolVO = this.primaryDataStoreDao - .findById(storageId); - StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore( - storageId, DataStoreRole.Primary); - poolVO.setStatus(StoragePoolStatus.Up); - primaryDataStoreDao.update(storageId, poolVO); - - List hosts = _resourceMgr.listHostsInClusterByStatus( - pool.getClusterId(), Status.Up); - if (hosts == null || hosts.size() == 0) { - return true; - } - // add heartbeat - for (HostVO host : hosts) { - ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand( - true, pool); - final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd); - if (answer == null || !answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool add failed due to " - + ((answer == null) ? "answer null" : answer - .getDetails())); - } - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool add secceeded"); - } - } - } - - // 2. Get a list of pending work for this queue - List pendingWork = _storagePoolWorkDao - .listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId()); - - // 3. 
work through the queue - for (StoragePoolWorkVO work : pendingWork) { - try { - VMInstanceVO vmInstance = vmDao.findById(work.getVmId()); - - if (vmInstance == null) { - continue; - } - - // if the instance is of type consoleproxy, call the console - // proxy - if (vmInstance.getType().equals( - VirtualMachine.Type.ConsoleProxy)) { - - ConsoleProxyVO consoleProxy = _consoleProxyDao - .findById(vmInstance.getId()); - if (vmMgr.advanceStart(consoleProxy, null, user, account) == null) { - String msg = "There was an error starting the console proxy id: " - + vmInstance.getId() - + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type ssvm, call the ssvm manager - if (vmInstance.getType().equals( - VirtualMachine.Type.SecondaryStorageVm)) { - SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance - .getId()); - if (vmMgr.advanceStart(ssVm, null, user, account) == null) { - String msg = "There was an error starting the ssvm id: " - + vmInstance.getId() - + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type ssvm, call the ssvm manager - if (vmInstance.getType().equals( - VirtualMachine.Type.DomainRouter)) { - DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); - if (vmMgr.advanceStart(domR, null, user, account) == null) { - String msg = "There was an error starting the domR id: " - + vmInstance.getId() - + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - 
work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type user vm, call the user vm manager - if (vmInstance.getType().equals(VirtualMachine.Type.User)) { - UserVmVO userVm = userVmDao.findById(vmInstance.getId()); - - if (vmMgr.advanceStart(userVm, null, user, account) == null) { - - String msg = "There was an error starting the user vm id: " - + vmInstance.getId() - + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } catch (Exception e) { - s_logger.debug("Failed start vm", e); - throw new CloudRuntimeException(e.toString()); - } - } - return true; - } - - @DB - @Override - public boolean deleteDataStore(long storeId) { - // for the given pool id, find all records in the storage_pool_host_ref - List hostPoolRecords = this._storagePoolHostDao - .listByPoolId(storeId); - StoragePoolVO poolVO = this.primaryDataStoreDao.findById(storeId); - StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(storeId, DataStoreRole.Primary); - boolean deleteFlag = false; - Transaction txn = Transaction.currentTxn(); - try { - // if not records exist, delete the given pool (base case) - if (hostPoolRecords.size() == 0) { - - txn.start(); - poolVO.setUuid(null); - this.primaryDataStoreDao.update(poolVO.getId(), poolVO); - primaryDataStoreDao.remove(poolVO.getId()); - deletePoolStats(poolVO.getId()); - txn.commit(); - - deleteFlag = true; - return true; - } else { - // Remove the SR associated with the Xenserver - for (StoragePoolHostVO host : hostPoolRecords) { - DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand( - pool); - final Answer answer = agentMgr.easySend(host.getHostId(), - deleteCmd); - - if (answer != null && answer.getResult()) { - deleteFlag = true; - break; - } - } - 
} - } finally { - if (deleteFlag) { - // now delete the storage_pool_host_ref and storage_pool records - txn.start(); - for (StoragePoolHostVO host : hostPoolRecords) { - _storagePoolHostDao.deleteStoragePoolHostDetails( - host.getHostId(), host.getPoolId()); - } - poolVO.setUuid(null); - this.primaryDataStoreDao.update(poolVO.getId(), poolVO); - primaryDataStoreDao.remove(poolVO.getId()); - deletePoolStats(poolVO.getId()); - // Delete op_host_capacity entries - this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, - null, null, null, poolVO.getId()); - txn.commit(); - - s_logger.debug("Storage pool id=" + poolVO.getId() - + " is removed successfully"); - return true; - } else { - // alert that the storage cleanup is required - s_logger.warn("Failed to Delete storage pool id: " + poolVO.getId()); - _alertMgr - .sendAlert(AlertManager.ALERT_TYPE_STORAGE_DELETE, - poolVO.getDataCenterId(), poolVO.getPodId(), - "Unable to delete storage pool id= " + poolVO.getId(), - "Delete storage pool command failed. 
Please check logs."); - } - } - return false; - } - - @DB - private boolean deletePoolStats(Long poolId) { - CapacityVO capacity1 = _capacityDao.findByHostIdType(poolId, - CapacityVO.CAPACITY_TYPE_STORAGE); - CapacityVO capacity2 = _capacityDao.findByHostIdType(poolId, - CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED); - Transaction txn = Transaction.currentTxn(); - txn.start(); - if (capacity1 != null) { - _capacityDao.remove(capacity1.getId()); - } - - if (capacity2 != null) { - _capacityDao.remove(capacity2.getId()); - } - - txn.commit(); - return true; - } - - @Override - public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { - StoragePoolHostVO poolHost = _storagePoolHostDao.findByPoolHost(store.getId(), scope.getScopeId()); - if (poolHost == null) { - poolHost = new StoragePoolHostVO(store.getId(), scope.getScopeId(), existingInfo.getLocalPath()); - _storagePoolHostDao.persist(poolHost); - } - - StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId()); - pool.setScope(scope.getScopeType()); - pool.setAvailableBytes(existingInfo.getAvailableBytes()); - pool.setCapacityBytes(existingInfo.getCapacityBytes()); - pool.setStatus(StoragePoolStatus.Up); - this.primaryDataStoreDao.update(pool.getId(), pool); - this.storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getCapacityBytes() - pool.getAvailableBytes()); - - return true; - } - -} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java index 5e8727a316a..fea02e8d1ed 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java @@ -60,8 +60,8 @@ public class 
DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif @Override public DataStore initialize(Map dsInfos) { - StoragePoolVO storeVO = primaryStoreHelper.createPrimaryDataStore(dsInfos); - return providerMgr.getPrimaryDataStore(storeVO.getId()); + DataStore store = primaryStoreHelper.createPrimaryDataStore(null); + return providerMgr.getPrimaryDataStore(store.getId()); } protected void attachCluster(DataStore store) { @@ -113,26 +113,6 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif return false; } - @Override - public boolean maintain(long storeId) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean cancelMaintain(long storeId) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean deleteDataStore(long storeId) { - // TODO Auto-generated method stub - return false; - } - - - @Override public boolean attachZone(DataStore dataStore, ZoneScope scope) { // TODO Auto-generated method stub @@ -146,4 +126,22 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif return false; } + @Override + public boolean maintain(DataStore store) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean cancelMaintain(DataStore store) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean deleteDataStore(DataStore store) { + // TODO Auto-generated method stub + return false; + } + } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java index e181adabb5b..e38c3b306fa 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java +++ 
b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java @@ -57,18 +57,18 @@ public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataSt @Override public PrimaryDataStore getPrimaryDataStore(long dataStoreId) { StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStoreId); - long providerId = dataStoreVO.getStorageProviderId(); - DataStoreProvider provider = providerManager.getDataStoreProviderById(providerId); - DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO, driverMaps.get(provider.getUuid()), provider); + String providerName = dataStoreVO.getStorageProviderName(); + DataStoreProvider provider = providerManager.getDataStoreProvider(providerName); + DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO, driverMaps.get(provider.getName()), provider); return dataStore; } @Override - public boolean registerDriver(String uuid, PrimaryDataStoreDriver driver) { - if (driverMaps.get(uuid) != null) { + public boolean registerDriver(String providerName, PrimaryDataStoreDriver driver) { + if (driverMaps.get(providerName) != null) { return false; } - driverMaps.put(uuid, driver); + driverMaps.put(providerName, driver); return true; } @@ -79,7 +79,7 @@ public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataSt } @Override - public boolean registerHostListener(String uuid, HypervisorHostListener listener) { - return storageMgr.registerHostListener(uuid, listener); + public boolean registerHostListener(String providerName, HypervisorHostListener listener) { + return storageMgr.registerHostListener(providerName, listener); } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java index 
a1402c13b3d..46fa738e294 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java @@ -16,24 +16,29 @@ // under the License. package org.apache.cloudstack.storage.datastore.provider; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider.DataStoreProviderType; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; import org.apache.cloudstack.storage.datastore.driver.DefaultPrimaryDataStoreDriverImpl; import org.apache.cloudstack.storage.datastore.lifecycle.DefaultPrimaryDataStoreLifeCycleImpl; -import org.springframework.stereotype.Component; import com.cloud.utils.component.ComponentContext; -@Component + public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProvider { private final String providerName = "default primary data store provider"; protected PrimaryDataStoreDriver driver; + protected HypervisorHostListener listener; @Inject PrimaryDataStoreProviderManager storeMgr; @@ -46,7 +51,7 @@ public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProv } @Override - public DataStoreLifeCycle getLifeCycle() { + public DataStoreLifeCycle getDataStoreLifeCycle() { return this.lifecyle; } @@ -54,22 +59,25 @@ public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProv public 
boolean configure(Map params) { lifecyle = ComponentContext.inject(DefaultPrimaryDataStoreLifeCycleImpl.class); driver = ComponentContext.inject(DefaultPrimaryDataStoreDriverImpl.class); - HypervisorHostListener listener = ComponentContext.inject(DefaultHostListener.class); - uuid = (String)params.get("uuid"); - id = (Long)params.get("id"); - storeMgr.registerDriver(uuid, this.driver); - storeMgr.registerHostListener(uuid, listener); + listener = ComponentContext.inject(DefaultHostListener.class); return true; } @Override - public String getUuid() { - return this.uuid; + public PrimaryDataStoreDriver getDataStoreDriver() { + return this.driver; } @Override - public long getId() { - return this.id; + public HypervisorHostListener getHostListener() { + return this.listener; + } + + @Override + public Set getTypes() { + Set types = new HashSet(); + types.add(DataStoreProviderType.PRIMARY); + return types; } } diff --git a/engine/storage/volume/test/org/apache/cloudstack/storage/volume/test/ConfiguratorTest.java b/engine/storage/volume/test/org/apache/cloudstack/storage/volume/test/ConfiguratorTest.java index 829694bd753..122c3532a09 100644 --- a/engine/storage/volume/test/org/apache/cloudstack/storage/volume/test/ConfiguratorTest.java +++ b/engine/storage/volume/test/org/apache/cloudstack/storage/volume/test/ConfiguratorTest.java @@ -28,7 +28,7 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; diff --git a/packaging/centos63/cloud.spec b/packaging/centos63/cloud.spec index 16c36020b45..893691c2f3a 100644 --- a/packaging/centos63/cloud.spec +++ b/packaging/centos63/cloud.spec @@ -33,8 +33,8 @@ Release: %{_rel}%{dist} %endif Version: %{_ver} License: ASL 2.0 -Vendor: Apache 
CloudStack -Packager: Apache CloudStack +Vendor: Apache CloudStack +Packager: Apache CloudStack Group: System Environment/Libraries # FIXME do groups for every single one of the subpackages Source0: %{name}-%{_maventag}.tgz @@ -165,7 +165,14 @@ echo Doing CloudStack build cp packaging/centos63/replace.properties build/replace.properties echo VERSION=%{_maventag} >> build/replace.properties echo PACKAGE=%{name} >> build/replace.properties -mvn -P awsapi package -Dsystemvm + +if [ "%{_ossnoss}" == "NONOSS" -o "%{_ossnoss}" == "nonoss" ] ; then + echo "Packaging nonoss components" + mvn -Pawsapi,systemvm -Dnonoss package +else + echo "Packaging oss components" + mvn -Pawsapi package -Dsystemvm +fi %install [ ${RPM_BUILD_ROOT} != "/" ] && rm -rf ${RPM_BUILD_ROOT} @@ -283,22 +290,35 @@ cp -r cloud-cli/cloudtool ${RPM_BUILD_ROOT}%{_libdir}/python2.6/site-packages/ install cloud-cli/cloudapis/cloud.py ${RPM_BUILD_ROOT}%{_libdir}/python2.6/site-packages/cloudapis.py # AWS API -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/bridge +mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/setup -cp -r awsapi/target/cloud-awsapi-%{_maventag}/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/bridge +cp -r awsapi/target/cloud-awsapi-%{_maventag}/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi install -D awsapi-setup/setup/cloud-setup-bridge ${RPM_BUILD_ROOT}%{_bindir}/cloudstack-setup-bridge install -D awsapi-setup/setup/cloudstack-aws-api-register ${RPM_BUILD_ROOT}%{_bindir}/cloudstack-aws-api-register cp -r awsapi-setup/db/mysql/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/setup +for name in applicationContext.xml cloud-bridge.properties commons-logging.properties crypto.properties xes.keystore ec2-service.properties ; do + mv ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi/WEB-INF/classes/$name \ + ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/$name +done + +#Don't 
package the below for AWS API +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi/WEB-INF/classes/db.properties +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi/WEB-INF/classes/LICENSE.txt +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi/WEB-INF/classes/log4j.properties +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi/WEB-INF/classes/log4j-vmops.xml +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi/WEB-INF/classes/META-INF +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi/WEB-INF/classes/NOTICE.txt +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi/WEB-INF/classes/services.xml + %clean [ ${RPM_BUILD_ROOT} != "/" ] && rm -rf ${RPM_BUILD_ROOT} - %preun management -/sbin/service cloud-management stop || true +/sbin/service cloudstack-management stop || true if [ "$1" == "0" ] ; then - /sbin/chkconfig --del cloud-management > /dev/null 2>&1 || true - /sbin/service cloud-management stop > /dev/null 2>&1 || true + /sbin/chkconfig --del cloudstack-management > /dev/null 2>&1 || true + /sbin/service cloudstack-management stop > /dev/null 2>&1 || true fi %pre management @@ -315,8 +335,8 @@ rm -rf %{_localstatedir}/cache/cloud %post management if [ "$1" == "1" ] ; then - /sbin/chkconfig --add cloud-management > /dev/null 2>&1 || true - /sbin/chkconfig --level 345 cloud-management on > /dev/null 2>&1 || true + /sbin/chkconfig --add cloudstack-management > /dev/null 2>&1 || true + /sbin/chkconfig --level 345 cloudstack-management on > /dev/null 2>&1 || true fi if [ -d "%{_datadir}/%{name}-management" ] ; then @@ -370,6 +390,12 @@ fi %config(noreplace) %{_sysconfdir}/%{name}/management/tomcat-users.xml %config(noreplace) %{_sysconfdir}/%{name}/management/web.xml %config(noreplace) %{_sysconfdir}/%{name}/management/environment.properties +%config(noreplace) %{_sysconfdir}/%{name}/management/applicationContext.xml +%config(noreplace) 
%{_sysconfdir}/%{name}/management/cloud-bridge.properties +%config(noreplace) %{_sysconfdir}/%{name}/management/commons-logging.properties +%config(noreplace) %{_sysconfdir}/%{name}/management/ec2-service.properties +%config(noreplace) %{_sysconfdir}/%{name}/management/crypto.properties +%config(noreplace) %{_sysconfdir}/%{name}/management/xes.keystore %attr(0755,root,root) %{_initrddir}/%{name}-management %attr(0755,root,root) %{_bindir}/%{name}-setup-management %attr(0755,root,root) %{_bindir}/%{name}-update-xenserver-licenses @@ -443,7 +469,7 @@ fi %files awsapi %defattr(0644,cloud,cloud,0755) -%{_datadir}/%{name}-bridge/webapps/bridge +%{_datadir}/%{name}-bridge/webapps/awsapi %attr(0644,root,root) %{_datadir}/%{name}-bridge/setup/* %attr(0755,root,root) %{_bindir}/cloudstack-aws-api-register %attr(0755,root,root) %{_bindir}/cloudstack-setup-bridge diff --git a/packaging/centos63/cloudstack-agent.te b/packaging/centos63/cloudstack-agent.te new file mode 100644 index 00000000000..4259e173a46 --- /dev/null +++ b/packaging/centos63/cloudstack-agent.te @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +module cloudstack-agent 1.0; + +require { + type nfs_t; + type system_conf_t; + type mount_t; + type qemu_t; + class file unlink; + class filesystem getattr; +} + +#============= mount_t ============== +allow mount_t system_conf_t:file unlink; + +#============= qemu_t ============== +allow qemu_t nfs_t:filesystem getattr; diff --git a/packaging/centos63/package.sh b/packaging/centos63/package.sh index 2515ecba11f..802cf7e5e0b 100755 --- a/packaging/centos63/package.sh +++ b/packaging/centos63/package.sh @@ -16,30 +16,130 @@ # specific language governing permissions and limitations # under the License. -CWD=`pwd` -RPMDIR=$CWD/../../dist/rpmbuild -PACK_PROJECT=cloudstack +function usage() { + echo "" + echo "usage: ./package.sh [-p|--pack] [-h|--help] [ARGS]" + echo "" + echo "oss|OSS To package oss components only" + echo "nonoss|NONOSS To package non-oss and oss components" + echo "" + echo "Examples: ./package.sh -p|--pack oss|OSS" + echo " ./package.sh -p|--pack nonoss|NONOSS" + echo " ./package.sh (Default OSS)" + echo "" + exit 1 +} + +function defaultPackaging() { + CWD=`pwd` + RPMDIR=$CWD/../../dist/rpmbuild + PACK_PROJECT=cloudstack + VERSION=`(cd ../../; mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.version) | grep '^[0-9]\.'` + + if echo $VERSION | grep SNAPSHOT ; then + REALVER=`echo $VERSION | cut -d '-' -f 1` + DEFVER="-D_ver $REALVER" + DEFPRE="-D_prerelease 1" + DEFREL="-D_rel SNAPSHOT" + else + DEFVER="-D_ver $REALVER" + DEFPRE= + DEFREL= + fi + mkdir -p $RPMDIR/SPECS + mkdir -p $RPMDIR/SOURCES/$PACK_PROJECT-$VERSION + + (cd ../../; tar -c --exclude .git --exclude dist . 
| tar -C $RPMDIR/SOURCES/$PACK_PROJECT-$VERSION -x) + (cd $RPMDIR/SOURCES/; tar -czf $PACK_PROJECT-$VERSION.tgz $PACK_PROJECT-$VERSION) + + cp cloud.spec $RPMDIR/SPECS + + (cd $RPMDIR; rpmbuild -ba SPECS/cloud.spec "-D_topdir $RPMDIR" "$DEFVER" "$DEFREL" "$DEFPRE") + + exit +} + +function packaging() { + CWD=`pwd` + RPMDIR=$CWD/../../dist/rpmbuild + PACK_PROJECT=cloudstack + DEFOSSNOSS="-D_ossnoss $packageval" + VERSION=`(cd ../../; mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.version) | grep '^[0-9]\.'` + + if echo $VERSION | grep SNAPSHOT ; then + REALVER=`echo $VERSION | cut -d '-' -f 1` + DEFVER="-D_ver $REALVER" + DEFPRE="-D_prerelease 1" + DEFREL="-D_rel SNAPSHOT" + else + DEFVER="-D_ver $REALVER" + DEFPRE= + DEFREL= + fi + + mkdir -p $RPMDIR/SPECS + mkdir -p $RPMDIR/SOURCES/$PACK_PROJECT-$VERSION + + (cd ../../; tar -c --exclude .git --exclude dist . | tar -C $RPMDIR/SOURCES/$PACK_PROJECT-$VERSION -x ) + (cd $RPMDIR/SOURCES/; tar -czf $PACK_PROJECT-$VERSION.tgz $PACK_PROJECT-$VERSION) + + cp cloud.spec $RPMDIR/SPECS + + (cd $RPMDIR; rpmbuild -ba SPECS/cloud.spec "-D_topdir $RPMDIR" "$DEFVER" "$DEFREL" "$DEFPRE" "$DEFOSSNOSS") + + exit +} -VERSION=`(cd ../../; mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.version) | grep '^[0-9]\.'` -if echo $VERSION | grep SNAPSHOT ; then - REALVER=`echo $VERSION | cut -d '-' -f 1` - DEFVER="-D_ver $REALVER" - DEFPRE="-D_prerelease 1" - DEFREL="-D_rel SNAPSHOT" +if [ $# -lt 1 ] ; then + defaultPackaging +elif [ $# -gt 0 ] ; then + SHORTOPTS="hp:" + LONGOPTS="help,pack:" + ARGS=$(getopt -s bash -u -a --options $SHORTOPTS --longoptions $LONGOPTS --name $0 -- "$@" ) + + eval set -- "$ARGS" + + while [ $# -gt 0 ] ; do + case "$1" in + -h | --help) + usage + exit 0 + ;; + -p | --pack) + echo "Packaging Apache CloudStack" + packageval=$2 + if [ "$packageval" == "oss" -o "$packageval" == "OSS" ] ; then + defaultPackaging + elif [ "$packageval" == "nonoss" -o 
"$packageval" == "NONOSS" ] ; then + packaging + else + echo "Error: Incorrect usage. See help ./package.sh --help|-h." + exit 1 + fi + ;; + -) + echo "Unrecognized option." + usage + exit 1 + ;; + --) + echo "Unrecognized option." + usage + exit 1 + ;; + -*) + echo "Unrecognized option." + usage + exit 1 + ;; + *) + shift + break + ;; + esac + done else - DEFVER="-D_ver $REALVER" - DEFPRE= - DEFREL= + echo "Incorrect choice. Nothing to do." >&2 + echo "./package.sh --help for details" fi - -mkdir -p $RPMDIR/SPECS -mkdir -p $RPMDIR/SOURCES/$PACK_PROJECT-$VERSION - - -(cd ../../; tar -c --exclude .git --exclude dist . | tar -C $RPMDIR/SOURCES/$PACK_PROJECT-$VERSION -x ) -(cd $RPMDIR/SOURCES/; tar -czf $PACK_PROJECT-$VERSION.tgz $PACK_PROJECT-$VERSION) - -cp cloud.spec $RPMDIR/SPECS - -(cd $RPMDIR; rpmbuild -ba SPECS/cloud.spec "-D_topdir $RPMDIR" "$DEFVER" "$DEFREL" "$DEFPRE") diff --git a/packaging/debian/init/cloud-management b/packaging/debian/init/cloud-management index 490bf1e8e68..69a0428db21 100755 --- a/packaging/debian/init/cloud-management +++ b/packaging/debian/init/cloud-management @@ -31,11 +31,11 @@ ### END INIT INFO PATH=/bin:/usr/bin:/sbin:/usr/sbin -NAME=cloud-management +NAME=cloudstack-management DESC="CloudStack-specific Tomcat servlet engine" DAEMON=/usr/bin/jsvc -CATALINA_HOME=/usr/share/cloud/management -DEFAULT=/etc/cloud/management/tomcat6.conf +CATALINA_HOME=/usr/share/cloudstack/management +DEFAULT=/etc/cloudstack/management/tomcat6.conf JVM_TMP=/tmp/$NAME-temp # We have to explicitly set the HOME variable to the homedir from the user "cloud" @@ -76,7 +76,7 @@ done export JAVA_HOME # Directory for per-instance configuration files and webapps -CATALINA_BASE=/usr/share/cloud/management +CATALINA_BASE=/usr/share/cloudstack/management # Use the Java security manager? 
(yes/no) TOMCAT6_SECURITY=no diff --git a/packaging/debian/replace.properties b/packaging/debian/replace.properties index fee1defd357..8705c78a8f7 100644 --- a/packaging/debian/replace.properties +++ b/packaging/debian/replace.properties @@ -59,4 +59,4 @@ SYSTEMJARS= USAGECLASSPATH= USAGELOG=/var/log/cloudstack/usage USAGESYSCONFDIR=/etc/cloudstack/usage -PACKAGE=cloud +PACKAGE=cloudstack diff --git a/patches/systemvm/debian/config/etc/init.d/cloud-early-config b/patches/systemvm/debian/config/etc/init.d/cloud-early-config index 1f0b9a7a187..2b99c5b6cf3 100755 --- a/patches/systemvm/debian/config/etc/init.d/cloud-early-config +++ b/patches/systemvm/debian/config/etc/init.d/cloud-early-config @@ -390,7 +390,28 @@ setup_common() { then ip route add default via $GW dev eth0 else - ip route add default via $GW dev $3 + timer=0 + #default route add fails if we run before interface configured with ip + while true + do + ip=$(ifconfig $3 | grep "inet addr:" | awk '{print $2}' | awk -F: '{print $2}') + if [ -z $ip ] + then + sleep 1; + #waiting for the interface to setup with ip + echo "waiting for $3 interface setup with ip" + else + ip route add default via $GW dev $3 + break + fi + + if [ $timer -gt 5 ] + then + echo "interface $3 is not set up with ip... 
configuring default route failed" + break + fi + timer=`expr $timer + 1` + done fi fi diff --git a/plugins/alert-handlers/snmp-alerts/pom.xml b/plugins/alert-handlers/snmp-alerts/pom.xml new file mode 100644 index 00000000000..b5cebf31b7a --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/pom.xml @@ -0,0 +1,45 @@ + + + + cloudstack-plugins + org.apache.cloudstack + 4.2.0-SNAPSHOT + ../../pom.xml + + 4.0.0 + Apache CloudStack Plugin - SNMP Alerts + cloud-plugin-snmp-alerts + + + + org.apache.servicemix.bundles + org.apache.servicemix.bundles.snmp4j + 2.1.0_1 + + + log4j + log4j + ${cs.log4j.version} + + + + diff --git a/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/CsSnmpConstants.java b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/CsSnmpConstants.java new file mode 100644 index 00000000000..36970a958fd --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/CsSnmpConstants.java @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License + +package org.apache.cloudstack.alert.snmp; + +/** + *

+ * IMPORTANT + *

+ * These OIDs are based on CS-ROOT-MIB MIB file. If there is any change in MIB file + * then that should be reflected in this file also * + *

+ * suffix 2 due to conflict with SnmpConstants class of snmp4j + */ +public class CsSnmpConstants { + public static final String CLOUDSTACK = "1.3.6.1.4.1.18060.15"; + + public static final String OBJECTS_PREFIX = CLOUDSTACK + ".1.1."; + + public static final String TRAPS_PREFIX = CLOUDSTACK + ".1.2.0."; + + public static final String DATA_CENTER_ID = OBJECTS_PREFIX + 1; + + public static final String POD_ID = OBJECTS_PREFIX + 2; + + public static final String CLUSTER_ID = OBJECTS_PREFIX + 3; + + public static final String MESSAGE = OBJECTS_PREFIX + 4; + + public static final String GENERATION_TIME = OBJECTS_PREFIX + 5; +} \ No newline at end of file diff --git a/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayout.java b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayout.java new file mode 100644 index 00000000000..67420915607 --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayout.java @@ -0,0 +1,107 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License + +package org.apache.cloudstack.alert.snmp; + +import org.apache.log4j.EnhancedPatternLayout; +import org.apache.log4j.spi.LoggingEvent; + +import java.util.Date; +import java.util.StringTokenizer; + +public class SnmpEnhancedPatternLayout extends EnhancedPatternLayout { + private String _pairDelimiter = "//"; + private String _keyValueDelimiter = "::"; + + private static final int LENGTH_OF_STRING_MESSAGE_AND_KEY_VALUE_DELIMITER = 9; + private static final int LENGTH_OF_STRING_MESSAGE = 8; + + public String getKeyValueDelimeter() { + return _keyValueDelimiter; + } + + public void setKeyValueDelimiter(String keyValueDelimiter) { + this._keyValueDelimiter = keyValueDelimiter; + } + + public String getPairDelimiter() { + return _pairDelimiter; + } + + public void setPairDelimiter(String pairDelimiter) { + this._pairDelimiter = pairDelimiter; + } + + public SnmpTrapInfo parseEvent(LoggingEvent event) { + SnmpTrapInfo snmpTrapInfo = null; + + final String message = event.getRenderedMessage(); + if (message.contains("alertType") && message.contains("message")) { + snmpTrapInfo = new SnmpTrapInfo(); + final StringTokenizer messageSplitter = new StringTokenizer(message, _pairDelimiter); + while (messageSplitter.hasMoreTokens()) { + final String pairToken = messageSplitter.nextToken(); + final StringTokenizer pairSplitter = new StringTokenizer(pairToken, _keyValueDelimiter); + String keyToken; + String valueToken; + + if (pairSplitter.hasMoreTokens()) { + keyToken = pairSplitter.nextToken().trim(); + } else { + break; + } + + if (pairSplitter.hasMoreTokens()) { + valueToken = pairSplitter.nextToken().trim(); + } else { + break; + } + + if (keyToken.equalsIgnoreCase("alertType") && !valueToken.equalsIgnoreCase("null")) { + snmpTrapInfo.setAlertType(Short.parseShort(valueToken)); + } else if (keyToken.equalsIgnoreCase("dataCenterId") && 
!valueToken.equalsIgnoreCase("null")) { + snmpTrapInfo.setDataCenterId(Long.parseLong(valueToken)); + } else if (keyToken.equalsIgnoreCase("podId") && !valueToken.equalsIgnoreCase("null")) { + snmpTrapInfo.setPodId(Long.parseLong(valueToken)); + } else if (keyToken.equalsIgnoreCase("clusterId") && !valueToken.equalsIgnoreCase("null")) { + snmpTrapInfo.setClusterId(Long.parseLong(valueToken)); + } else if (keyToken.equalsIgnoreCase("message") && !valueToken.equalsIgnoreCase("null")) { + snmpTrapInfo.setMessage(getSnmpMessage(message)); + } + } + + snmpTrapInfo.setGenerationTime(new Date(event.getTimeStamp())); + } + return snmpTrapInfo; + } + + private String getSnmpMessage(String message) { + int lastIndexOfKeyValueDelimiter = message.lastIndexOf(_keyValueDelimiter); + int lastIndexOfMessageInString = message.lastIndexOf("message"); + + if (lastIndexOfKeyValueDelimiter - lastIndexOfMessageInString <= + LENGTH_OF_STRING_MESSAGE_AND_KEY_VALUE_DELIMITER) { + return message.substring(lastIndexOfKeyValueDelimiter + _keyValueDelimiter.length()).trim(); + } else if (lastIndexOfMessageInString < lastIndexOfKeyValueDelimiter) { + return message.substring( + lastIndexOfMessageInString + _keyValueDelimiter.length() + LENGTH_OF_STRING_MESSAGE).trim(); + } + + return message.substring(message.lastIndexOf("message" + _keyValueDelimiter) + + LENGTH_OF_STRING_MESSAGE_AND_KEY_VALUE_DELIMITER).trim(); + } +} \ No newline at end of file diff --git a/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpHelper.java b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpHelper.java new file mode 100644 index 00000000000..4bee94bd9d0 --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpHelper.java @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License + +package org.apache.cloudstack.alert.snmp; + +import com.cloud.utils.exception.CloudRuntimeException; +import org.snmp4j.CommunityTarget; +import org.snmp4j.PDU; +import org.snmp4j.Snmp; +import org.snmp4j.mp.SnmpConstants; +import org.snmp4j.smi.OID; +import org.snmp4j.smi.OctetString; +import org.snmp4j.smi.UdpAddress; +import org.snmp4j.smi.UnsignedInteger32; +import org.snmp4j.smi.VariableBinding; +import org.snmp4j.transport.DefaultUdpTransportMapping; + +import java.io.IOException; + +public class SnmpHelper { + private Snmp _snmp; + private CommunityTarget _target; + + public SnmpHelper(String address, String community) { + _target = new CommunityTarget(); + _target.setCommunity(new OctetString(community)); + _target.setVersion(SnmpConstants.version2c); + _target.setAddress(new UdpAddress(address)); + try { + _snmp = new Snmp(new DefaultUdpTransportMapping()); + } catch (IOException e) { + _snmp = null; + throw new CloudRuntimeException(" Error in crearting snmp object, " + e.getMessage()); + } + } + + public void sendSnmpTrap(SnmpTrapInfo snmpTrapInfo) { + try { + if (_snmp != null) { + _snmp.send(createPDU(snmpTrapInfo), _target, null, null); + } + } catch (IOException e) { + throw new CloudRuntimeException(" Error in sending SNMP Trap, " + e.getMessage()); + } + } 
+ + private PDU createPDU(SnmpTrapInfo snmpTrapInfo) { + PDU trap = new PDU(); + trap.setType(PDU.TRAP); + + int alertType = snmpTrapInfo.getAlertType() + 1; + if (alertType > 0) { + trap.add(new VariableBinding(SnmpConstants.snmpTrapOID, getOID(CsSnmpConstants.TRAPS_PREFIX + alertType))); + if (snmpTrapInfo.getDataCenterId() != 0) { + trap.add(new VariableBinding(getOID(CsSnmpConstants.DATA_CENTER_ID), + new UnsignedInteger32(snmpTrapInfo.getDataCenterId()))); + } + + if (snmpTrapInfo.getPodId() != 0) { + trap.add(new VariableBinding(getOID(CsSnmpConstants.POD_ID), new UnsignedInteger32(snmpTrapInfo + .getPodId()))); + } + + if (snmpTrapInfo.getClusterId() != 0) { + trap.add(new VariableBinding(getOID(CsSnmpConstants.CLUSTER_ID), new UnsignedInteger32(snmpTrapInfo + .getClusterId()))); + } + + if (snmpTrapInfo.getMessage() != null) { + trap.add(new VariableBinding(getOID(CsSnmpConstants.MESSAGE), new OctetString(snmpTrapInfo.getMessage + ()))); + } else { + throw new CloudRuntimeException(" What is the use of alert without message "); + } + + if (snmpTrapInfo.getGenerationTime() != null) { + trap.add(new VariableBinding(getOID(CsSnmpConstants.GENERATION_TIME), + new OctetString(snmpTrapInfo.getGenerationTime().toString()))); + } else { + trap.add(new VariableBinding(getOID(CsSnmpConstants.GENERATION_TIME))); + } + } else { + throw new CloudRuntimeException(" Invalid alert Type "); + } + + return trap; + } + + private OID getOID(String oidString) { + return new OID(oidString); + } +} \ No newline at end of file diff --git a/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpTrapAppender.java b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpTrapAppender.java new file mode 100644 index 00000000000..eaa4a132b7e --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpTrapAppender.java @@ -0,0 +1,207 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more 
contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License + +package org.apache.cloudstack.alert.snmp; + +import com.cloud.utils.net.NetUtils; +import org.apache.log4j.AppenderSkeleton; +import org.apache.log4j.spi.ErrorCode; +import org.apache.log4j.spi.LoggingEvent; + +import java.util.ArrayList; +import java.util.List; +import java.util.StringTokenizer; + +public class SnmpTrapAppender extends AppenderSkeleton { + private String _delimiter = ","; + private String _snmpManagerIpAddresses; + private String _snmpManagerPorts; + private String _snmpManagerCommunities; + + private String _oldSnmpManagerIpAddresses = null; + private String _oldSnmpManagerPorts = null; + private String _oldSnmpManagerCommunities = null; + + private List _ipAddresses = null; + private List _communities = null; + private List _ports = null; + + List _snmpHelpers = new ArrayList(); + + @Override + protected void append(LoggingEvent event) { + SnmpEnhancedPatternLayout snmpEnhancedPatternLayout; + + if (getLayout() == null) { + errorHandler.error("No layout set for the Appender named [" + getName() + ']', null, + ErrorCode.MISSING_LAYOUT); + return; + } + + if (getLayout() instanceof SnmpEnhancedPatternLayout) { + snmpEnhancedPatternLayout = (SnmpEnhancedPatternLayout) getLayout(); + } else { + return; + } + + if 
(!isAsSevereAsThreshold(event.getLevel())) { + return; + } + + SnmpTrapInfo snmpTrapInfo = snmpEnhancedPatternLayout.parseEvent(event); + + if (snmpTrapInfo != null && !_snmpHelpers.isEmpty()) { + for (SnmpHelper helper : _snmpHelpers) { + try { + helper.sendSnmpTrap(snmpTrapInfo); + } catch (Exception e) { + errorHandler.error(e.getMessage()); + } + } + } + } + + void setSnmpHelpers() { + if (_snmpManagerIpAddresses == null || _snmpManagerIpAddresses.trim().isEmpty() || _snmpManagerCommunities == + null || _snmpManagerCommunities.trim().isEmpty() || _snmpManagerPorts == null || + _snmpManagerPorts.trim().isEmpty()) { + reset(); + return; + } + + if (_oldSnmpManagerIpAddresses != null && _oldSnmpManagerIpAddresses.equals(_snmpManagerIpAddresses) && + _oldSnmpManagerCommunities.equals(_snmpManagerCommunities) && + _oldSnmpManagerPorts.equals(_snmpManagerPorts)) { + return; + } + + _oldSnmpManagerIpAddresses = _snmpManagerIpAddresses; + _oldSnmpManagerPorts = _snmpManagerPorts; + _oldSnmpManagerCommunities = _snmpManagerCommunities; + + _ipAddresses = parse(_snmpManagerIpAddresses); + _communities = parse(_snmpManagerCommunities); + _ports = parse(_snmpManagerPorts); + + if (!(_ipAddresses.size() == _communities.size() && _ipAddresses.size() == _ports.size())) { + reset(); + errorHandler.error(" size of ip addresses , communities, " + "and ports list doesn't match, " + + "setting all to null"); + return; + } + + if (!validateIpAddresses() || !validatePorts()) { + reset(); + errorHandler.error(" Invalid format for the IP Addresses or Ports parameter "); + return; + } + + String address; + + for (int i = 0; i < _ipAddresses.size(); i++) { + address = _ipAddresses.get(i) + "/" + _ports.get(i); + try { + _snmpHelpers.add(new SnmpHelper(address, _communities.get(i))); + } catch (Exception e) { + errorHandler.error(e.getMessage()); + } + } + } + + private void reset() { + _ipAddresses = null; + _communities = null; + _ports = null; + _snmpHelpers.clear(); + } + + @Override 
+ public void close() { + if (!closed) closed = true; + } + + @Override + public boolean requiresLayout() { + return true; + } + + private List parse(String str) { + List result = new ArrayList(); + + final StringTokenizer tokenizer = new StringTokenizer(str, _delimiter); + while (tokenizer.hasMoreTokens()) { + result.add(tokenizer.nextToken().trim()); + } + return result; + } + + private boolean validatePorts() { + for (String port : _ports) { + if (!NetUtils.isValidPort(port)) { + return false; + } + } + return true; + } + + private boolean validateIpAddresses() { + for (String ipAddress : _ipAddresses) { + if (ipAddress.trim().equalsIgnoreCase("localhost")) { + continue; + } + if (!NetUtils.isValidIp(ipAddress)) { + return false; + } + } + return true; + } + + public String getSnmpManagerIpAddresses() { + return _snmpManagerIpAddresses; + } + + public void setSnmpManagerIpAddresses(String snmpManagerIpAddresses) { + this._snmpManagerIpAddresses = snmpManagerIpAddresses; + setSnmpHelpers(); + } + + public String getSnmpManagerPorts() { + return _snmpManagerPorts; + } + + public void setSnmpManagerPorts(String snmpManagerPorts) { + this._snmpManagerPorts = snmpManagerPorts; + setSnmpHelpers(); + } + + public String getSnmpManagerCommunities() { + return _snmpManagerCommunities; + } + + public void setSnmpManagerCommunities(String snmpManagerCommunities) { + this._snmpManagerCommunities = snmpManagerCommunities; + setSnmpHelpers(); + } + + public String getDelimiter() { + return _delimiter; + } + + public void setDelimiter(String delimiter) { + this._delimiter = delimiter; + } +} \ No newline at end of file diff --git a/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpTrapInfo.java b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpTrapInfo.java new file mode 100644 index 00000000000..71bfee02cb6 --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpTrapInfo.java @@ -0,0 
+1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License + +package org.apache.cloudstack.alert.snmp; + +import java.util.Date; + +public class SnmpTrapInfo { + private String message; + private long podId; + private long dataCenterId; + private long clusterId; + private Date generationTime; + private short alertType; + + public SnmpTrapInfo() { + } + + public SnmpTrapInfo(short alertType, long dataCenterId, long podId, long clusterId, String message, + Date generationTime) { + this.podId = podId; + this.alertType = alertType; + this.clusterId = clusterId; + this.dataCenterId = dataCenterId; + this.generationTime = generationTime; + this.message = message; + } + + public short getAlertType() { + return alertType; + } + + public void setAlertType(short alertType) { + this.alertType = alertType; + } + + public String getMessage() { + return message; + } + + public void setMessage(String message) { + this.message = message; + } + + public long getPodId() { + return podId; + } + + public void setPodId(long podId) { + this.podId = podId; + } + + public long getDataCenterId() { + return dataCenterId; + } + + public void setDataCenterId(long dataCenterId) { + this.dataCenterId = dataCenterId; + } + + public long 
getClusterId() { + return clusterId; + } + + public void setClusterId(long clusterId) { + this.clusterId = clusterId; + } + + public Date getGenerationTime() { + return generationTime; + } + + public void setGenerationTime(Date generationTime) { + this.generationTime = generationTime; + } +} \ No newline at end of file diff --git a/plugins/alert-handlers/snmp-alerts/test/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayoutTest.java b/plugins/alert-handlers/snmp-alerts/test/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayoutTest.java new file mode 100644 index 00000000000..b903a1e18b9 --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/test/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayoutTest.java @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License + +package org.apache.cloudstack.alert.snmp; + +import org.apache.log4j.spi.LoggingEvent; +import org.junit.Before; +import org.junit.Test; + +import javax.naming.ConfigurationException; + +import static junit.framework.Assert.assertEquals; +import static junit.framework.Assert.assertNotNull; +import static junit.framework.Assert.assertNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SnmpEnhancedPatternLayoutTest { + SnmpEnhancedPatternLayout _snmpEnhancedPatternLayout = new SnmpEnhancedPatternLayout(); + + @Before + public void setUp() throws ConfigurationException { + _snmpEnhancedPatternLayout.setKeyValueDelimiter("::"); + _snmpEnhancedPatternLayout.setPairDelimiter("//"); + } + + @Test + public void parseAlertTest() { + LoggingEvent event = mock(LoggingEvent.class); + setMessage(" alertType:: 14 // dataCenterId:: 1 // podId:: 1 // " + "clusterId:: null // message:: Management" + + " network CIDR is not configured originally. Set it default to 10.102.192.0/22", event); + SnmpTrapInfo info = _snmpEnhancedPatternLayout.parseEvent(event); + commonAssertions(info, "Management network CIDR is not configured originally. Set it default to 10.102.192" + + ".0/22"); + } + + @Test + public void ParseAlertWithPairDelimeterInMessageTest() { + LoggingEvent event = mock(LoggingEvent.class); + setMessage(" alertType:: 14 // dataCenterId:: 1 // podId:: 1 // " + "clusterId:: null // message:: Management" + + " //network CIDR is not configured originally. Set it default to 10.102.192.0/22", event); + SnmpTrapInfo info = _snmpEnhancedPatternLayout.parseEvent(event); + commonAssertions(info, "Management //network CIDR is not configured originally. 
Set it default to 10.102.192" + + ".0/22"); + } + + @Test + public void ParseAlertWithKeyValueDelimeterInMessageTest() { + LoggingEvent event = mock(LoggingEvent.class); + setMessage(" alertType:: 14 // dataCenterId:: 1 // podId:: 1 // " + "clusterId:: null // message:: Management" + + " ::network CIDR is not configured originally. Set it default to 10.102.192.0/22", event); + SnmpTrapInfo info = _snmpEnhancedPatternLayout.parseEvent(event); + commonAssertions(info, "Management ::network CIDR is not configured originally. Set it default to 10.102.192" + + ".0/22"); + } + + @Test + public void parseRandomTest() { + LoggingEvent event = mock(LoggingEvent.class); + when(event.getRenderedMessage()).thenReturn("Problem clearing email alert"); + assertNull(" Null value was expected ", _snmpEnhancedPatternLayout.parseEvent(event)); + } + + private void commonAssertions(SnmpTrapInfo info, String message) { + assertEquals(" alert type not as expected ", 14, info.getAlertType()); + assertEquals(" data center id not as expected ", 1, info.getDataCenterId()); + assertEquals(" pod id os not as expected ", 1, info.getPodId()); + assertEquals(" cluster id is not as expected ", 0, info.getClusterId()); + assertNotNull(" generation time is set to null", info.getGenerationTime()); + assertEquals(" message is not as expected ", message, info.getMessage()); + } + + private void setMessage(String message, LoggingEvent event) { + when(event.getRenderedMessage()).thenReturn(message); + } +} \ No newline at end of file diff --git a/plugins/alert-handlers/snmp-alerts/test/org/apache/cloudstack/alert/snmp/SnmpTrapAppenderTest.java b/plugins/alert-handlers/snmp-alerts/test/org/apache/cloudstack/alert/snmp/SnmpTrapAppenderTest.java new file mode 100644 index 00000000000..2a65d90acc2 --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/test/org/apache/cloudstack/alert/snmp/SnmpTrapAppenderTest.java @@ -0,0 +1,86 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more 
contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License + +package org.apache.cloudstack.alert.snmp; + +import org.apache.log4j.spi.LoggingEvent; +import org.junit.Test; +import org.mockito.Mock; + +import java.util.List; + +import static junit.framework.Assert.assertEquals; +import static junit.framework.Assert.assertTrue; +import static org.mockito.Mockito.mock; + +public class SnmpTrapAppenderTest { + SnmpTrapAppender _appender = new SnmpTrapAppender(); + LoggingEvent _event = mock(LoggingEvent.class); + SnmpEnhancedPatternLayout _snmpEnhancedPatternLayout = mock(SnmpEnhancedPatternLayout.class); + @Mock + List snmpHelpers; + + @Test + public void appendTest() { + _appender.setSnmpManagerIpAddresses("10.1.1.1,10.1.1.2"); + _appender.setSnmpManagerPorts("162,164"); + _appender.setSnmpManagerCommunities("public,snmp"); + + _appender.setSnmpHelpers(); + assertEquals(" error snmpHelper list size not as expected ", _appender._snmpHelpers.size(), 2); + } + + @Test + public void InvalidInputTest() { + _appender.setSnmpManagerIpAddresses("10.1.1.1,10.1.1.2"); + _appender.setSnmpManagerPorts("162,164"); + _appender.setSnmpManagerCommunities("public"); + + _appender.setSnmpHelpers(); + assertTrue(" list was expected to be empty", _appender._snmpHelpers.isEmpty()); + } + + @Test + public void 
InvalidIpInputTest() { + _appender.setSnmpManagerIpAddresses("10.1.1,10.1.1.2"); + _appender.setSnmpManagerPorts("162,164"); + _appender.setSnmpManagerCommunities("public,snmp"); + + _appender.setSnmpHelpers(); + assertTrue(" list was expected to be empty", _appender._snmpHelpers.isEmpty()); + } + + @Test + public void InvalidPortInputTest() { + _appender.setSnmpManagerIpAddresses("10.1.1,10.1.1.2"); + _appender.setSnmpManagerPorts("162,164897489978"); + _appender.setSnmpManagerCommunities("public,snmp"); + + _appender.setSnmpHelpers(); + assertTrue(" list was expected to be empty", _appender._snmpHelpers.isEmpty()); + } + + @Test + public void mismatchListLengthInputTest() { + _appender.setSnmpManagerIpAddresses("10.1.1"); + _appender.setSnmpManagerPorts("162,164"); + _appender.setSnmpManagerCommunities("public,snmp"); + + _appender.setSnmpHelpers(); + assertTrue(" list was expected to be empty", _appender._snmpHelpers.isEmpty()); + } +} \ No newline at end of file diff --git a/plugins/hypervisors/baremetal/resources/security_group_agent/setup.py b/plugins/hypervisors/baremetal/resources/security_group_agent/setup.py index 2de41d265ba..384e04d6a96 100755 --- a/plugins/hypervisors/baremetal/resources/security_group_agent/setup.py +++ b/plugins/hypervisors/baremetal/resources/security_group_agent/setup.py @@ -30,7 +30,7 @@ security group agent for CloudStack Baremetal""", keywords='security group cloudstack', author='Frank Zhang', author_email='frank.zhang@citrix.com', - url='http://incubator.apache.org/cloudstack/', + url='http://cloudstack.apache.org', license='Apache License 2', packages=find_packages(exclude=['ez_setup', 'examples', 'tests']), include_package_data=True, diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 8ee3ea4e200..f786f886bf1 100755 --- 
a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -2751,17 +2751,74 @@ ServerResource { return stats; } + protected String VPCNetworkUsage(final String privateIpAddress, final String publicIp, + final String option, final String vpcCIDR) { + Script getUsage = new Script(_routerProxyPath, s_logger); + getUsage.add("vpc_netusage.sh"); + getUsage.add(privateIpAddress); + getUsage.add("-l", publicIp); + + if (option.equals("get")) { + getUsage.add("-g"); + } else if (option.equals("create")) { + getUsage.add("-c"); + getUsage.add("-v", vpcCIDR); + } else if (option.equals("reset")) { + getUsage.add("-r"); + } else if (option.equals("vpn")) { + getUsage.add("-n"); + } else if (option.equals("remove")) { + getUsage.add("-d"); + } + + final OutputInterpreter.OneLineParser usageParser = new OutputInterpreter.OneLineParser(); + String result = getUsage.execute(usageParser); + if (result != null) { + s_logger.debug("Failed to execute VPCNetworkUsage:" + result); + return null; + } + return usageParser.getLine(); + } + + protected long[] getVPCNetworkStats(String privateIP, String publicIp, String option) { + String result = VPCNetworkUsage(privateIP, publicIp, option, null); + long[] stats = new long[2]; + if (result != null) { + String[] splitResult = result.split(":"); + int i = 0; + while (i < splitResult.length - 1) { + stats[0] += (new Long(splitResult[i++])).longValue(); + stats[1] += (new Long(splitResult[i++])).longValue(); + } + } + return stats; + } + private Answer execute(NetworkUsageCommand cmd) { - if (cmd.getOption() != null && cmd.getOption().equals("create")) { - String result = networkUsage(cmd.getPrivateIP(), "create", null); - NetworkUsageAnswer answer = new NetworkUsageAnswer(cmd, result, 0L, - 0L); + if (cmd.isForVpc()) { + if (cmd.getOption() != null && cmd.getOption().equals("create")) { + String result = 
VPCNetworkUsage(cmd.getPrivateIP(),cmd.getGatewayIP(), "create", cmd.getVpcCIDR()); + NetworkUsageAnswer answer = new NetworkUsageAnswer(cmd, result, 0L, 0L); + return answer; + } else if (cmd.getOption() != null && (cmd.getOption().equals("get") || cmd.getOption().equals("vpn"))) { + long[] stats = getVPCNetworkStats(cmd.getPrivateIP(), cmd.getGatewayIP(), cmd.getOption()); + NetworkUsageAnswer answer = new NetworkUsageAnswer(cmd, "", stats[0], stats[1]); + return answer; + } else { + String result = VPCNetworkUsage(cmd.getPrivateIP(),cmd.getGatewayIP(), cmd.getOption(), cmd.getVpcCIDR()); + NetworkUsageAnswer answer = new NetworkUsageAnswer(cmd, result, 0L, 0L); + return answer; + } + } else { + if (cmd.getOption() != null && cmd.getOption().equals("create")) { + String result = networkUsage(cmd.getPrivateIP(), "create", null); + NetworkUsageAnswer answer = new NetworkUsageAnswer(cmd, result, 0L, 0L); + return answer; + } + long[] stats = getNetworkStats(cmd.getPrivateIP()); + NetworkUsageAnswer answer = new NetworkUsageAnswer(cmd, "", stats[0], stats[1]); return answer; } - long[] stats = getNetworkStats(cmd.getPrivateIP()); - NetworkUsageAnswer answer = new NetworkUsageAnswer(cmd, "", stats[0], - stats[1]); - return answer; } private Answer execute(RebootCommand cmd) { @@ -3044,7 +3101,7 @@ ServerResource { ConsoleDef console = new ConsoleDef("pty", null, null, (short) 0); devices.addDevice(console); - GraphicDef grap = new GraphicDef("vnc", (short) 0, true, vmTO.getVncAddr(), null, + GraphicDef grap = new GraphicDef("vnc", (short) 0, true, null, null, null); devices.addDevice(grap); @@ -3502,7 +3559,7 @@ ServerResource { localStoragePool.getUuid(), cmd.getPrivateIpAddress(), _localStoragePath, _localStoragePath, StoragePoolType.Filesystem, localStoragePool.getCapacity(), - localStoragePool.getUsed()); + localStoragePool.getAvailable()); sscmd = new StartupStorageCommand(); sscmd.setPoolInfo(pi); diff --git 
a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java index 5437e7c69b0..d32a6fdae46 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java @@ -39,6 +39,8 @@ public interface KVMStoragePool { public long getUsed(); + public long getAvailable(); + public boolean refresh(); public boolean isExternalSnapshot(); diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java index d5e6ad6fe00..ca5da5cf683 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java @@ -351,6 +351,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { pool.refresh(); pool.setCapacity(storage.getInfo().capacity); pool.setUsed(storage.getInfo().allocation); + pool.setAvailable(storage.getInfo().available); return pool; } catch (LibvirtException e) { @@ -483,6 +484,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { pool.setCapacity(sp.getInfo().capacity); pool.setUsed(sp.getInfo().allocation); + pool.setAvailable(sp.getInfo().available); return pool; } catch (LibvirtException e) { diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java index 32f8ce99d9c..1396097a1a7 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java @@ -28,6 +28,7 @@ public class LibvirtStoragePool implements KVMStoragePool { protected String 
uri; protected long capacity; protected long used; + protected long available; protected String name; protected String localPath; protected PhysicalDiskFormat defaultFormat; @@ -48,6 +49,7 @@ public class LibvirtStoragePool implements KVMStoragePool { this._storageAdaptor = adaptor; this.capacity = 0; this.used = 0; + this.available = 0; this._pool = pool; } @@ -65,11 +67,19 @@ public class LibvirtStoragePool implements KVMStoragePool { this.used = used; } + public void setAvailable(long available) { + this.available = available; + } + @Override public long getUsed() { return this.used; } + public long getAvailable() { + return this.available; + } + public StoragePoolType getStoragePoolType() { return this.type; } diff --git a/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java index 018f2f5330e..39e36d65c65 100644 --- a/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java +++ b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java @@ -19,13 +19,13 @@ package com.cloud.hypervisor.kvm.resource; -import org.junit.Test; import com.cloud.agent.api.to.VirtualMachineTO; -import com.cloud.hypervisor.kvm.resource.LibvirtVMDef; import com.cloud.template.VirtualMachineTemplate.BootloaderType; import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachine.Type; +import org.junit.Test; + import java.util.Random; + import static org.junit.Assert.assertEquals; public class LibvirtComputingResourceTest { @@ -54,7 +54,7 @@ public class LibvirtComputingResourceTest { boolean haEnabled = false; boolean limitCpuUse = false; - String vncAddr = "1.2.3.4"; + String vncAddr = ""; String vncPassword = "mySuperSecretPassword"; LibvirtComputingResource lcr = new LibvirtComputingResource(); @@ -79,7 +79,7 @@ public class LibvirtComputingResourceTest { 
vmStr += "\n"; vmStr += "\n"; vmStr += "\n"; - vmStr += "\n"; + vmStr += "\n"; vmStr += "\n"; vmStr += "\n"; vmStr += "\n"; @@ -129,7 +129,7 @@ public class LibvirtComputingResourceTest { boolean haEnabled = false; boolean limitCpuUse = false; - String vncAddr = "1.2.3.4"; + String vncAddr = ""; String vncPassword = "mySuperSecretPassword"; LibvirtComputingResource lcr = new LibvirtComputingResource(); @@ -154,7 +154,7 @@ public class LibvirtComputingResourceTest { vmStr += "\n"; vmStr += "\n"; vmStr += "\n"; - vmStr += "\n"; + vmStr += "\n"; vmStr += "\n"; vmStr += "\n"; vmStr += "\n"; diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java index 1dd71c5c27f..3a8cf17e24b 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java @@ -44,7 +44,7 @@ import com.cloud.storage.secondary.SecondaryStorageDiscoverer; import com.cloud.utils.exception.CloudRuntimeException; import org.springframework.stereotype.Component; -@Component + @Local(value=Discoverer.class) public class SimulatorSecondaryDiscoverer extends SecondaryStorageDiscoverer implements ResourceStateAdapter, Listener { private static final Logger s_logger = Logger.getLogger(SimulatorSecondaryDiscoverer.class); diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java index e11dd53f3c9..1f116455761 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java @@ -684,13 +684,16 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { 
String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl); String srcOVAFileName = secondaryMountPoint + "/" + secStorageDir + "/" + backupName + "." + ImageFormat.OVA.getFileExtension(); - + String snapshotDir = ""; + if (backupName.contains("/")){ + snapshotDir = backupName.split("/")[0]; + } String srcFileName = getOVFFilePath(srcOVAFileName); if(srcFileName == null) { Script command = new Script("tar", 0, s_logger); command.add("--no-same-owner"); command.add("-xf", srcOVAFileName); - command.setWorkDir(secondaryMountPoint + "/" + secStorageDir); + command.setWorkDir(secondaryMountPoint + "/" + secStorageDir + "/" + snapshotDir); s_logger.info("Executing command: " + command.toString()); String result = command.execute(); if(result != null) { @@ -731,7 +734,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { String backupUuid = UUID.randomUUID().toString(); exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, getSnapshotRelativeDirInSecStorage(accountId, volumeId), backupUuid, workerVmName); - return backupUuid; + return backupUuid + "/" + backupUuid; } private void exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volumePath, @@ -739,8 +742,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { String workerVmName) throws Exception { String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl); - String exportPath = secondaryMountPoint + "/" + secStorageDir; - + String exportPath = secondaryMountPoint + "/" + secStorageDir + "/" + exportName; + synchronized(exportPath.intern()) { if(!new File(exportPath).exists()) { Script command = new Script(false, "mkdir", _timeout, s_logger); diff --git a/plugins/network-elements/dns-notifier/resources/components-example.xml b/plugins/network-elements/dns-notifier/resources/components-example.xml index 36441bd667b..3a92a258e9f 100755 --- a/plugins/network-elements/dns-notifier/resources/components-example.xml +++ 
b/plugins/network-elements/dns-notifier/resources/components-example.xml @@ -155,6 +155,8 @@ under the License. + + diff --git a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java index abb36c36963..bebba3cb09d 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java +++ b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java @@ -28,6 +28,7 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.configuration.Config; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.deploy.DeployDestination; @@ -197,4 +198,10 @@ public class ElasticLoadBalancerElement extends AdapterBase implements LoadBalan public IpDeployer getIpDeployer(Network network) { return this; } + + @Override + public List updateHealthChecks(Network network, List lbrules) { + return null; + } + } diff --git a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java index 81039d1f3c7..283b517dce9 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java +++ b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java @@ -94,6 +94,7 @@ import com.cloud.network.dao.NetworkVO; import com.cloud.network.dao.PhysicalNetworkServiceProviderDao; import com.cloud.network.dao.VirtualRouterProviderDao; import com.cloud.network.lb.LoadBalancingRule.LbDestination; +import 
com.cloud.network.lb.LoadBalancingRule.LbHealthCheckPolicy; import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; import com.cloud.network.lb.dao.ElasticLbVmMapDao; import com.cloud.network.router.VirtualRouter; @@ -367,9 +368,10 @@ ElasticLoadBalancerManager, VirtualMachineGuru { for (LoadBalancerVO lb : lbs) { List dstList = _lbMgr.getExistingDestinations(lb.getId()); List policyList = _lbMgr.getStickinessPolicies(lb.getId()); + List hcPolicyList = _lbMgr.getHealthCheckPolicies(lb.getId()); LoadBalancingRule loadBalancing = new LoadBalancingRule( - lb, dstList, policyList); - lbRules.add(loadBalancing); + lb, dstList, policyList, hcPolicyList); + lbRules.add(loadBalancing); } return applyLBRules(elbVm, lbRules, network.getId()); } else if (elbVm.getState() == State.Stopped @@ -940,7 +942,8 @@ ElasticLoadBalancerManager, VirtualMachineGuru { for (LoadBalancerVO lb : lbs) { List dstList = _lbMgr.getExistingDestinations(lb.getId()); List policyList = _lbMgr.getStickinessPolicies(lb.getId()); - LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList); + List hcPolicyList = _lbMgr.getHealthCheckPolicies(lb.getId()); + LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList, hcPolicyList); lbRules.add(loadBalancing); } diff --git a/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java b/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java index 94c098ed4bb..3e75c3f1afe 100644 --- a/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java +++ b/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java @@ -29,6 +29,7 @@ import org.apache.cloudstack.api.response.ExternalLoadBalancerResponse; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; import org.apache.log4j.Logger; +import com.cloud.agent.api.to.LoadBalancerTO; import 
com.cloud.api.ApiDBUtils; import com.cloud.api.commands.AddExternalLoadBalancerCmd; import com.cloud.api.commands.AddF5LoadBalancerCmd; @@ -496,4 +497,11 @@ public class F5ExternalLoadBalancerElement extends ExternalLoadBalancerDeviceMan } return this; } + + @Override + public List updateHealthChecks(Network network, + List lbrules) { + // TODO Auto-generated method stub + return null; + } } diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java index c1c735aa270..a90440cc2f3 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java +++ b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java @@ -292,7 +292,8 @@ StaticNatServiceProvider { lbCapabilities.put(Capability.SupportedStickinessMethods, stickyMethodList); lbCapabilities.put(Capability.ElasticLb, "true"); - + //Setting HealthCheck Capability to True for Netscaler element + lbCapabilities.put(Capability.HealthCheckPolicy, "true"); capabilities.put(Service.Lb, lbCapabilities); Map staticNatCapabilities = new HashMap(); @@ -814,4 +815,26 @@ StaticNatServiceProvider { } return null; } + + @Override + public List updateHealthChecks(Network network, List lbrules) { + + if (canHandle(network, Service.Lb)) { + try { + return getLBHealthChecks(network, lbrules); + } catch (ResourceUnavailableException e) { + s_logger.error("Error in getting the LB Rules from NetScaler " + e); + } + } else { + s_logger.error("Network cannot handle to LB service "); + } + return null; + } + + @Override + public List getLBHealthChecks(Network network, List rules) + throws ResourceUnavailableException { + return super.getLBHealthChecks(network, rules); + + } } diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java 
b/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java index abea4649dbe..c09869b996a 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java +++ b/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java @@ -11,11 +11,12 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package com.cloud.network.resource; +import java.util.ArrayList; import java.util.Formatter; import java.util.HashMap; import java.util.List; @@ -31,6 +32,7 @@ import com.citrix.netscaler.nitro.resource.base.base_response; import com.citrix.netscaler.nitro.resource.config.autoscale.autoscalepolicy; import com.citrix.netscaler.nitro.resource.config.autoscale.autoscaleprofile; import com.citrix.netscaler.nitro.resource.config.basic.server_service_binding; +import com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding; import com.citrix.netscaler.nitro.resource.config.basic.servicegroup; import com.citrix.netscaler.nitro.resource.config.basic.servicegroup_lbmonitor_binding; import com.citrix.netscaler.nitro.resource.config.lb.lbmetrictable; @@ -71,6 +73,8 @@ import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupExternalLoadBalancerCommand; import com.cloud.agent.api.routing.CreateLoadBalancerApplianceCommand; import com.cloud.agent.api.routing.DestroyLoadBalancerApplianceCommand; +import com.cloud.agent.api.routing.HealthCheckLBConfigAnswer; +import com.cloud.agent.api.routing.HealthCheckLBConfigCommand; import com.cloud.agent.api.routing.IpAssocAnswer; import com.cloud.agent.api.routing.IpAssocCommand; import 
com.cloud.agent.api.routing.LoadBalancerConfigCommand; @@ -84,6 +88,7 @@ import com.cloud.agent.api.to.LoadBalancerTO.AutoScaleVmProfileTO; import com.cloud.agent.api.to.LoadBalancerTO.ConditionTO; import com.cloud.agent.api.to.LoadBalancerTO.CounterTO; import com.cloud.agent.api.to.LoadBalancerTO.DestinationTO; +import com.cloud.agent.api.to.LoadBalancerTO.HealthCheckPolicyTO; import com.cloud.agent.api.to.LoadBalancerTO.StickinessPolicyTO; import com.cloud.agent.api.to.StaticNatRuleTO; import org.apache.cloudstack.api.ApiConstants; @@ -396,12 +401,14 @@ public class NetscalerResource implements ServerResource { return execute((DestroyLoadBalancerApplianceCommand) cmd, numRetries); } else if (cmd instanceof SetStaticNatRulesCommand) { return execute((SetStaticNatRulesCommand) cmd, numRetries); - } else { + } else if (cmd instanceof HealthCheckLBConfigCommand) { + return execute((HealthCheckLBConfigCommand) cmd, numRetries); + }else { return Answer.createUnsupportedCommandAnswer(cmd); } } - private Answer execute(ReadyCommand cmd) { + private Answer execute(ReadyCommand cmd) { return new ReadyAnswer(cmd); } @@ -450,6 +457,65 @@ public class NetscalerResource implements ServerResource { return new IpAssocAnswer(cmd, results); } + private Answer execute(HealthCheckLBConfigCommand cmd, int numRetries) { + + List hcLB = new ArrayList(); + try { + + if (_isSdx) { + return Answer.createUnsupportedCommandAnswer(cmd); + } + + LoadBalancerTO[] loadBalancers = cmd.getLoadBalancers(); + + if (loadBalancers == null) { + return new HealthCheckLBConfigAnswer(hcLB); + } + + for (LoadBalancerTO loadBalancer : loadBalancers) { + HealthCheckPolicyTO[] healthCheckPolicies = loadBalancer.getHealthCheckPolicies(); + if ((healthCheckPolicies != null) && (healthCheckPolicies.length > 0) + && (healthCheckPolicies[0] != null)) { + String nsVirtualServerName = generateNSVirtualServerName(loadBalancer.getSrcIp(), + loadBalancer.getSrcPort()); + + 
com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding[] serviceBindings = com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding + .get(_netscalerService, nsVirtualServerName); + + if (serviceBindings != null) { + for (DestinationTO destination : loadBalancer.getDestinations()) { + String nsServiceName = generateNSServiceName(destination.getDestIp(), + destination.getDestPort()); + for (com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding binding : serviceBindings) { + if (nsServiceName.equalsIgnoreCase(binding.get_servicename())) { + destination.setMonitorState(binding.get_curstate()); + break; + } + } + } + hcLB.add(loadBalancer); + } + } + } + + } catch (ExecutionException e) { + s_logger.error("Failed to execute HealthCheckLBConfigCommand due to ", e); + if (shouldRetry(numRetries)) { + return retry(cmd, numRetries); + } else { + return new HealthCheckLBConfigAnswer(hcLB); + } + } catch (Exception e) { + s_logger.error("Failed to execute HealthCheckLBConfigCommand due to ", e); + if (shouldRetry(numRetries)) { + return retry(cmd, numRetries); + } else { + return new HealthCheckLBConfigAnswer(hcLB); + } + } + return new HealthCheckLBConfigAnswer(hcLB); + } + private synchronized Answer execute(LoadBalancerConfigCommand cmd, int numRetries) { try { if (_isSdx) { @@ -467,12 +533,13 @@ public class NetscalerResource implements ServerResource { String lbProtocol = getNetScalerProtocol(loadBalancer); String lbAlgorithm = loadBalancer.getAlgorithm(); String nsVirtualServerName = generateNSVirtualServerName(srcIp, srcPort); - + String nsMonitorName = generateNSMonitorName(srcIp, srcPort); if(loadBalancer.isAutoScaleVmGroupTO()) { applyAutoScaleConfig(loadBalancer); return new Answer(cmd); } - + boolean hasMonitor = false; + boolean deleteMonitor = false; boolean destinationsToAdd = false; for (DestinationTO destination : loadBalancer.getDestinations()) { if (!destination.isRevoked()) { @@ -489,11 +556,28 @@ public 
class NetscalerResource implements ServerResource { s_logger.debug("Created load balancing virtual server " + nsVirtualServerName + " on the Netscaler device"); } + // create a new monitor + HealthCheckPolicyTO[] healthCheckPolicies = loadBalancer.getHealthCheckPolicies(); + if ((healthCheckPolicies != null) && (healthCheckPolicies.length > 0) + && (healthCheckPolicies[0] != null)) { + + for (HealthCheckPolicyTO healthCheckPolicyTO : healthCheckPolicies) { + if ( !healthCheckPolicyTO.isRevoked() ) { + addLBMonitor(nsMonitorName, lbProtocol, healthCheckPolicyTO); + hasMonitor = true; + } + else { + deleteMonitor = true; + hasMonitor = false; + } + } + + } + for (DestinationTO destination : loadBalancer.getDestinations()) { String nsServerName = generateNSServerName(destination.getDestIp()); String nsServiceName = generateNSServiceName(destination.getDestIp(), destination.getDestPort()); - if (!destination.isRevoked()) { // add a new destination to deployed load balancing rule @@ -534,6 +618,26 @@ public class NetscalerResource implements ServerResource { throw new ExecutionException("Failed to bind service: " + nsServiceName + " to the lb virtual server: " + nsVirtualServerName + " on Netscaler device"); } } + + // After binding the service to the LB Vserver + // successfully, bind the created monitor to the + // service. + if (hasMonitor) { + if (!isServiceBoundToMonitor(nsServiceName, nsMonitorName)) { + bindServiceToMonitor(nsServiceName, nsMonitorName); + } + } else { + // check if any monitor created by CS is already + // existing, if yes, unbind it from services and + // delete it. 
+ if (nsMonitorExist(nsMonitorName)) { + // unbind the service from the monitor and + // delete the monitor + unBindServiceToMonitor(nsServiceName, nsMonitorName); + deleteMonitor = true; + } + + } if (s_logger.isDebugEnabled()) { s_logger.debug("Successfully added LB destination: " + destination.getDestIp() + ":" + destination.getDestPort() + " to load balancer " + srcIp + ":" + srcPort); } @@ -609,8 +713,13 @@ public class NetscalerResource implements ServerResource { } } removeLBVirtualServer(nsVirtualServerName); + deleteMonitor = true; } } + if(deleteMonitor) { + removeLBMonitor(nsMonitorName); + } + } if (s_logger.isInfoEnabled()) { @@ -1223,23 +1332,64 @@ public class NetscalerResource implements ServerResource { } } + private lbmonitor getMonitorIfExisits(String lbMonitorName) throws ExecutionException { + try { + return lbmonitor.get(_netscalerService, lbMonitorName); + } catch (nitro_exception e) { + if (e.getErrorCode() == NitroError.NS_RESOURCE_NOT_EXISTS) { + return null; + } else { + throw new ExecutionException(e.getMessage()); + } + } catch (Exception e) { + throw new ExecutionException(e.getMessage()); + } + } + private boolean isServiceBoundToVirtualServer(String serviceName) throws ExecutionException { try { lbvserver[] lbservers = lbvserver.get(_netscalerService); for (lbvserver vserver : lbservers) { filtervalue[] filter = new filtervalue[1]; filter[0] = new filtervalue("servicename", serviceName); - lbvserver_service_binding[] result = lbvserver_service_binding.get_filtered(_netscalerService, vserver.get_name(), filter); + lbvserver_service_binding[] result = lbvserver_service_binding.get_filtered(_netscalerService, + vserver.get_name(), filter); if (result != null && result.length > 0) { return true; } } return false; } catch (Exception e) { - throw new ExecutionException("Failed to verify service " + serviceName + " is bound to any virtual server due to " + e.getMessage()); + throw new ExecutionException("Failed to verify service " + 
serviceName + + " is bound to any virtual server due to " + e.getMessage()); } } + private boolean isServiceBoundToMonitor(String nsServiceName, String nsMonitorName) throws ExecutionException { + + filtervalue[] filter = new filtervalue[1]; + filter[0] = new filtervalue("monitor_name", nsMonitorName); + service_lbmonitor_binding[] result; + try { + result = service_lbmonitor_binding.get_filtered(_netscalerService, nsServiceName, filter); + if (result != null && result.length > 0) { + return true; + } + + } catch (Exception e) { + throw new ExecutionException("Failed to verify service " + nsServiceName + + " is bound to any monitor due to " + e.getMessage()); + } + return false; + } + + private boolean nsMonitorExist(String nsMonitorname) throws ExecutionException { + if (getMonitorIfExisits(nsMonitorname) != null) + return true; + else + return false; + } + private boolean nsServiceExists(String serviceName) throws ExecutionException { try { if (com.citrix.netscaler.nitro.resource.config.basic.service.get(_netscalerService, serviceName) != null) { @@ -1480,29 +1630,137 @@ public class NetscalerResource implements ServerResource { } } + // Monitor related methods + private void addLBMonitor(String nsMonitorName, String lbProtocol, HealthCheckPolicyTO hcp) + throws ExecutionException { + try { + // check if the monitor exists + boolean csMonitorExisis = false; + lbmonitor csMonitor = getMonitorIfExisits(nsMonitorName); + if (csMonitor != null) { + if (!csMonitor.get_type().equalsIgnoreCase(lbProtocol)) { + throw new ExecutionException("Can not update monitor :" + nsMonitorName + " as current protocol:" + + csMonitor.get_type() + " of monitor is different from the " + " intended protocol:" + + lbProtocol); + } + csMonitorExisis = true; + } + if (!csMonitorExisis) { + lbmonitor csMon = new lbmonitor(); + csMon.set_monitorname(nsMonitorName); + csMon.set_type(lbProtocol); + if (lbProtocol.equalsIgnoreCase("HTTP")) { + csMon.set_httprequest(hcp.getpingPath()); + 
s_logger.trace("LB Protocol is HTTP, Applying ping path on HealthCheck Policy"); + } else { + s_logger.debug("LB Protocol is not HTTP, Skipping to apply ping path on HealthCheck Policy"); + } + + csMon.set_interval(hcp.getHealthcheckInterval()); + csMon.set_resptimeout(hcp.getResponseTime()); + csMon.set_failureretries(hcp.getUnhealthThresshold()); + csMon.set_successretries(hcp.getHealthcheckThresshold()); + s_logger.debug("Monitor properites going to get created :interval :: " + csMon.get_interval() + "respTimeOUt:: " + csMon.get_resptimeout() + +"failure retires(unhealththresshold) :: " + csMon.get_failureretries() + "successtries(healththresshold) ::" + csMon.get_successretries()); + lbmonitor.add(_netscalerService, csMon); + } else { + s_logger.debug("Monitor :" + nsMonitorName + " is already existing. Skipping to delete and create it"); + } + } catch (nitro_exception e) { + throw new ExecutionException("Failed to create new monitor :" + nsMonitorName + " due to " + e.getMessage()); + } catch (Exception e) { + throw new ExecutionException("Failed to create new monitor :" + nsMonitorName + " due to " + e.getMessage()); + } + } + + private void bindServiceToMonitor(String nsServiceName, String nsMonitorName) throws ExecutionException { + + try { + com.citrix.netscaler.nitro.resource.config.basic.service serviceObject = new com.citrix.netscaler.nitro.resource.config.basic.service(); + serviceObject = com.citrix.netscaler.nitro.resource.config.basic.service.get(_netscalerService, + nsServiceName); + if (serviceObject != null) { + com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding serviceMonitor = new com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding(); + serviceMonitor.set_monitor_name(nsMonitorName); + serviceMonitor.set_name(nsServiceName); + serviceMonitor.set_monstate("ENABLED"); + s_logger.debug("Trying to bind the monitor :" + nsMonitorName + " to the service :" + nsServiceName); + 
com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding.add(_netscalerService, + serviceMonitor); + s_logger.debug("Successfully binded the monitor :" + nsMonitorName + " to the service :" + + nsServiceName); + } + } catch (nitro_exception e) { + throw new ExecutionException("Failed to create new monitor :" + nsMonitorName + " due to " + e.getMessage()); + } catch (Exception e) { + throw new ExecutionException("Failed to create new monitor :" + nsMonitorName + " due to " + e.getMessage()); + } + } + + private void unBindServiceToMonitor(String nsServiceName, String nsMonitorName) throws ExecutionException { + + try { + com.citrix.netscaler.nitro.resource.config.basic.service serviceObject = new com.citrix.netscaler.nitro.resource.config.basic.service(); + serviceObject = com.citrix.netscaler.nitro.resource.config.basic.service.get(_netscalerService, + nsServiceName); + + if (serviceObject != null) { + com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding serviceMonitor = new com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding(); + serviceMonitor.set_monitor_name(nsMonitorName); + serviceMonitor.set_name(nsServiceName); + s_logger.debug("Trying to unbind the monitor :" + nsMonitorName + " from the service :" + + nsServiceName); + service_lbmonitor_binding.delete(_netscalerService, serviceMonitor); + s_logger.debug("Successfully unbinded the monitor :" + nsMonitorName + " from the service :" + + nsServiceName); + } + + } catch (nitro_exception e) { + if (e.getErrorCode() == NitroError.NS_RESOURCE_NOT_EXISTS) { + return; + } else { + throw new ExecutionException("Failed to unbind monitor :" + nsMonitorName + "from the service :" + + nsServiceName + "due to " + e.getMessage()); + } + } catch (Exception e) { + throw new ExecutionException("Failed to unbind monitor :" + nsMonitorName + "from the service :" + + nsServiceName + "due to " + e.getMessage()); + } + + } + + private void removeLBMonitor(String 
nsMonitorName) throws ExecutionException { + + try { + if (nsMonitorExist(nsMonitorName)) { + lbmonitor monitorObj = lbmonitor.get(_netscalerService, nsMonitorName); + monitorObj.set_respcode(null); + lbmonitor.delete(_netscalerService, monitorObj); + s_logger.info("Successfully deleted monitor : " + nsMonitorName); + } + } catch (nitro_exception e) { + if (e.getErrorCode() == NitroError.NS_RESOURCE_NOT_EXISTS) { + return; + } else { + throw new ExecutionException("Failed to delete monitor :" + nsMonitorName + " due to " + e.getMessage()); + } + } catch (Exception e) { + throw new ExecutionException("Failed to delete monitor :" + nsMonitorName + " due to " + e.getMessage()); + } + + } + public synchronized void applyAutoScaleConfig(LoadBalancerTO loadBalancer) throws Exception, ExecutionException { AutoScaleVmGroupTO vmGroupTO = loadBalancer.getAutoScaleVmGroupTO(); if(!isAutoScaleSupportedInNetScaler()) { throw new ExecutionException("AutoScale not supported in this version of NetScaler"); } - if(vmGroupTO.getState().equals("new")) { - assert !loadBalancer.isRevoked(); - createAutoScaleConfig(loadBalancer); - } - else if(loadBalancer.isRevoked() || vmGroupTO.getState().equals("revoke")) { + if(loadBalancer.isRevoked() || vmGroupTO.getState().equals("revoke")) { removeAutoScaleConfig(loadBalancer); } - else if(vmGroupTO.getState().equals("enabled")) { - assert !loadBalancer.isRevoked(); - enableAutoScaleConfig(loadBalancer, false); - } - else if(vmGroupTO.getState().equals("disabled")) { - assert !loadBalancer.isRevoked(); - disableAutoScaleConfig(loadBalancer, false); - } else { - ///// This should never happen - throw new ExecutionException("Unknown AutoScale Vm Group State :" + vmGroupTO.getState()); + else { + createAutoScaleConfig(loadBalancer); } // AutoScale APIs are successful executed, now save the configuration. 
saveConfiguration(); @@ -1557,7 +1815,14 @@ public class NetscalerResource implements ServerResource { } // Create the autoscale config - enableAutoScaleConfig(loadBalancerTO, false); + if(!loadBalancerTO.getAutoScaleVmGroupTO().getState().equals("disabled")) { + // on restart of network, there might be vmgrps in disabled state, no need to create autoscale config for them + enableAutoScaleConfig(loadBalancerTO, false); + } + else if(loadBalancerTO.getAutoScaleVmGroupTO().getState().equals("disabled")) { + disableAutoScaleConfig(loadBalancerTO, false); + } + return true; } @@ -2229,6 +2494,11 @@ public class NetscalerResource implements ServerResource { return genObjectName("Cloud-VirtualServer", srcIp, srcPort); } + private String generateNSMonitorName(String srcIp, long srcPort) { + // maximum length supported by NS is 31 + return genObjectName("Cloud-Hc", srcIp, srcPort); + } + private String generateNSServerName(String serverIP) { return genObjectName("Cloud-Server-", serverIP); } diff --git a/plugins/pom.xml b/plugins/pom.xml index 88f617b4560..39d99073f09 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -57,6 +57,8 @@ network-elements/dns-notifier storage/image/s3 storage/volume/solidfire + storage/volume/default + alert-handlers/snmp-alerts diff --git a/plugins/storage/volume/default/pom.xml b/plugins/storage/volume/default/pom.xml new file mode 100644 index 00000000000..1eb2e12a816 --- /dev/null +++ b/plugins/storage/volume/default/pom.xml @@ -0,0 +1,56 @@ + + + 4.0.0 + cloud-plugin-storage-volume-default + Apache CloudStack Plugin - Storage Volume default provider + + org.apache.cloudstack + cloudstack-plugins + 4.2.0-SNAPSHOT + ../../../pom.xml + + + + org.apache.cloudstack + cloud-engine-storage-volume + ${project.version} + + + mysql + mysql-connector-java + ${cs.mysql.version} + provided + + + + install + src + test + + + maven-surefire-plugin + + true + + + + integration-test + + test + + + + + + + diff --git 
a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java similarity index 98% rename from engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java rename to plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java index 440cb8c5ea0..04869020468 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java @@ -71,9 +71,9 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.DiskProfile; import com.cloud.vm.dao.VMInstanceDao; -public class AncientPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { +public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { private static final Logger s_logger = Logger - .getLogger(AncientPrimaryDataStoreDriverImpl.class); + .getLogger(CloudStackPrimaryDataStoreDriverImpl.class); @Inject DiskOfferingDao diskOfferingDao; @Inject VMTemplateDao templateDao; @Inject VolumeDao volumeDao; diff --git a/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java new file mode 100644 index 00000000000..b8b08598f69 --- /dev/null +++ b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java @@ -0,0 +1,542 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.datastore.lifecycle; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; +import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.apache.log4j.Logger; + +import 
com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CreateStoragePoolCommand; +import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.alert.AlertManager; +import com.cloud.capacity.Capacity; +import com.cloud.capacity.CapacityVO; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.exception.DiscoveryException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.resource.ResourceManager; +import com.cloud.server.ManagementServer; +import com.cloud.storage.OCFS2Manager; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolAutomation; +import com.cloud.storage.StoragePoolDiscoverer; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.StoragePoolStatus; +import com.cloud.storage.StoragePoolWorkVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.StoragePoolWorkDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.Account; +import com.cloud.user.User; +import com.cloud.user.UserContext; +import com.cloud.user.dao.UserDao; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.UriUtils; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.exception.ExecutionException; +import com.cloud.vm.ConsoleProxyVO; +import com.cloud.vm.DomainRouterVO; +import com.cloud.vm.SecondaryStorageVmVO; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import 
com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.dao.ConsoleProxyDao; +import com.cloud.vm.dao.DomainRouterDao; +import com.cloud.vm.dao.SecondaryStorageVmDao; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; + +public class CloudStackPrimaryDataStoreLifeCycleImpl implements + PrimaryDataStoreLifeCycle { + private static final Logger s_logger = Logger + .getLogger(CloudStackPrimaryDataStoreLifeCycleImpl.class); + @Inject + protected ResourceManager _resourceMgr; + protected List _discoverers; + @Inject + PrimaryDataStoreDao primaryDataStoreDao; + @Inject + protected OCFS2Manager _ocfs2Mgr; + @Inject + DataStoreManager dataStoreMgr; + @Inject + AgentManager agentMgr; + @Inject + StorageManager storageMgr; + + + @Inject + VolumeDao volumeDao; + @Inject + VMInstanceDao vmDao; + @Inject + ManagementServer server; + @Inject + protected VirtualMachineManager vmMgr; + @Inject + protected SecondaryStorageVmDao _secStrgDao; + @Inject + UserVmDao userVmDao; + @Inject + protected UserDao _userDao; + @Inject + protected DomainRouterDao _domrDao; + @Inject + protected StoragePoolHostDao _storagePoolHostDao; + @Inject + protected AlertManager _alertMgr; + @Inject + protected ConsoleProxyDao _consoleProxyDao; + + @Inject + protected StoragePoolWorkDao _storagePoolWorkDao; + @Inject + PrimaryDataStoreHelper dataStoreHelper; + @Inject + StoragePoolAutomation storagePoolAutmation; + + @Override + public DataStore initialize(Map dsInfos) { + Long clusterId = (Long) dsInfos.get("clusterId"); + Long podId = (Long) dsInfos.get("podId"); + Long zoneId = (Long) dsInfos.get("zoneId"); + String url = (String) dsInfos.get("url"); + String providerName = (String)dsInfos.get("providerName"); + if (clusterId != null && podId == null) { + throw new InvalidParameterValueException( + "Cluster id requires pod id"); + } + + PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters(); + + URI uri = null; + try 
{ + uri = new URI(UriUtils.encodeURIComponent(url)); + if (uri.getScheme() == null) { + throw new InvalidParameterValueException("scheme is null " + + url + ", add nfs:// as a prefix"); + } else if (uri.getScheme().equalsIgnoreCase("nfs")) { + String uriHost = uri.getHost(); + String uriPath = uri.getPath(); + if (uriHost == null || uriPath == null + || uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) { + throw new InvalidParameterValueException( + "host or path is null, should be nfs://hostname/path"); + } + } else if (uri.getScheme().equalsIgnoreCase("sharedMountPoint")) { + String uriPath = uri.getPath(); + if (uriPath == null) { + throw new InvalidParameterValueException( + "host or path is null, should be sharedmountpoint://localhost/path"); + } + } else if (uri.getScheme().equalsIgnoreCase("rbd")) { + String uriPath = uri.getPath(); + if (uriPath == null) { + throw new InvalidParameterValueException( + "host or path is null, should be rbd://hostname/pool"); + } + } + } catch (URISyntaxException e) { + throw new InvalidParameterValueException(url + + " is not a valid uri"); + } + + String tags = (String) dsInfos.get("tags"); + Map details = (Map) dsInfos + .get("details"); + + parameters.setTags(tags); + parameters.setDetails(details); + + String scheme = uri.getScheme(); + String storageHost = uri.getHost(); + String hostPath = uri.getPath(); + Object localStorage = dsInfos.get("localStorage"); + if (localStorage != null) { + hostPath = hostPath.replaceFirst("/", ""); + } + String userInfo = uri.getUserInfo(); + int port = uri.getPort(); + StoragePoolVO pool = null; + if (s_logger.isDebugEnabled()) { + s_logger.debug("createPool Params @ scheme - " + scheme + + " storageHost - " + storageHost + " hostPath - " + + hostPath + " port - " + port); + } + if (scheme.equalsIgnoreCase("nfs")) { + if (port == -1) { + port = 2049; + } + parameters.setType(StoragePoolType.NetworkFilesystem); + parameters.setHost(storageHost); + parameters.setPort(port); + 
parameters.setPath(hostPath); + } else if (scheme.equalsIgnoreCase("file")) { + if (port == -1) { + port = 0; + } + parameters.setType(StoragePoolType.Filesystem); + parameters.setHost("localhost"); + parameters.setPort(0); + parameters.setPath(hostPath); + } else if (scheme.equalsIgnoreCase("sharedMountPoint")) { + parameters.setType(StoragePoolType.SharedMountPoint); + parameters.setHost(storageHost); + parameters.setPort(0); + parameters.setPath(hostPath); + } else if (scheme.equalsIgnoreCase("clvm")) { + parameters.setType(StoragePoolType.CLVM); + parameters.setHost(storageHost); + parameters.setPort(0); + parameters.setPath(hostPath.replaceFirst("/", "")); + } else if (scheme.equalsIgnoreCase("rbd")) { + if (port == -1) { + port = 6789; + } + parameters.setType(StoragePoolType.RBD); + parameters.setHost(storageHost); + parameters.setPort(port); + parameters.setPath(hostPath.replaceFirst("/", "")); + parameters.setUserInfo(userInfo); + } else if (scheme.equalsIgnoreCase("PreSetup")) { + parameters.setType(StoragePoolType.PreSetup); + parameters.setHost(storageHost); + parameters.setPort(0); + parameters.setPath(hostPath); + } else if (scheme.equalsIgnoreCase("iscsi")) { + String[] tokens = hostPath.split("/"); + int lun = NumbersUtil.parseInt(tokens[tokens.length - 1], -1); + if (port == -1) { + port = 3260; + } + if (lun != -1) { + if (clusterId == null) { + throw new IllegalArgumentException( + "IscsiLUN need to have clusters specified"); + } + hostPath.replaceFirst("/", ""); + parameters.setType(StoragePoolType.IscsiLUN); + parameters.setHost(storageHost); + parameters.setPort(port); + parameters.setPath(hostPath); + } else { + for (StoragePoolDiscoverer discoverer : _discoverers) { + Map> pools; + try { + pools = discoverer.find(zoneId, podId, uri, details); + } catch (DiscoveryException e) { + throw new IllegalArgumentException( + "Not enough information for discovery " + uri, + e); + } + if (pools != null) { + Map.Entry> entry = pools + 
.entrySet().iterator().next(); + pool = entry.getKey(); + details = entry.getValue(); + break; + } + } + } + } else if (scheme.equalsIgnoreCase("iso")) { + if (port == -1) { + port = 2049; + } + parameters.setType(StoragePoolType.ISO); + parameters.setHost(storageHost); + parameters.setPort(port); + parameters.setPath(hostPath); + } else if (scheme.equalsIgnoreCase("vmfs")) { + parameters.setType(StoragePoolType.VMFS); + parameters.setHost("VMFS datastore: " + hostPath); + parameters.setPort(0); + parameters.setPath(hostPath); + } else if (scheme.equalsIgnoreCase("ocfs2")) { + port = 7777; + parameters.setType(StoragePoolType.OCFS2); + parameters.setHost("clustered"); + parameters.setPort(port); + parameters.setPath(hostPath); + } else { + StoragePoolType type = Enum.valueOf(StoragePoolType.class, scheme); + + if (type != null) { + parameters.setType(type); + parameters.setHost(storageHost); + parameters.setPort(0); + parameters.setPath(hostPath); + } else { + s_logger.warn("Unable to figure out the scheme for URI: " + uri); + throw new IllegalArgumentException( + "Unable to figure out the scheme for URI: " + uri); + } + } + + if (localStorage == null) { + List pools = primaryDataStoreDao + .listPoolByHostPath(storageHost, hostPath); + if (!pools.isEmpty() && !scheme.equalsIgnoreCase("sharedmountpoint")) { + Long oldPodId = pools.get(0).getPodId(); + throw new CloudRuntimeException("Storage pool " + uri + + " already in use by another pod (id=" + oldPodId + ")"); + } + } + + Object existingUuid = dsInfos.get("uuid"); + String uuid = null; + + if (existingUuid != null) { + uuid = (String)existingUuid; + } else if (scheme.equalsIgnoreCase("sharedmountpoint") + || scheme.equalsIgnoreCase("clvm")) { + uuid = UUID.randomUUID().toString(); + } else if (scheme.equalsIgnoreCase("PreSetup")) { + uuid = hostPath.replace("/", ""); + } else { + uuid = UUID.nameUUIDFromBytes( + new String(storageHost + hostPath).getBytes()).toString(); + } + + List spHandles = 
primaryDataStoreDao + .findIfDuplicatePoolsExistByUUID(uuid); + if ((spHandles != null) && (spHandles.size() > 0)) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Another active pool with the same uuid already exists"); + } + throw new CloudRuntimeException( + "Another active pool with the same uuid already exists"); + } + + String poolName = (String) dsInfos.get("name"); + + parameters.setUuid(uuid); + parameters.setZoneId(zoneId); + parameters.setPodId(podId); + parameters.setName(poolName); + parameters.setClusterId(clusterId); + parameters.setProviderName(providerName); + + return dataStoreHelper.createPrimaryDataStore(parameters); + } + + protected boolean createStoragePool(long hostId, StoragePool pool) { + s_logger.debug("creating pool " + pool.getName() + " on host " + + hostId); + if (pool.getPoolType() != StoragePoolType.NetworkFilesystem + && pool.getPoolType() != StoragePoolType.Filesystem + && pool.getPoolType() != StoragePoolType.IscsiLUN + && pool.getPoolType() != StoragePoolType.Iscsi + && pool.getPoolType() != StoragePoolType.VMFS + && pool.getPoolType() != StoragePoolType.SharedMountPoint + && pool.getPoolType() != StoragePoolType.PreSetup + && pool.getPoolType() != StoragePoolType.OCFS2 + && pool.getPoolType() != StoragePoolType.RBD + && pool.getPoolType() != StoragePoolType.CLVM) { + s_logger.warn(" Doesn't support storage pool type " + + pool.getPoolType()); + return false; + } + CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); + final Answer answer = agentMgr.easySend(hostId, cmd); + if (answer != null && answer.getResult()) { + return true; + } else { + primaryDataStoreDao.expunge(pool.getId()); + String msg = ""; + if (answer != null) { + msg = "Can not create storage pool through host " + hostId + + " due to " + answer.getDetails(); + s_logger.warn(msg); + } else { + msg = "Can not create storage pool through host " + hostId + + " due to CreateStoragePoolCommand returns null"; + s_logger.warn(msg); + } + throw 
new CloudRuntimeException(msg); + } + } + + @Override + public boolean attachCluster(DataStore store, ClusterScope scope) { + PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store; + // Check if there is host up in this cluster + List allHosts = _resourceMgr.listAllUpAndEnabledHosts( + Host.Type.Routing, primarystore.getClusterId(), + primarystore.getPodId(), primarystore.getDataCenterId()); + if (allHosts.isEmpty()) { + throw new CloudRuntimeException( + "No host up to associate a storage pool with in cluster " + + primarystore.getClusterId()); + } + + if (primarystore.getPoolType() == StoragePoolType.OCFS2 + && !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) { + s_logger.warn("Can not create storage pool " + primarystore + + " on cluster " + primarystore.getClusterId()); + primaryDataStoreDao.expunge(primarystore.getId()); + return false; + } + + boolean success = false; + for (HostVO h : allHosts) { + success = createStoragePool(h.getId(), primarystore); + if (success) { + break; + } + } + + s_logger.debug("In createPool Adding the pool to each of the hosts"); + List poolHosts = new ArrayList(); + for (HostVO h : allHosts) { + try { + this.storageMgr.connectHostToSharedPool(h.getId(), + primarystore.getId()); + poolHosts.add(h); + } catch (Exception e) { + s_logger.warn("Unable to establish a connection between " + h + + " and " + primarystore, e); + } + } + + if (poolHosts.isEmpty()) { + s_logger.warn("No host can access storage pool " + primarystore + + " on cluster " + primarystore.getClusterId()); + primaryDataStoreDao.expunge(primarystore.getId()); + return false; + } + + this.dataStoreHelper.attachCluster(store); + return true; + } + + @Override + public boolean attachZone(DataStore dataStore, ZoneScope scope) { + List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId()); + for (HostVO host : hosts) { + try { + this.storageMgr.connectHostToSharedPool(host.getId(), + dataStore.getId()); + } 
catch (Exception e) { + s_logger.warn("Unable to establish a connection between " + host + + " and " + dataStore, e); + } + } + this.dataStoreHelper.attachZone(dataStore); + return true; + } + + @Override + public boolean dettach() { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean unmanaged() { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean maintain(DataStore dataStore) { + storagePoolAutmation.maintain(dataStore); + this.dataStoreHelper.maintain(dataStore); + return true; + } + + @Override + public boolean cancelMaintain(DataStore store) { + this.dataStoreHelper.cancelMaintain(store); + storagePoolAutmation.cancelMaintain(store); + return true; + } + + @DB + @Override + public boolean deleteDataStore(DataStore store) { + List hostPoolRecords = this._storagePoolHostDao + .listByPoolId(store.getId()); + StoragePool pool = (StoragePool)store; + boolean deleteFlag = false; + // Remove the SR associated with the Xenserver + for (StoragePoolHostVO host : hostPoolRecords) { + DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand( + pool); + final Answer answer = agentMgr.easySend(host.getHostId(), + deleteCmd); + + if (answer != null && answer.getResult()) { + deleteFlag = true; + break; + } else { + if (answer != null) { + s_logger.debug("Failed to delete storage pool: " + answer.getResult()); + } + } + } + + if (!deleteFlag) { + throw new CloudRuntimeException("Failed to delete storage pool on host"); + } + + this.dataStoreHelper.deletePrimaryDataStore(store); + return false; + } + + @Override + public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { + this.dataStoreHelper.attachHost(store, scope, existingInfo); + return true; + } +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java 
b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/provider/CloudStackPrimaryDataStoreProviderImpl.java similarity index 58% rename from engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java rename to plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/provider/CloudStackPrimaryDataStoreProviderImpl.java index 09e78e45659..4d46d99fab3 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java +++ b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/provider/CloudStackPrimaryDataStoreProviderImpl.java @@ -18,61 +18,63 @@ */ package org.apache.cloudstack.storage.datastore.provider; +import java.util.HashSet; import java.util.Map; - -import javax.inject.Inject; +import java.util.Set; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; -import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; -import org.apache.cloudstack.storage.datastore.driver.AncientPrimaryDataStoreDriverImpl; -import org.apache.cloudstack.storage.datastore.lifecycle.AncientPrimaryDataStoreLifeCycleImpl; -import org.springframework.stereotype.Component; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.cloudstack.storage.datastore.driver.CloudStackPrimaryDataStoreDriverImpl; +import org.apache.cloudstack.storage.datastore.lifecycle.CloudStackPrimaryDataStoreLifeCycleImpl; import com.cloud.utils.component.ComponentContext; -@Component -public class AncientPrimaryDataStoreProviderImpl implements +public class CloudStackPrimaryDataStoreProviderImpl implements PrimaryDataStoreProvider { private final String providerName = "ancient primary 
data store provider"; protected PrimaryDataStoreDriver driver; - @Inject - PrimaryDataStoreProviderManager storeMgr; + protected HypervisorHostListener listener; protected DataStoreLifeCycle lifecyle; - protected String uuid; - protected long id; + + CloudStackPrimaryDataStoreProviderImpl() { + + } + @Override public String getName() { return providerName; } @Override - public DataStoreLifeCycle getLifeCycle() { + public DataStoreLifeCycle getDataStoreLifeCycle() { return this.lifecyle; } @Override public boolean configure(Map params) { - lifecyle = ComponentContext.inject(AncientPrimaryDataStoreLifeCycleImpl.class); - driver = ComponentContext.inject(AncientPrimaryDataStoreDriverImpl.class); - uuid = (String)params.get("uuid"); - id = (Long)params.get("id"); - storeMgr.registerDriver(uuid, this.driver); - HypervisorHostListener listener = ComponentContext.inject(DefaultHostListener.class); - storeMgr.registerHostListener(uuid, listener); + lifecyle = ComponentContext.inject(CloudStackPrimaryDataStoreLifeCycleImpl.class); + driver = ComponentContext.inject(CloudStackPrimaryDataStoreDriverImpl.class); + listener = ComponentContext.inject(DefaultHostListener.class); return true; } @Override - public String getUuid() { - return this.uuid; + public PrimaryDataStoreDriver getDataStoreDriver() { + return this.driver; } @Override - public long getId() { - return this.id; + public HypervisorHostListener getHostListener() { + return this.listener; + } + + @Override + public Set getTypes() { + Set types = new HashSet(); + types.add(DataStoreProviderType.PRIMARY); + return types; } - } diff --git a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java index 91c446fe5ae..dc29fb835d2 100644 --- a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java +++ 
b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java @@ -26,10 +26,10 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; import org.apache.cloudstack.storage.command.CreateObjectAnswer; import org.apache.cloudstack.storage.command.CreateVolumeFromBaseImageCommand; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProvider; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; diff --git a/pom.xml b/pom.xml index e75c420a616..dab3db2c7c1 100644 --- a/pom.xml +++ b/pom.xml @@ -26,8 +26,8 @@ Apache CloudStack is an IaaS (“Infrastracture as a Service”) cloud orchestration platform. http://www.cloudstack.org - scm:git:https://git-wip-us.apache.org/repos/asf/incubator-cloudstack.git - scm:git:https://git-wip-us.apache.org/repos/asf/incubator-cloudstack.git + scm:git:https://git-wip-us.apache.org/repos/asf/cloudstack.git + scm:git:https://git-wip-us.apache.org/repos/asf/cloudstack.git jira @@ -99,40 +99,49 @@ Apache CloudStack User List - cloudstack-users-subscribe@incubator.apache.org - cloudstack-users-unsubscribe@incubator.apache.org - cloudstack-users@incubator.apache.org - http://mail-archives.apache.org/mod_mbox/incubator-cloudstack-users + users-subscribe@cloudstack.apache.org + users-unsubscribe@cloudstack.apache.org + users@cloudstack.apache.org + http://mail-archives.apache.org/mod_mbox/cloudstack-users + + http://mail-archives.apache.org/mod_mbox/incubator-cloudstack-users + Apache CloudStack Developer List - cloudstack-dev-subscribe@incubator.apache.org - cloudstack-dev-unsubscribe@incubator.apache.org - cloudstack-dev@incubator.apache.org - http://mail-archives.apache.org/mod_mbox/incubator-cloudstack-dev + 
dev-subscribe@cloudstack.apache.org + dev-unsubscribe@cloudstack.apache.org + dev@cloudstack.apache.org + http://mail-archives.apache.org/mod_mbox/cloudstack-dev + + http://mail-archives.apache.org/mod_mbox/incubator-cloudstack-dev + Apache CloudStack Commits List - cloudstack-commits-subscribe@incubator.apache.org - cloudstack-commits-unsubscribe@incubator.apache.org - cloudstack-commits@incubator.apache.org - http://mail-archives.apache.org/mod_mbox/incubator-cloudstack-commits + commits-subscribe@cloudstack.apache.org + commits-unsubscribe@cloudstack.apache.org + commits@cloudstack.apache.org + http://mail-archives.apache.org/mod_mbox/cloudstack-commits + + http://mail-archives.apache.org/mod_mbox/incubator-cloudstack-commits + The Apache CloudStack Team - cloudstack-dev@incubator.apache.org - http://incubator.apache.org/projects/cloudstack.html + dev@cloudstack.apache.org + http://cloudstack.apache.org/ Apache Software Foundation http://apache.org/ - Jenkin - http://jenkins.cloudstack.org/ + Jenkins + http://builds.apache.org/ @@ -182,7 +191,6 @@ ${cs.junit.version} test - org.springframework spring-core @@ -222,14 +230,12 @@ 1.9.5 test - org.springframework spring-test ${org.springframework.version} test - org.aspectj aspectjrt @@ -276,7 +282,7 @@ - + @@ -334,16 +340,9 @@ dist/console-proxy/js/jquery.js scripts/vm/systemvm/id_rsa.cloud tools/devcloud/basebuild/puppet-devcloudinitial/files/network.conf - tools/appliance/definitions/systemvmtemplate/base.sh - tools/appliance/definitions/systemvmtemplate/cleanup.sh - tools/appliance/definitions/systemvmtemplate/definition.rb - tools/appliance/definitions/systemvmtemplate/preseed.cfg - tools/appliance/definitions/systemvmtemplate/zerodisk.sh - tools/appliance/definitions/systemvmtemplate64/base.sh - tools/appliance/definitions/systemvmtemplate64/cleanup.sh - tools/appliance/definitions/systemvmtemplate64/definition.rb - tools/appliance/definitions/systemvmtemplate64/preseed.cfg - 
tools/appliance/definitions/systemvmtemplate64/zerodisk.sh + tools/appliance/definitions/devcloud/* + tools/appliance/definitions/systemvmtemplate/* + tools/appliance/definitions/systemvmtemplate64/* tools/cli/cloudmonkey.egg-info/* tools/devcloud/src/deps/boxes/basebox-build/definition.rb tools/devcloud/src/deps/boxes/basebox-build/preseed.cfg @@ -509,113 +508,5 @@ vmware-base - - simulator - - - deploydb-simulator - - - - - - org.codehaus.mojo - properties-maven-plugin - 1.0-alpha-2 - - - initialize - - read-project-properties - - - - ${project.basedir}/utils/conf/db.properties - ${project.basedir}/utils/conf/db.properties.override - - true - - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - - mysql - mysql-connector-java - ${cs.mysql.version} - - - commons-dbcp - commons-dbcp - ${cs.dbcp.version} - - - commons-pool - commons-pool - ${cs.pool.version} - - - org.jasypt - jasypt - ${cs.jasypt.version} - - - org.apache.cloudstack - cloud-utils - ${project.version} - - - org.apache.cloudstack - cloud-server - ${project.version} - - - - - process-resources - create-schema - - java - - - - - false - true - - org.apache.cloudstack - cloud-server - - com.cloud.upgrade.DatabaseCreator - - - ${project.basedir}/utils/conf/db.properties - ${project.basedir}/utils/conf/db.properties.override - - ${basedir}/target/db/create-schema-simulator.sql - ${basedir}/target/db/templates.simulator.sql - - com.cloud.upgrade.DatabaseUpgradeChecker - --database=simulator - --rootpassword=${db.root.password} - - - - - catalina.home - ${project.basedir}/utils - - - - - - - diff --git a/python/lib/cloudutils/serviceConfigServer.py b/python/lib/cloudutils/serviceConfigServer.py index a08ce02d766..2c19d7e2a31 100644 --- a/python/lib/cloudutils/serviceConfigServer.py +++ b/python/lib/cloudutils/serviceConfigServer.py @@ -33,11 +33,11 @@ class cloudManagementConfig(serviceCfgBase): raise CloudInternalException("Cannot get hostname, 'hostname --fqdn failed'") if 
self.syscfg.env.svrMode == "mycloud": - cfo = configFileOps("/usr/share/cloud/management/conf/environment.properties", self) + cfo = configFileOps("/usr/share/cloudstack-management/conf/environment.properties", self) cfo.addEntry("cloud-stack-components-specification", "components-cloudzones.xml") cfo.save() - cfo = configFileOps("/usr/share/cloud/management/conf/db.properties", self) + cfo = configFileOps("/usr/share/cloudstack-management/conf/db.properties", self) dbHost = cfo.getEntry("db.cloud.host") dbPort = cfo.getEntry("db.cloud.port") dbUser = cfo.getEntry("db.cloud.username") diff --git a/python/lib/cloudutils/utilities.py b/python/lib/cloudutils/utilities.py index 739a48385a0..f7f25f4ca00 100755 --- a/python/lib/cloudutils/utilities.py +++ b/python/lib/cloudutils/utilities.py @@ -110,7 +110,7 @@ class Distribution: self.distro = "Fedora" elif os.path.exists("/etc/redhat-release"): version = file("/etc/redhat-release").readline() - if version.find("Red Hat Enterprise Linux Server release 6") != -1 or version.find("Scientific Linux release 6") != -1 or version.find("CentOS Linux release 6") != -1 or version.find("CentOS release 6.2") != -1 or version.find("CentOS release 6.3") != -1: + if version.find("Red Hat Enterprise Linux Server release 6") != -1 or version.find("Scientific Linux release 6") != -1 or version.find("CentOS Linux release 6") != -1 or version.find("CentOS release 6.") != -1: self.distro = "RHEL6" elif version.find("CentOS release") != -1: self.distro = "CentOS" diff --git a/server/src/com/cloud/alert/AlertManagerImpl.java b/server/src/com/cloud/alert/AlertManagerImpl.java index f8a8fd8b1b9..a45482fd4ef 100755 --- a/server/src/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/com/cloud/alert/AlertManagerImpl.java @@ -84,6 +84,7 @@ import com.sun.mail.smtp.SMTPTransport; @Local(value={AlertManager.class}) public class AlertManagerImpl extends ManagerBase implements AlertManager { private static final Logger s_logger = 
Logger.getLogger(AlertManagerImpl.class.getName()); + private static final Logger s_alertsLogger = Logger.getLogger("org.apache.cloudstack.alerts"); private static final long INITIAL_CAPACITY_CHECK_DELAY = 30L * 1000L; // thirty seconds expressed in milliseconds @@ -256,6 +257,9 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager { try { if (_emailAlert != null) { _emailAlert.sendAlert(alertType, dataCenterId, podId, null, subject, body); + } else { + s_alertsLogger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + + podId + " // clusterId:: " + null + " // message:: " + subject ); } } catch (Exception ex) { s_logger.error("Problem sending email alert", ex); @@ -789,6 +793,8 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager { // TODO: make sure this handles SSL transport (useAuth is true) and regular public void sendAlert(short alertType, long dataCenterId, Long podId, Long clusterId, String subject, String content) throws MessagingException, UnsupportedEncodingException { + s_alertsLogger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + + podId + " // clusterId:: " + null + " // message:: " + subject); AlertVO alert = null; if ((alertType != AlertManager.ALERT_TYPE_HOST) && (alertType != AlertManager.ALERT_TYPE_USERVM) && diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java index 2546f292883..663139da41f 100755 --- a/server/src/com/cloud/api/ApiResponseHelper.java +++ b/server/src/com/cloud/api/ApiResponseHelper.java @@ -46,6 +46,8 @@ import org.apache.cloudstack.api.response.ControlledViewEntityResponse; import org.apache.cloudstack.api.response.IPAddressResponse; import org.apache.cloudstack.api.response.InstanceGroupResponse; import org.apache.cloudstack.api.response.IpForwardingRuleResponse; +import org.apache.cloudstack.api.response.LBHealthCheckPolicyResponse; +import 
org.apache.cloudstack.api.response.LBHealthCheckResponse; import org.apache.cloudstack.api.response.LBStickinessPolicyResponse; import org.apache.cloudstack.api.response.LBStickinessResponse; import org.apache.cloudstack.api.response.LDAPConfigResponse; @@ -143,6 +145,13 @@ import com.cloud.network.dao.NetworkVO; import com.cloud.network.dao.PhysicalNetworkVO; import com.cloud.network.router.VirtualRouter; import com.cloud.network.rules.*; +import com.cloud.network.rules.FirewallRule; +import com.cloud.network.rules.FirewallRuleVO; +import com.cloud.network.rules.HealthCheckPolicy; +import com.cloud.network.rules.LoadBalancer; +import com.cloud.network.rules.PortForwardingRule; +import com.cloud.network.rules.StaticNatRule; +import com.cloud.network.rules.StickinessPolicy; import com.cloud.network.security.SecurityGroup; import com.cloud.network.security.SecurityGroupVO; import com.cloud.network.security.SecurityRule; @@ -1099,6 +1108,7 @@ public class ApiResponseHelper implements ResponseGenerator { VpnUsersResponse vpnResponse = new VpnUsersResponse(); vpnResponse.setId(vpnUser.getUuid()); vpnResponse.setUserName(vpnUser.getUsername()); + vpnResponse.setState(vpnUser.getState().toString()); populateOwner(vpnResponse, vpnUser); @@ -2750,6 +2760,58 @@ public class ApiResponseHelper implements ResponseGenerator { return spResponse; } + @Override + public LBHealthCheckResponse createLBHealthCheckPolicyResponse( + List healthcheckPolicies, LoadBalancer lb) { + LBHealthCheckResponse hcResponse = new LBHealthCheckResponse(); + + if (lb == null) + return hcResponse; + hcResponse.setlbRuleId(lb.getUuid()); + Account account = ApiDBUtils.findAccountById(lb.getAccountId()); + if (account != null) { + hcResponse.setAccountName(account.getAccountName()); + Domain domain = ApiDBUtils.findDomainById(account.getDomainId()); + if (domain != null) { + hcResponse.setDomainId(domain.getUuid()); + hcResponse.setDomainName(domain.getName()); + } + } + + List responses = new 
ArrayList(); + for (HealthCheckPolicy healthcheckPolicy : healthcheckPolicies) { + LBHealthCheckPolicyResponse ruleResponse = new LBHealthCheckPolicyResponse(healthcheckPolicy); + responses.add(ruleResponse); + } + hcResponse.setRules(responses); + + hcResponse.setObjectName("healthcheckpolicies"); + return hcResponse; + } + + @Override + public LBHealthCheckResponse createLBHealthCheckPolicyResponse(HealthCheckPolicy healthcheckPolicy, LoadBalancer lb) { + LBHealthCheckResponse hcResponse = new LBHealthCheckResponse(); + + hcResponse.setlbRuleId(lb.getUuid()); + Account accountTemp = ApiDBUtils.findAccountById(lb.getAccountId()); + if (accountTemp != null) { + hcResponse.setAccountName(accountTemp.getAccountName()); + Domain domain = ApiDBUtils.findDomainById(accountTemp.getDomainId()); + if (domain != null) { + hcResponse.setDomainId(domain.getUuid()); + hcResponse.setDomainName(domain.getName()); + } + } + + List responses = new ArrayList(); + LBHealthCheckPolicyResponse ruleResponse = new LBHealthCheckPolicyResponse(healthcheckPolicy); + responses.add(ruleResponse); + hcResponse.setRules(responses); + hcResponse.setObjectName("healthcheckpolicies"); + return hcResponse; + } + @Override public LDAPConfigResponse createLDAPConfigResponse(String hostname, Integer port, Boolean useSSL, String queryFilter, diff --git a/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index f561449fe2a..8b6abf8a3e4 100644 --- a/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -185,6 +185,7 @@ public class UserVmJoinDaoImpl extends GenericDaoBase implem nicResponse.setGateway(userVm.getGateway()); nicResponse.setNetmask(userVm.getNetmask()); nicResponse.setNetworkid(userVm.getNetworkUuid()); + nicResponse.setNetworkName(userVm.getNetworkName()); nicResponse.setMacAddress(userVm.getMacAddress()); 
nicResponse.setIp6Address(userVm.getIp6Address()); nicResponse.setIp6Gateway(userVm.getIp6Gateway()); @@ -246,6 +247,7 @@ public class UserVmJoinDaoImpl extends GenericDaoBase implem nicResponse.setGateway(uvo.getGateway()); nicResponse.setNetmask(uvo.getNetmask()); nicResponse.setNetworkid(uvo.getNetworkUuid()); + nicResponse.setNetworkName(uvo.getNetworkName()); nicResponse.setMacAddress(uvo.getMacAddress()); nicResponse.setIp6Address(uvo.getIp6Address()); nicResponse.setIp6Gateway(uvo.getIp6Gateway()); diff --git a/server/src/com/cloud/api/query/vo/UserVmJoinVO.java b/server/src/com/cloud/api/query/vo/UserVmJoinVO.java index d7238224e4e..33c49cdeae9 100644 --- a/server/src/com/cloud/api/query/vo/UserVmJoinVO.java +++ b/server/src/com/cloud/api/query/vo/UserVmJoinVO.java @@ -293,6 +293,9 @@ public class UserVmJoinVO extends BaseViewVO implements ControlledViewEntity { @Column(name="network_uuid") private String networkUuid; + @Column(name="network_name") + private String networkName; + @Column(name="traffic_type") @Enumerated(value=EnumType.STRING) private TrafficType trafficType; @@ -1168,6 +1171,16 @@ public class UserVmJoinVO extends BaseViewVO implements ControlledViewEntity { } + public String getNetworkName() { + return networkName; + } + + + public void setNetworkName(String networkName) { + this.networkName = networkName; + } + + public TrafficType getTrafficType() { return trafficType; } diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index 0e3b18b0a51..2a21528ac6b 100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -208,7 +208,11 @@ public enum Config { AlertPurgeInterval("Advanced", ManagementServer.class, Integer.class, "alert.purge.interval", "86400", "The interval (in seconds) to wait before running the alert purge thread", null), AlertPurgeDelay("Advanced", ManagementServer.class, Integer.class, "alert.purge.delay", "0", 
"Alerts older than specified number days will be purged. Set this value to 0 to never delete alerts", null), - DirectAttachNetworkEnabled("Advanced", ManagementServer.class, Boolean.class, "direct.attach.network.externalIpAllocator.enabled", "false", "Direct-attach VMs using external DHCP server", "true,false"), + // LB HealthCheck Interval. + LBHealthCheck("Advanced", ManagementServer.class, String.class, "healthcheck.update.interval", "600", + "Time Interval to fetch the LB health check states (in sec)", null), + + DirectAttachNetworkEnabled("Advanced", ManagementServer.class, Boolean.class, "direct.attach.network.externalIpAllocator.enabled", "false", "Direct-attach VMs using external DHCP server", "true,false"), DirectAttachNetworkExternalAPIURL("Advanced", ManagementServer.class, String.class, "direct.attach.network.externalIpAllocator.url", null, "Direct-attach VMs using external DHCP server (API url)", null), CheckPodCIDRs("Advanced", ManagementServer.class, String.class, "check.pod.cidrs", "true", "If true, different pods must belong to different CIDR subnets.", "true,false"), NetworkGcWait("Advanced", ManagementServer.class, Integer.class, "network.gc.wait", "600", "Time (in seconds) to wait before shutting down a network that's not in used", null), diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index b1ad6b7fb9d..8dbf08178d4 100755 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -87,7 +87,6 @@ import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DataCenterIpAddressDao; import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDao; -import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDaoImpl; import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.PodVlanMapDao; import com.cloud.dc.dao.VlanDao; @@ -3337,20 +3336,24 @@ 
public class ConfigurationManagerImpl extends ManagerBase implements Configurati String multicastRateStr = _configDao.getValue("multicast.throttling.rate"); int multicastRate = ((multicastRateStr == null) ? 10 : Integer.parseInt(multicastRateStr)); tags = cleanupTags(tags); - - if (specifyVlan != specifyIpRanges) { - throw new InvalidParameterValueException("SpecifyVlan should be equal to specifyIpRanges which is " + specifyIpRanges); - } // specifyVlan should always be true for Shared network offerings if (!specifyVlan && type == GuestType.Shared) { throw new InvalidParameterValueException("SpecifyVlan should be true if network offering's type is " + type); } - //specifyIpRanges should always be false for Isolated offering with Source nat service enabled - if (specifyVlan && type == GuestType.Isolated && serviceProviderMap.containsKey(Service.SourceNat)) { - throw new InvalidParameterValueException("SpecifyVlan should be false if the network offering type is " - + type + " and service " + Service.SourceNat.getName() + " is supported"); + //specifyIpRanges should always be true for Shared networks + //specifyIpRanges can only be true for Isolated networks with no Source Nat service + if (specifyIpRanges) { + if (type == GuestType.Isolated) { + if (serviceProviderMap.containsKey(Service.SourceNat)) { + throw new InvalidParameterValueException("SpecifyIpRanges can only be true for Shared network offerings and Isolated with no SourceNat service"); + } + } + } else { + if (type == GuestType.Shared) { + throw new InvalidParameterValueException("SpecifyIpRanges should always be true for Shared network offerings"); + } } // isPersistent should always be false for Shared network Offerings @@ -3374,7 +3377,6 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati + " with availability " + Availability.Required); } } - boolean dedicatedLb = false; boolean elasticLb = false; @@ -3481,6 +3483,7 @@ public class ConfigurationManagerImpl extends 
ManagerBase implements Configurati return offering; } + @Override public List searchForNetworkOfferings(ListNetworkOfferingsCmd cmd) { Boolean isAscending = Boolean.parseBoolean(_configDao.getValue("sortkey.algorithm")); diff --git a/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java b/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java index 8b2a1441151..0f454cdb582 100755 --- a/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java +++ b/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java @@ -18,12 +18,16 @@ package com.cloud.deploy; import javax.ejb.Local; +import org.apache.log4j.Logger; + import com.cloud.deploy.DeploymentPlanner.AllocationAlgorithm; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.vm.UserVmVO; @Local(value = {DeployPlannerSelector.class}) public class HypervisorVmPlannerSelector extends AbstractDeployPlannerSelector { + private static final Logger s_logger = Logger.getLogger(HypervisorVmPlannerSelector.class); + @Override public String selectPlanner(UserVmVO vm) { if (vm.getHypervisorType() != HypervisorType.BareMetal) { @@ -38,6 +42,10 @@ public class HypervisorVmPlannerSelector extends AbstractDeployPlannerSelector { || _allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_firstfit.toString())) { return "UserConcentratedPodPlanner"; } + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("The allocation algorithm is null, cannot select the planner"); + } } } diff --git a/server/src/com/cloud/event/ActionEventInterceptor.java b/server/src/com/cloud/event/ActionEventInterceptor.java index fb89498ffce..a6c2565510e 100644 --- a/server/src/com/cloud/event/ActionEventInterceptor.java +++ b/server/src/com/cloud/event/ActionEventInterceptor.java @@ -19,22 +19,29 @@ package com.cloud.event; import java.lang.reflect.AnnotatedElement; import java.lang.reflect.Method; +import org.apache.log4j.Logger; import org.aspectj.lang.ProceedingJoinPoint; import 
org.aspectj.lang.reflect.MethodSignature; import com.cloud.user.UserContext; +import com.cloud.utils.component.ComponentMethodProxyCache; public class ActionEventInterceptor { + private static final Logger s_logger = Logger.getLogger(ActionEventInterceptor.class); public ActionEventInterceptor() { } public Object AroundAnyMethod(ProceedingJoinPoint call) throws Throwable { MethodSignature methodSignature = (MethodSignature)call.getSignature(); - Method targetMethod = methodSignature.getMethod(); - if(needToIntercept(targetMethod)) { + + // Note: AOP for ActionEvent is triggered annotation, no need to check the annotation on method again + Method targetMethod = ComponentMethodProxyCache.getTargetMethod( + methodSignature.getMethod(), call.getTarget()); + + if(targetMethod != null) { EventVO event = interceptStart(targetMethod); - + boolean success = true; Object ret = null; try { @@ -49,6 +56,8 @@ public class ActionEventInterceptor { } } return ret; + } else { + s_logger.error("Unable to find the proxied method behind. 
Method: " + methodSignature.getMethod().getName()); } return call.proceed(); } diff --git a/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java b/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java index f6ab7780f86..ae00bf2bb13 100644 --- a/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java +++ b/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java @@ -25,6 +25,9 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.response.ExternalFirewallResponse; +import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; @@ -42,7 +45,6 @@ import com.cloud.agent.api.to.FirewallRuleTO; import com.cloud.agent.api.to.IpAddressTO; import com.cloud.agent.api.to.PortForwardingRuleTO; import com.cloud.agent.api.to.StaticNatRuleTO; -import org.apache.cloudstack.api.ApiConstants; import com.cloud.configuration.Config; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.DataCenter; @@ -60,7 +62,6 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.host.dao.HostDetailsDao; -import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; import com.cloud.network.Networks.TrafficType; import com.cloud.network.dao.ExternalFirewallDeviceDao; import com.cloud.network.dao.ExternalFirewallDeviceVO; @@ -80,10 +81,10 @@ import com.cloud.network.dao.PhysicalNetworkServiceProviderVO; import com.cloud.network.dao.PhysicalNetworkVO; import com.cloud.network.dao.VpnUserDao; import com.cloud.network.rules.FirewallRule; +import com.cloud.network.rules.FirewallRule.Purpose; import com.cloud.network.rules.FirewallRuleVO; import com.cloud.network.rules.PortForwardingRule; import com.cloud.network.rules.StaticNat; -import 
com.cloud.network.rules.FirewallRule.Purpose; import com.cloud.network.rules.dao.PortForwardingRulesDao; import com.cloud.offering.NetworkOffering; import com.cloud.offerings.dao.NetworkOfferingDao; @@ -92,7 +93,6 @@ import com.cloud.resource.ResourceState; import com.cloud.resource.ResourceStateAdapter; import com.cloud.resource.ServerResource; import com.cloud.resource.UnableDeleteHostException; -import org.apache.cloudstack.api.response.ExternalFirewallResponse; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.dao.AccountDao; @@ -105,9 +105,8 @@ import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.utils.net.UrlUtil; -import com.cloud.vm.Nic.ReservationStrategy; -import com.cloud.vm.Nic.State; import com.cloud.vm.NicVO; +import com.cloud.vm.Nic.ReservationStrategy; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.NicDao; @@ -116,7 +115,8 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl @Inject HostDao _hostDao; @Inject NetworkServiceMapDao _ntwkSrvcProviderDao; @Inject DataCenterDao _dcDao; - @Inject NetworkModel _networkMgr; + @Inject NetworkModel _networkModel; + @Inject NetworkManager _networkMgr; @Inject InlineLoadBalancerNicMapDao _inlineLoadBalancerNicMapDao; @Inject NicDao _nicDao; @Inject AgentManager _agentMgr; @@ -428,7 +428,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl IPAddressVO sourceNatIp = null; if (!sharedSourceNat) { // Get the source NAT IP address for this account - List sourceNatIps = _networkMgr.listPublicIpsAssignedToAccount(network.getAccountId(), + List sourceNatIps = _networkModel.listPublicIpsAssignedToAccount(network.getAccountId(), zoneId, true); if (sourceNatIps.size() != 1) { @@ -455,7 +455,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl } // Get network rate - Integer 
networkRate = _networkMgr.getNetworkRate(network.getId(), null); + Integer networkRate = _networkModel.getNetworkRate(network.getId(), null); IpAddressTO ip = new IpAddressTO(account.getAccountId(), sourceNatIpAddress, add, false, !sharedSourceNat, publicVlanTag, null, null, null, networkRate, false); IpAddressTO[] ips = new IpAddressTO[1]; @@ -483,7 +483,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl if (add && (!reservedIpAddressesForGuestNetwork.contains(network.getGateway()))) { // Insert a new NIC for this guest network to reserve the gateway address - savePlaceholderNic(network, network.getGateway()); + _networkMgr.savePlaceholderNic(network, network.getGateway()); } // Delete any mappings used for inline external load balancers in this network @@ -499,14 +499,13 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl // on network shutdown, delete placeHolder nics used for the firewall device if (!add) { - List guestIps = _nicDao.listByNetworkId(network.getId()); - for (NicVO guestIp : guestIps) { - // only external firewall and external load balancer will create NicVO with PlaceHolder reservation strategy - if (guestIp.getReservationStrategy().equals(ReservationStrategy.PlaceHolder) && guestIp.getIp4Address().equals(network.getGateway())) { - _nicDao.remove(guestIp.getId()); + List nics = _nicDao.listByNetworkId(network.getId()); + for (NicVO nic : nics) { + if (nic.getReservationStrategy().equals(ReservationStrategy.PlaceHolder) && nic.getIp4Address().equals(network.getGateway())) { + s_logger.debug("Removing placeholder nic " + nic + " for the network " + network); + _nicDao.remove(nic.getId()); } } - freeFirewallForNetwork(network); } @@ -516,6 +515,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl return true; } + @Override public boolean applyFirewallRules(Network network, List rules) throws ResourceUnavailableException { // Find the external firewall 
in this zone @@ -541,7 +541,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl if (rule.getSourceCidrList() == null && (rule.getPurpose() == Purpose.Firewall || rule.getPurpose() == Purpose.NetworkACL)) { _fwRulesDao.loadSourceCidrs((FirewallRuleVO)rule); } - IpAddress sourceIp = _networkMgr.getIp(rule.getSourceIpAddressId()); + IpAddress sourceIp = _networkModel.getIp(rule.getSourceIpAddressId()); FirewallRuleTO ruleTO = new FirewallRuleTO(rule, null, sourceIp.getAddress().addr()); rulesTO.add(ruleTO); } @@ -568,7 +568,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl List staticNatRules = new ArrayList(); for (StaticNat rule : rules) { - IpAddress sourceIp = _networkMgr.getIp(rule.getSourceIpAddressId()); + IpAddress sourceIp = _networkModel.getIp(rule.getSourceIpAddressId()); Vlan vlan = _vlanDao.findById(sourceIp.getVlanId()); StaticNatRuleTO ruleTO = new StaticNatRuleTO(0,vlan.getVlanTag(), sourceIp.getAddress().addr(), -1, -1, rule.getDestIpAddress(), -1, -1, "any", rule.isForRevoke(), false); @@ -632,7 +632,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl } // Create/delete VPN - IpAddress ip = _networkMgr.getIp(vpn.getServerAddressId()); + IpAddress ip = _networkModel.getIp(vpn.getServerAddressId()); // Mask the IP range with the network's VLAN tag String[] ipRange = vpn.getIpRange().split("-"); @@ -712,14 +712,6 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl return vlanTag - lowestVlanTag; } - private NicVO savePlaceholderNic(Network network, String ipAddress) { - NicVO nic = new NicVO(null, null, network.getId(), null); - nic.setIp4Address(ipAddress); - nic.setReservationStrategy(ReservationStrategy.PlaceHolder); - nic.setState(State.Reserved); - return _nicDao.persist(nic); - } - public int getGloballyConfiguredCidrSize() { try { String globalVlanBits = _configDao.getValue(Config.GuestVlanBits.key()); @@ -771,7 
+763,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl List pfRules = new ArrayList(); for (PortForwardingRule rule : rules) { - IpAddress sourceIp = _networkMgr.getIp(rule.getSourceIpAddressId()); + IpAddress sourceIp = _networkModel.getIp(rule.getSourceIpAddressId()); Vlan vlan = _vlanDao.findById(sourceIp.getVlanId()); PortForwardingRuleTO ruleTO = new PortForwardingRuleTO(rule, vlan.getVlanTag(), sourceIp.getAddress().addr()); diff --git a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManager.java b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManager.java index d979f079691..dee3ca966e9 100644 --- a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManager.java +++ b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManager.java @@ -18,6 +18,7 @@ package com.cloud.network; import java.util.List; +import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.Host; @@ -96,7 +97,9 @@ public interface ExternalLoadBalancerDeviceManager extends Manager{ * @throws ResourceUnavailableException * @throws InsufficientCapacityException */ - public boolean manageGuestNetworkWithExternalLoadBalancer(boolean add, Network guestConfig) throws ResourceUnavailableException, + public boolean manageGuestNetworkWithExternalLoadBalancer(boolean add, Network guestConfig) throws ResourceUnavailableException, InsufficientCapacityException; - + + public List getLBHealthChecks(Network network, List rules) + throws ResourceUnavailableException; } diff --git a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java index d7b6d78c9bb..b2a56fc056c 100644 --- a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java +++ b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java @@ -26,6 
+26,9 @@ import java.util.UUID; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.response.ExternalLoadBalancerResponse; +import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; @@ -34,12 +37,13 @@ import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupExternalLoadBalancerCommand; import com.cloud.agent.api.routing.CreateLoadBalancerApplianceCommand; import com.cloud.agent.api.routing.DestroyLoadBalancerApplianceCommand; +import com.cloud.agent.api.routing.HealthCheckLBConfigAnswer; +import com.cloud.agent.api.routing.HealthCheckLBConfigCommand; import com.cloud.agent.api.routing.IpAssocCommand; import com.cloud.agent.api.routing.LoadBalancerConfigCommand; import com.cloud.agent.api.routing.NetworkElementCommand; import com.cloud.agent.api.to.IpAddressTO; import com.cloud.agent.api.to.LoadBalancerTO; -import org.apache.cloudstack.api.ApiConstants; import com.cloud.configuration.Config; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.DataCenter; @@ -59,7 +63,6 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.host.dao.HostDetailsDao; -import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; import com.cloud.network.Network.Provider; import com.cloud.network.Network.Service; import com.cloud.network.Networks.TrafficType; @@ -67,6 +70,8 @@ import com.cloud.network.addr.PublicIp; import com.cloud.network.dao.ExternalFirewallDeviceDao; import com.cloud.network.dao.ExternalLoadBalancerDeviceDao; import com.cloud.network.dao.ExternalLoadBalancerDeviceVO; +import com.cloud.network.dao.ExternalLoadBalancerDeviceVO.LBDeviceAllocationState; +import com.cloud.network.dao.ExternalLoadBalancerDeviceVO.LBDeviceState; import 
com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.InlineLoadBalancerNicMapDao; @@ -81,8 +86,6 @@ import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.PhysicalNetworkServiceProviderDao; import com.cloud.network.dao.PhysicalNetworkServiceProviderVO; import com.cloud.network.dao.PhysicalNetworkVO; -import com.cloud.network.dao.ExternalLoadBalancerDeviceVO.LBDeviceAllocationState; -import com.cloud.network.dao.ExternalLoadBalancerDeviceVO.LBDeviceState; import com.cloud.network.element.IpDeployer; import com.cloud.network.element.NetworkElement; import com.cloud.network.element.StaticNatServiceProvider; @@ -102,7 +105,6 @@ import com.cloud.resource.ResourceState; import com.cloud.resource.ResourceStateAdapter; import com.cloud.resource.ServerResource; import com.cloud.resource.UnableDeleteHostException; -import org.apache.cloudstack.api.response.ExternalLoadBalancerResponse; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.dao.AccountDao; @@ -116,8 +118,7 @@ import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.utils.net.UrlUtil; -import com.cloud.vm.Nic.ReservationStrategy; -import com.cloud.vm.Nic.State; +import com.cloud.vm.Nic; import com.cloud.vm.NicVO; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.NicDao; @@ -696,25 +697,6 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase return false; } - private NicVO savePlaceholderNic(Network network, String ipAddress) { - NicVO nic = new NicVO(null, null, network.getId(), null); - nic.setIp4Address(ipAddress); - nic.setReservationStrategy(ReservationStrategy.PlaceHolder); - nic.setState(State.Reserved); - return _nicDao.persist(nic); - } - - private NicVO getPlaceholderNic(Network network) { - List guestIps = _nicDao.listByNetworkId(network.getId()); - for 
(NicVO guestIp : guestIps) { - // only external firewall and external load balancer will create NicVO with PlaceHolder reservation strategy - if (guestIp.getReservationStrategy().equals(ReservationStrategy.PlaceHolder) && guestIp.getVmType() == null - && guestIp.getReserver() == null && !guestIp.getIp4Address().equals(network.getGateway())) { - return guestIp; - } - } - return null; - } private void applyStaticNatRuleForInlineLBRule(DataCenterVO zone, Network network, boolean revoked, String publicIp, String privateIp) throws ResourceUnavailableException { List staticNats = new ArrayList(); @@ -774,7 +756,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase // If a NIC doesn't exist for the load balancing IP address, create one loadBalancingIpNic = _nicDao.findByIp4AddressAndNetworkId(loadBalancingIpAddress, network.getId()); if (loadBalancingIpNic == null) { - loadBalancingIpNic = savePlaceholderNic(network, loadBalancingIpAddress); + loadBalancingIpNic = _networkMgr.savePlaceholderNic(network, loadBalancingIpAddress); } // Save a mapping between the source IP address and the load balancing IP address NIC @@ -888,7 +870,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase if ((destinations != null && !destinations.isEmpty()) || rule.isAutoScaleConfig()) { boolean inline = _networkMgr.isNetworkInlineMode(network); - LoadBalancerTO loadBalancer = new LoadBalancerTO(uuid, srcIp, srcPort, protocol, algorithm, revoked, false, inline, destinations, rule.getStickinessPolicies()); + LoadBalancerTO loadBalancer = new LoadBalancerTO(uuid, srcIp, srcPort, protocol, algorithm, revoked, false, inline, destinations, rule.getStickinessPolicies(), rule.getHealthCheckPolicies()); if (rule.isAutoScaleConfig()) { loadBalancer.setAutoScaleVmGroup(rule.getAutoScaleVmGroup()); } @@ -983,7 +965,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase if (add) { // on restart network, network 
could have already been implemented. If already implemented then return - NicVO selfipNic = getPlaceholderNic(guestConfig); + Nic selfipNic = _networkModel.getPlaceholderNic(guestConfig, null); if (selfipNic != null) { return true; } @@ -997,7 +979,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase } } else { // get the self-ip used by the load balancer - NicVO selfipNic = getPlaceholderNic(guestConfig); + Nic selfipNic = _networkModel.getPlaceholderNic(guestConfig, null); if (selfipNic == null) { s_logger.warn("Network shutdwon requested on external load balancer element, which did not implement the network." + " Either network implement failed half way through or already network shutdown is completed. So just returning."); @@ -1025,10 +1007,10 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase if (add) { // Insert a new NIC for this guest network to reserve the self IP - savePlaceholderNic(guestConfig, selfIp); + _networkMgr.savePlaceholderNic(guestConfig, selfIp); } else { // release the self-ip obtained from guest network - NicVO selfipNic = getPlaceholderNic(guestConfig); + Nic selfipNic = _networkModel.getPlaceholderNic(guestConfig, null); _nicDao.remove(selfipNic.getId()); // release the load balancer allocated for the network @@ -1111,4 +1093,95 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase s_logger.info("Let " + element.getName() + " handle ip association for " + getName() + " in network " + network.getId()); return (IpDeployer)element; } + + @Override + public List getLBHealthChecks(Network network, List rules) + throws ResourceUnavailableException { + + // Find the external load balancer in this zone + long zoneId = network.getDataCenterId(); + DataCenterVO zone = _dcDao.findById(zoneId); + HealthCheckLBConfigAnswer answer = null; + + List loadBalancingRules = new ArrayList(); + + for (FirewallRule rule : rules) { + if 
(rule.getPurpose().equals(Purpose.LoadBalancing)) { + loadBalancingRules.add((LoadBalancingRule) rule); + } + } + + if (loadBalancingRules == null || loadBalancingRules.isEmpty()) { + return null; + } + + ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(network); + if (lbDeviceVO == null) { + s_logger.warn("There is no external load balancer device assigned to this network either network is not implement are already shutdown so just returning"); + return null; + } + + HostVO externalLoadBalancer = _hostDao.findById(lbDeviceVO.getHostId()); + + boolean externalLoadBalancerIsInline = _networkMgr.isNetworkInlineMode(network); + + if (network.getState() == Network.State.Allocated) { + s_logger.debug("External load balancer was asked to apply LB rules for network with ID " + network.getId() + + "; this network is not implemented. Skipping backend commands."); + return null; + } + + List loadBalancersToApply = new ArrayList(); + List mappingStates = new ArrayList(); + for (int i = 0; i < loadBalancingRules.size(); i++) { + LoadBalancingRule rule = loadBalancingRules.get(i); + + boolean revoked = (rule.getState().equals(FirewallRule.State.Revoke)); + String protocol = rule.getProtocol(); + String algorithm = rule.getAlgorithm(); + String uuid = rule.getUuid(); + String srcIp = _networkModel.getIp(rule.getSourceIpAddressId()).getAddress().addr(); + int srcPort = rule.getSourcePortStart(); + List destinations = rule.getDestinations(); + + if (externalLoadBalancerIsInline) { + MappingNic nic = getLoadBalancingIpNic(zone, network, rule.getSourceIpAddressId(), revoked, null); + mappingStates.add(nic.getState()); + NicVO loadBalancingIpNic = nic.getNic(); + if (loadBalancingIpNic == null) { + continue; + } + + // Change the source IP address for the load balancing rule to + // be the load balancing IP address + srcIp = loadBalancingIpNic.getIp4Address(); + } + + if ((destinations != null && !destinations.isEmpty()) || !rule.isAutoScaleConfig()) { + 
boolean inline = _networkMgr.isNetworkInlineMode(network); + LoadBalancerTO loadBalancer = new LoadBalancerTO(uuid, srcIp, srcPort, protocol, algorithm, revoked, + false, inline, destinations, rule.getStickinessPolicies(), rule.getHealthCheckPolicies()); + loadBalancersToApply.add(loadBalancer); + } + } + + try { + if (loadBalancersToApply.size() > 0) { + int numLoadBalancersForCommand = loadBalancersToApply.size(); + LoadBalancerTO[] loadBalancersForCommand = loadBalancersToApply + .toArray(new LoadBalancerTO[numLoadBalancersForCommand]); + // LoadBalancerConfigCommand cmd = new + // LoadBalancerConfigCommand(loadBalancersForCommand, null); + HealthCheckLBConfigCommand cmd = new HealthCheckLBConfigCommand(loadBalancersForCommand); + long guestVlanTag = Integer.parseInt(network.getBroadcastUri().getHost()); + cmd.setAccessDetail(NetworkElementCommand.GUEST_VLAN_TAG, String.valueOf(guestVlanTag)); + + answer = (HealthCheckLBConfigAnswer) _agentMgr.easySend(externalLoadBalancer.getId(), cmd); + } + } catch (Exception ex) { + s_logger.error("Exception Occured ", ex); + } + return answer.getLoadBalancers(); + } + } diff --git a/server/src/com/cloud/network/LBHealthCheckPolicyVO.java b/server/src/com/cloud/network/LBHealthCheckPolicyVO.java new file mode 100644 index 00000000000..ed03a2bbc89 --- /dev/null +++ b/server/src/com/cloud/network/LBHealthCheckPolicyVO.java @@ -0,0 +1,157 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.network; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.PrimaryKeyJoinColumn; +import javax.persistence.Table; + +import com.cloud.network.rules.HealthCheckPolicy; +import org.apache.cloudstack.api.InternalIdentity; + +@Entity +@Table(name = ("load_balancer_healthcheck_policies")) +@PrimaryKeyJoinColumn(name = "load_balancer_id", referencedColumnName = "id") +public class LBHealthCheckPolicyVO implements HealthCheckPolicy { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "load_balancer_id") + private long loadBalancerId; + + @Column(name = "pingpath") + private String pingPath; + + @Column(name = "description") + private String description; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "response_time") + private int responseTime; + + @Column(name = "healthcheck_interval") + private int healthcheckInterval; + + @Column(name = "healthcheck_thresshold") + private int healthcheckThresshold; + + @Column(name = "unhealth_thresshold") + private int unhealthThresshold; + + @Column(name = "revoke") + private boolean revoke = false; + + protected LBHealthCheckPolicyVO() { + this.uuid = UUID.randomUUID().toString(); + } + + public 
LBHealthCheckPolicyVO(long loadBalancerId, String pingPath, String description, int responseTime, + int healthcheckInterval, int healthcheckThresshold, int unhealthThresshold) { + this.loadBalancerId = loadBalancerId; + + if (pingPath == null || pingPath.isEmpty()) + this.pingPath = "/"; + else + this.pingPath = pingPath; + + if (responseTime == 0) + this.responseTime = 2; + else + this.responseTime = responseTime; + + if (healthcheckInterval == 0) + this.healthcheckInterval = 5; + else + this.healthcheckInterval = healthcheckInterval; + + if (healthcheckThresshold == 0) + this.healthcheckThresshold = 2; + else + this.healthcheckThresshold = healthcheckThresshold; + + if (unhealthThresshold == 0) + this.unhealthThresshold = 1; + else + this.unhealthThresshold = unhealthThresshold; + this.uuid = UUID.randomUUID().toString(); + + } + + public int getResponseTime() { + return responseTime; + } + + public int getHealthcheckInterval() { + return healthcheckInterval; + } + + public int getHealthcheckThresshold() { + return healthcheckThresshold; + } + + public int getUnhealthThresshold() { + return unhealthThresshold; + } + + public long getId() { + return id; + } + + public long getLoadBalancerId() { + return loadBalancerId; + } + + public String getpingpath() { + return pingPath; + } + + public String getDescription() { + return description; + } + + public boolean isRevoke() { + return revoke; + } + + public void setRevoke(boolean revoke) { + this.revoke = revoke; + } + + @Override + public String getUuid() { + return this.uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } +} diff --git a/server/src/com/cloud/network/NetworkManager.java b/server/src/com/cloud/network/NetworkManager.java index 48e017edabd..e961f049d73 100755 --- a/server/src/com/cloud/network/NetworkManager.java +++ b/server/src/com/cloud/network/NetworkManager.java @@ -52,6 +52,7 @@ import com.cloud.utils.Pair; import com.cloud.vm.Nic; import com.cloud.vm.NicProfile; import 
com.cloud.vm.NicSecondaryIp; +import com.cloud.vm.NicVO; import com.cloud.vm.ReservationContext; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; @@ -346,4 +347,6 @@ public interface NetworkManager { String allocatePublicIpForGuestNic(Long networkId, DataCenter dc, Pod pod, Account caller, String requestedIp) throws InsufficientAddressCapacityException; boolean removeVmSecondaryIpsOfNic(long nicId); + NicVO savePlaceholderNic(Network network, String ip4Address); + } diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java index 3220c9174eb..b2ceb46d1ab 100755 --- a/server/src/com/cloud/network/NetworkManagerImpl.java +++ b/server/src/com/cloud/network/NetworkManagerImpl.java @@ -44,7 +44,14 @@ import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; -import com.cloud.agent.api.*; +import com.cloud.agent.api.AgentControlAnswer; +import com.cloud.agent.api.AgentControlCommand; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CheckNetworkAnswer; +import com.cloud.agent.api.CheckNetworkCommand; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.StartupCommand; +import com.cloud.agent.api.StartupRoutingCommand; import com.cloud.agent.api.to.NicTO; import com.cloud.alert.AlertManager; import com.cloud.api.ApiDBUtils; @@ -52,9 +59,15 @@ import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.Resource.ResourceType; import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.dc.*; +import com.cloud.dc.AccountVlanMapVO; +import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.Pod; +import com.cloud.dc.PodVlanMapVO; +import com.cloud.dc.Vlan; import com.cloud.dc.Vlan.VlanType; +import com.cloud.dc.VlanVO; import com.cloud.dc.dao.AccountVlanMapDao; 
import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.PodVlanMapDao; @@ -67,14 +80,28 @@ import com.cloud.domain.dao.DomainDao; import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; import com.cloud.event.dao.UsageEventDao; -import com.cloud.exception.*; +import com.cloud.exception.AccountLimitException; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.ConnectionException; +import com.cloud.exception.InsufficientAddressCapacityException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InsufficientVirtualNetworkCapcityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.UnsupportedServiceException; import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.IpAddress.State; -import com.cloud.network.Network.*; +import com.cloud.network.Network.Capability; +import com.cloud.network.Network.Event; +import com.cloud.network.Network.GuestType; +import com.cloud.network.Network.Provider; +import com.cloud.network.Network.Service; import com.cloud.network.Networks.AddressFormat; import com.cloud.network.Networks.BroadcastDomainType; import com.cloud.network.Networks.IsolationType; @@ -84,7 +111,6 @@ import com.cloud.network.dao.FirewallRulesDao; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.LoadBalancerDao; -import com.cloud.network.dao.LoadBalancerVO; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkDomainDao; import com.cloud.network.dao.NetworkServiceMapDao; @@ -104,12 +130,16 @@ import 
com.cloud.network.element.NetworkElement; import com.cloud.network.element.StaticNatServiceProvider; import com.cloud.network.element.UserDataServiceProvider; import com.cloud.network.guru.NetworkGuru; -import com.cloud.network.lb.LoadBalancingRule; -import com.cloud.network.lb.LoadBalancingRule.LbDestination; -import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; import com.cloud.network.lb.LoadBalancingRulesManager; -import com.cloud.network.rules.*; +import com.cloud.network.rules.FirewallManager; +import com.cloud.network.rules.FirewallRule; import com.cloud.network.rules.FirewallRule.Purpose; +import com.cloud.network.rules.FirewallRuleVO; +import com.cloud.network.rules.PortForwardingRuleVO; +import com.cloud.network.rules.RulesManager; +import com.cloud.network.rules.StaticNat; +import com.cloud.network.rules.StaticNatRule; +import com.cloud.network.rules.StaticNatRuleImpl; import com.cloud.network.rules.dao.PortForwardingRulesDao; import com.cloud.network.vpc.NetworkACLManager; import com.cloud.network.vpc.VpcManager; @@ -122,7 +152,12 @@ import com.cloud.offerings.NetworkOfferingVO; import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; import com.cloud.org.Grouping; -import com.cloud.user.*; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.ResourceLimitService; +import com.cloud.user.User; +import com.cloud.user.UserContext; +import com.cloud.user.UserVO; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; import com.cloud.utils.Journal; @@ -132,18 +167,21 @@ import com.cloud.utils.component.AdapterBase; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; -import com.cloud.utils.db.*; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.JoinBuilder.JoinType; +import 
com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.fsm.StateMachine2; import com.cloud.utils.net.Ip; import com.cloud.utils.net.NetUtils; -import com.cloud.vm.*; import com.cloud.vm.Nic; +import com.cloud.vm.Nic.ReservationStrategy; import com.cloud.vm.NicProfile; -import com.cloud.vm.NicSecondaryIp; import com.cloud.vm.NicVO; import com.cloud.vm.ReservationContext; import com.cloud.vm.ReservationContextImpl; @@ -151,6 +189,7 @@ import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Type; +import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.NicDao; import com.cloud.vm.dao.NicSecondaryIpDao; import com.cloud.vm.dao.NicSecondaryIpVO; @@ -1994,28 +2033,20 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L if (cidr != null && gateway != null) { userNetwork.setCidr(cidr); userNetwork.setGateway(gateway); - if (vlanId != null) { - userNetwork.setBroadcastUri(URI.create("vlan://" + vlanId)); - userNetwork.setBroadcastDomainType(BroadcastDomainType.Vlan); - if (!vlanId.equalsIgnoreCase(Vlan.UNTAGGED)) { - userNetwork.setBroadcastDomainType(BroadcastDomainType.Vlan); - } else { - userNetwork.setBroadcastDomainType(BroadcastDomainType.Native); - } - } } if (ip6Cidr != null && ip6Gateway != null) { userNetwork.setIp6Cidr(ip6Cidr); userNetwork.setIp6Gateway(ip6Gateway); - if (vlanId != null) { - userNetwork.setBroadcastUri(URI.create("vlan://" + vlanId)); + } + + if (vlanId != null) { + userNetwork.setBroadcastUri(URI.create("vlan://" + vlanId)); + userNetwork.setBroadcastDomainType(BroadcastDomainType.Vlan); + if (!vlanId.equalsIgnoreCase(Vlan.UNTAGGED)) { 
userNetwork.setBroadcastDomainType(BroadcastDomainType.Vlan); - if (!vlanId.equalsIgnoreCase(Vlan.UNTAGGED)) { - userNetwork.setBroadcastDomainType(BroadcastDomainType.Vlan); - } else { - userNetwork.setBroadcastDomainType(BroadcastDomainType.Native); - } + } else { + userNetwork.setBroadcastDomainType(BroadcastDomainType.Native); } } @@ -2310,52 +2341,51 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L @Override public boolean applyRules(List rules, FirewallRule.Purpose purpose, NetworkRuleApplier applier, boolean continueOnError) throws ResourceUnavailableException { - if (rules == null || rules.size() == 0) { - s_logger.debug("There are no rules to forward to the network elements"); - return true; - } + if (rules == null || rules.size() == 0) { + s_logger.debug("There are no rules to forward to the network elements"); + return true; + } - boolean success = true; - Network network = _networksDao.findById(rules.get(0).getNetworkId()); + boolean success = true; + Network network = _networksDao.findById(rules.get(0).getNetworkId()); FirewallRuleVO.TrafficType trafficType = rules.get(0).getTrafficType(); - List publicIps = new ArrayList(); + List publicIps = new ArrayList(); - if (! 
(rules.get(0).getPurpose() == FirewallRule.Purpose.Firewall && trafficType == FirewallRule.TrafficType.Egress)) { + if (!(rules.get(0).getPurpose() == FirewallRule.Purpose.Firewall && trafficType == FirewallRule.TrafficType.Egress)) { // get the list of public ip's owned by the network List userIps = _ipAddressDao.listByAssociatedNetwork(network.getId(), null); if (userIps != null && !userIps.isEmpty()) { for (IPAddressVO userIp : userIps) { - PublicIp publicIp = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); - publicIps.add(publicIp); - } - } + PublicIp publicIp = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); + publicIps.add(publicIp); + } + } - // rules can not programmed unless IP is associated with network service provider, so run IP assoication for - // the network so as to ensure IP is associated before applying rules (in add state) - applyIpAssociations(network, false, continueOnError, publicIps); - } - - try { - applier.applyRules(network, purpose, rules); - } catch (ResourceUnavailableException e) { - if (!continueOnError) { - throw e; - } - s_logger.warn("Problems with applying " + purpose + " rules but pushing on", e); - success = false; - } - - if (! 
(rules.get(0).getPurpose() == FirewallRule.Purpose.Firewall && trafficType == FirewallRule.TrafficType.Egress) ) { - // if all the rules configured on public IP are revoked then dis-associate IP with network service provider + // rules can not programmed unless IP is associated with network + // service provider, so run IP assoication for + // the network so as to ensure IP is associated before applying + // rules (in add state) + applyIpAssociations(network, false, continueOnError, publicIps); + } + + try { + applier.applyRules(network, purpose, rules); + } catch (ResourceUnavailableException e) { + if (!continueOnError) { + throw e; + } + s_logger.warn("Problems with applying " + purpose + " rules but pushing on", e); + success = false; + } + + if (!(rules.get(0).getPurpose() == FirewallRule.Purpose.Firewall && trafficType == FirewallRule.TrafficType.Egress)) { + // if all the rules configured on public IP are revoked then + // dis-associate IP with network service provider applyIpAssociations(network, true, continueOnError, publicIps); } - return success; + return success; } - - - - public class NetworkGarbageCollector implements Runnable { @@ -3091,26 +3121,8 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L success = false; } - // remove all LB rules for the network - List lbs = _lbDao.listByNetworkId(networkId); - List lbRules = new ArrayList(); - for (LoadBalancerVO lb : lbs) { - s_logger.trace("Marking lb rule " + lb + " with Revoke state"); - lb.setState(FirewallRule.State.Revoke); - List dstList = _lbMgr.getExistingDestinations(lb.getId()); - List policyList = _lbMgr.getStickinessPolicies(lb.getId()); - // mark all destination with revoke state - for (LbDestination dst : dstList) { - s_logger.trace("Marking lb destination " + dst + " with Revoke state"); - dst.setRevoked(true); - } - - LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList); - lbRules.add(loadBalancing); - } - try { - if 
(!_lbMgr.applyRules(network, Purpose.LoadBalancing, lbRules)) { + if (!_lbMgr.revokeLoadBalancersForNetwork(networkId)) { s_logger.warn("Failed to cleanup lb rules as a part of shutdownNetworkRules"); success = false; } @@ -3427,14 +3439,35 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L return success; } + @Override + @DB public void allocateDirectIp(NicProfile nic, DataCenter dc, VirtualMachineProfile vm, Network network, String requestedIpv4, String requestedIpv6) throws InsufficientVirtualNetworkCapcityException, InsufficientAddressCapacityException { boolean ipv4 = false, ipv6 = false; + + Transaction txn = Transaction.currentTxn(); + txn.start(); + if (network.getGateway() != null) { if (nic.getIp4Address() == null) { ipv4 = true; - PublicIp ip = assignPublicIpAddress(dc.getId(), null, vm.getOwner(), VlanType.DirectAttached, network.getId(), requestedIpv4, false); + PublicIp ip = null; + + //Get ip address from the placeholder and don't allocate a new one + if (requestedIpv4 != null && vm.getType() == VirtualMachine.Type.DomainRouter) { + Nic placeholderNic = _networkModel.getPlaceholderNic(network, null); + if (placeholderNic != null) { + IPAddressVO userIp = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), placeholderNic.getIp4Address()); + ip = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); + s_logger.debug("Nic got an ip address " + placeholderNic.getIp4Address() + " stored in placeholder nic for the network " + network); + } + } + + if (ip == null) { + ip = assignPublicIpAddress(dc.getId(), null, vm.getOwner(), VlanType.DirectAttached, network.getId(), requestedIpv4, false); + } + nic.setIp4Address(ip.getAddress().toString()); nic.setGateway(ip.getGateway()); nic.setNetmask(ip.getNetmask()); @@ -3449,6 +3482,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L nic.setDns2(dc.getDns2()); } + //FIXME - get ipv6 address from the placeholder if it's 
stored there if (network.getIp6Gateway() != null) { if (nic.getIp6Address() == null) { ipv6 = true; @@ -3471,7 +3505,8 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L nic.setIp6Dns1(dc.getIp6Dns1()); nic.setIp6Dns2(dc.getIp6Dns2()); } - + + txn.commit(); } @Override @@ -3675,5 +3710,14 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L Ip ipAddr = ip.getAddress(); return ipAddr.addr(); } - + + @Override + public NicVO savePlaceholderNic(Network network, String ip4Address) { + NicVO nic = new NicVO(null, null, network.getId(), null); + nic.setIp4Address(ip4Address); + nic.setReservationStrategy(ReservationStrategy.PlaceHolder); + nic.setState(Nic.State.Reserved); + return _nicDao.persist(nic); + } + } diff --git a/server/src/com/cloud/network/NetworkModelImpl.java b/server/src/com/cloud/network/NetworkModelImpl.java index 52089df4d95..41ce103b360 100644 --- a/server/src/com/cloud/network/NetworkModelImpl.java +++ b/server/src/com/cloud/network/NetworkModelImpl.java @@ -93,7 +93,6 @@ import com.cloud.user.DomainManager; import com.cloud.user.dao.AccountDao; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.component.ComponentContext; -import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.JoinBuilder; @@ -1457,11 +1456,11 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { if (network.getGuestType() != Network.GuestType.Shared) { List networkMap = _networksDao.listBy(owner.getId(), network.getId()); if (networkMap == null || networkMap.isEmpty()) { - throw new PermissionDeniedException("Unable to use network with id= " + network.getId() + ", permission denied"); + throw new PermissionDeniedException("Unable to use network with id= " + network.getUuid() + ", permission denied"); } } else { if (!isNetworkAvailableInDomain(network.getId(), owner.getDomainId())) { - 
throw new PermissionDeniedException("Shared network id=" + network.getId() + " is not available in domain id=" + owner.getDomainId()); + throw new PermissionDeniedException("Shared network id=" + network.getUuid() + " is not available in domain id=" + owner.getDomainId()); } } } @@ -1644,6 +1643,11 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { if (usedIps.size() != 0) { allPossibleIps.removeAll(usedIps); } + + String gateway = network.getGateway(); + if ((gateway != null) && (allPossibleIps.contains(NetUtils.ip2Long(gateway)))) + allPossibleIps.remove(NetUtils.ip2Long(gateway)); + return allPossibleIps; } @@ -1989,4 +1993,26 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { } return startIpv6; } + + @Override + public NicVO getPlaceholderNic(Network network, Long podId) { + List nics = _nicDao.listPlaceholderNicsByNetworkId(network.getId()); + for (NicVO nic : nics) { + if (nic.getVmType() == null && nic.getReserver() == null && nic.getIp4Address() != null && !nic.getIp4Address().equals(network.getGateway())) { + if (podId == null) { + return nic; + } else { + //return nic only when its ip address belong to the pod range (for the Basic zone case) + List vlans = _vlanDao.listVlansForPod(podId); + for (Vlan vlan : vlans) { + IpAddress ip = _ipAddressDao.findByIpAndNetworkId(network.getId(), nic.getIp4Address()); + if (ip != null && ip.getVlanId() == vlan.getId()) { + return nic; + } + } + } + } + } + return null; + } } diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java index 52e81e5c8c8..8303b0bba3c 100755 --- a/server/src/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/com/cloud/network/NetworkServiceImpl.java @@ -2046,6 +2046,11 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (usedIps.size() != 0) { allPossibleIps.removeAll(usedIps); } + + String gateway = network.getGateway(); + if 
((gateway != null) && (allPossibleIps.contains(NetUtils.ip2Long(gateway)))) + allPossibleIps.remove(NetUtils.ip2Long(gateway)); + return allPossibleIps; } diff --git a/server/src/com/cloud/network/dao/LBHealthCheckPolicyDao.java b/server/src/com/cloud/network/dao/LBHealthCheckPolicyDao.java new file mode 100644 index 00000000000..42a9e421485 --- /dev/null +++ b/server/src/com/cloud/network/dao/LBHealthCheckPolicyDao.java @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.network.dao; + +import java.util.List; + +import com.cloud.network.LBHealthCheckPolicyVO; +import com.cloud.utils.db.GenericDao; + + +public interface LBHealthCheckPolicyDao extends + GenericDao { + void remove(long loadBalancerId); + + void remove(long loadBalancerId, Boolean pending); + + List listByLoadBalancerId(long loadBalancerId); + + List listByLoadBalancerId(long loadBalancerId, + boolean revoke); +} diff --git a/server/src/com/cloud/network/dao/LBHealthCheckPolicyDaoImpl.java b/server/src/com/cloud/network/dao/LBHealthCheckPolicyDaoImpl.java new file mode 100644 index 00000000000..65e0689e79a --- /dev/null +++ b/server/src/com/cloud/network/dao/LBHealthCheckPolicyDaoImpl.java @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.network.dao; + +import java.util.List; + +import javax.ejb.Local; + +import org.springframework.stereotype.Component; + +import com.cloud.network.LBHealthCheckPolicyVO; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchCriteria; + + +@Component +@Local(value = { LBHealthCheckPolicyDao.class }) +public class LBHealthCheckPolicyDaoImpl extends + GenericDaoBase implements + LBHealthCheckPolicyDao { + + @Override + public void remove(long loadBalancerId) { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("loadBalancerId", SearchCriteria.Op.EQ, loadBalancerId); + + expunge(sc); + } + + @Override + public void remove(long loadBalancerId, Boolean revoke) { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("loadBalancerId", SearchCriteria.Op.EQ, loadBalancerId); + sc.addAnd("revoke", SearchCriteria.Op.EQ, revoke); + + expunge(sc); + } + + @Override + public List listByLoadBalancerId(long loadBalancerId) { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("loadBalancerId", SearchCriteria.Op.EQ, loadBalancerId); + + return listBy(sc); + } + + @Override + public List listByLoadBalancerId(long loadBalancerId, + boolean pending) { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("loadBalancerId", SearchCriteria.Op.EQ, loadBalancerId); + sc.addAnd("revoke", SearchCriteria.Op.EQ, pending); + + return listBy(sc); + } + +} diff --git a/server/src/com/cloud/network/dao/LoadBalancerVMMapVO.java b/server/src/com/cloud/network/dao/LoadBalancerVMMapVO.java index 8856993a982..852302e0949 100644 --- a/server/src/com/cloud/network/dao/LoadBalancerVMMapVO.java +++ b/server/src/com/cloud/network/dao/LoadBalancerVMMapVO.java @@ -39,10 +39,14 @@ public class LoadBalancerVMMapVO implements InternalIdentity { @Column(name="instance_id") private long instanceId; - @Column(name="revoke") + @Column(name = "revoke") private boolean revoke = false; - public LoadBalancerVMMapVO() { } + @Column(name = "state") + private 
String state; + + public LoadBalancerVMMapVO() { + } public LoadBalancerVMMapVO(long loadBalancerId, long instanceId) { this.loadBalancerId = loadBalancerId; @@ -74,4 +78,12 @@ public class LoadBalancerVMMapVO implements InternalIdentity { public void setRevoke(boolean revoke) { this.revoke = revoke; } + + public String getState() { + return state; + } + + public void setState(String state) { + this.state = state; + } } diff --git a/server/src/com/cloud/network/element/VirtualRouterElement.java b/server/src/com/cloud/network/element/VirtualRouterElement.java index 500d0b68ece..169db3283e3 100755 --- a/server/src/com/cloud/network/element/VirtualRouterElement.java +++ b/server/src/com/cloud/network/element/VirtualRouterElement.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.api.command.admin.router.CreateVirtualRouterElement import org.apache.cloudstack.api.command.admin.router.ListVirtualRouterElementsCmd; import org.apache.log4j.Logger; +import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.DataCenter; @@ -363,7 +364,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl return true; } } else { - return true; + return false; } } @@ -938,4 +939,11 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl protected VirtualRouterProviderType getVirtualRouterProvider() { return VirtualRouterProviderType.VirtualRouter; } + + @Override + public List updateHealthChecks(Network network, + List lbrules) { + // TODO Auto-generated method stub + return null; + } } diff --git a/server/src/com/cloud/network/element/VpcVirtualRouterElement.java b/server/src/com/cloud/network/element/VpcVirtualRouterElement.java index aa8f10d9c2a..c7d4aeda344 100644 --- a/server/src/com/cloud/network/element/VpcVirtualRouterElement.java +++ b/server/src/com/cloud/network/element/VpcVirtualRouterElement.java @@ -142,7 
+142,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc Long vpcId = network.getVpcId(); if (vpcId == null) { - s_logger.warn("Network " + network + " is not associated with any VPC"); + s_logger.trace("Network " + network + " is not associated with any VPC"); return false; } @@ -185,7 +185,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc Long vpcId = network.getVpcId(); if (vpcId == null) { - s_logger.warn("Network " + network + " is not associated with any VPC"); + s_logger.trace("Network " + network + " is not associated with any VPC"); return false; } diff --git a/server/src/com/cloud/network/guru/DirectNetworkGuru.java b/server/src/com/cloud/network/guru/DirectNetworkGuru.java index 8707cfd418c..2c8acbb25e9 100755 --- a/server/src/com/cloud/network/guru/DirectNetworkGuru.java +++ b/server/src/com/cloud/network/guru/DirectNetworkGuru.java @@ -44,7 +44,6 @@ import com.cloud.network.NetworkProfile; import com.cloud.network.Networks.BroadcastDomainType; import com.cloud.network.Networks.Mode; import com.cloud.network.Networks.TrafficType; -import com.cloud.network.UserIpv6AddressVO; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.NetworkVO; @@ -55,13 +54,15 @@ import com.cloud.user.Account; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.vm.Nic; import com.cloud.vm.Nic.ReservationStrategy; -import com.cloud.vm.dao.NicSecondaryIpDao; import com.cloud.vm.NicProfile; -import com.cloud.vm.NicSecondaryIp; +import com.cloud.vm.NicVO; import com.cloud.vm.ReservationContext; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.NicSecondaryIpDao; @Local(value = { NetworkGuru.class }) public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { @@ -85,6 
+86,8 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { Ipv6AddressManager _ipv6Mgr; @Inject NicSecondaryIpDao _nicSecondaryIpDao; + @Inject + NicDao _nicDao; private static final TrafficType[] _trafficTypes = {TrafficType.Guest}; @@ -198,7 +201,7 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { nic.setStrategy(ReservationStrategy.Create); } - _networkMgr.allocateDirectIp(nic, dc, vm, network, nic.getRequestedIpv4(), nic.getRequestedIpv6()); + allocateDirectIp(nic, network, vm, dc, nic.getRequestedIpv4(), nic.getRequestedIpv6()); nic.setStrategy(ReservationStrategy.Create); return nic; @@ -208,11 +211,31 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws InsufficientVirtualNetworkCapcityException, InsufficientAddressCapacityException, ConcurrentOperationException { if (nic.getIp4Address() == null && nic.getIp6Address() == null) { - _networkMgr.allocateDirectIp(nic, dest.getDataCenter(), vm, network, null, null); + allocateDirectIp(nic, network, vm, dest.getDataCenter(), null, null); nic.setStrategy(ReservationStrategy.Create); } } + @DB + protected void allocateDirectIp(NicProfile nic, Network network, VirtualMachineProfile vm, DataCenter dc, String requestedIp4Addr, String requestedIp6Addr) + throws InsufficientVirtualNetworkCapcityException, + InsufficientAddressCapacityException { + + //FIXME - save ipv6 informaiton in the placeholder nic + Transaction txn = Transaction.currentTxn(); + txn.start(); + _networkMgr.allocateDirectIp(nic, dc, vm, network, requestedIp4Addr, requestedIp6Addr); + //save the placeholder nic if the vm is the Virtual router + if (vm.getType() == VirtualMachine.Type.DomainRouter) { + Nic placeholderNic = _networkModel.getPlaceholderNic(network, null); + if (placeholderNic == null) { + s_logger.debug("Saving placeholder 
nic with ip4 address " + nic.getIp4Address() + " and ipv6 address " + requestedIp6Addr + " for the network " + network); + _networkMgr.savePlaceholderNic(network, nic.getIp4Address()); + } + } + txn.commit(); + } + @Override public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservationId) { return true; @@ -230,24 +253,32 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { } if (nic.getIp4Address() != null) { - IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIp4Address()); - if (ip != null) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - _networkMgr.markIpAsUnavailable(ip.getId()); - _ipAddressDao.unassignIpAddress(ip.getId()); - //unassign nic secondary ip address - s_logger.debug("remove nic " + nic.getId() + " secondary ip "); - List nicSecIps = null; - nicSecIps = _nicSecondaryIpDao.getSecondaryIpAddressesForNic(nic.getId()); - for (String secIp: nicSecIps) { - IPAddressVO pubIp = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), secIp); - _networkMgr.markIpAsUnavailable(pubIp.getId()); - _ipAddressDao.unassignIpAddress(pubIp.getId()); + IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIp4Address()); + if (ip != null) { + Transaction txn = Transaction.currentTxn(); + txn.start(); + + // if the ip address a part of placeholder, don't release it + Nic placeholderNic = _networkModel.getPlaceholderNic(network, null); + if (placeholderNic != null && placeholderNic.getIp4Address().equalsIgnoreCase(ip.getAddress().addr())) { + s_logger.debug("Not releasing direct ip " + ip.getId() +" yet as its ip is saved in the placeholder"); + } else { + _networkMgr.markIpAsUnavailable(ip.getId()); + _ipAddressDao.unassignIpAddress(ip.getId()); + } + + //unassign nic secondary ip address + s_logger.debug("remove nic " + nic.getId() + " secondary ip "); + List nicSecIps = null; + nicSecIps = 
_nicSecondaryIpDao.getSecondaryIpAddressesForNic(nic.getId()); + for (String secIp: nicSecIps) { + IPAddressVO pubIp = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), secIp); + _networkMgr.markIpAsUnavailable(pubIp.getId()); + _ipAddressDao.unassignIpAddress(pubIp.getId()); + } + + txn.commit(); } - - txn.commit(); - } } if (nic.getIp6Address() != null) { @@ -261,7 +292,24 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { } @Override + @DB public boolean trash(Network network, NetworkOffering offering, Account owner) { + //Have to remove all placeholder nics + List nics = _nicDao.listPlaceholderNicsByNetworkId(network.getId()); + Transaction txn = Transaction.currentTxn(); + txn.start(); + for (Nic nic : nics) { + if (nic.getIp4Address() != null) { + s_logger.debug("Releasing ip " + nic.getIp4Address() + " of placeholder nic " + nic); + IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIp4Address()); + _networkMgr.markIpAsUnavailable(ip.getId()); + _ipAddressDao.unassignIpAddress(ip.getId()); + s_logger.debug("Removing placeholder nic " + nic); + _nicDao.remove(nic.getId()); + } + } + + txn.commit(); return true; } diff --git a/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java b/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java index 8efbcd7b7d1..d74cd064488 100755 --- a/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java +++ b/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java @@ -17,6 +17,7 @@ package com.cloud.network.guru; import java.net.URI; +import java.util.List; import javax.ejb.Local; import javax.inject.Inject; @@ -50,6 +51,7 @@ import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.Nic; import com.cloud.vm.Nic.ReservationStrategy; import com.cloud.vm.NicProfile; import 
com.cloud.vm.ReservationContext; @@ -155,11 +157,34 @@ public class DirectPodBasedNetworkGuru extends DirectNetworkGuru { nic.setDns2(dc.getDns2()); } + @DB protected void getIp(NicProfile nic, Pod pod, VirtualMachineProfile vm, Network network) throws InsufficientVirtualNetworkCapcityException, InsufficientAddressCapacityException, ConcurrentOperationException { DataCenter dc = _dcDao.findById(pod.getDataCenterId()); if (nic.getIp4Address() == null) { - PublicIp ip = _networkMgr.assignPublicIpAddress(dc.getId(), pod.getId(), vm.getOwner(), VlanType.DirectAttached, network.getId(), null, false); + Transaction txn = Transaction.currentTxn(); + txn.start(); + + PublicIp ip = null; + List podRefs = _podVlanDao.listPodVlanMapsByPod(pod.getId()); + String podRangeGateway = null; + if (!podRefs.isEmpty()) { + podRangeGateway = _vlanDao.findById(podRefs.get(0).getVlanDbId()).getVlanGateway(); + } + //Get ip address from the placeholder and don't allocate a new one + if (vm.getType() == VirtualMachine.Type.DomainRouter) { + Nic placeholderNic = _networkModel.getPlaceholderNic(network, null); + if (placeholderNic != null) { + IPAddressVO userIp = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), placeholderNic.getIp4Address()); + ip = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); + s_logger.debug("Nic got an ip address " + placeholderNic.getIp4Address() + " stored in placeholder nic for the network " + network + " and gateway " + podRangeGateway); + } + } + + if (ip == null) { + ip = _networkMgr.assignPublicIpAddress(dc.getId(), pod.getId(), vm.getOwner(), VlanType.DirectAttached, network.getId(), null, false); + } + nic.setIp4Address(ip.getAddress().toString()); nic.setFormat(AddressFormat.Ip4); nic.setGateway(ip.getGateway()); @@ -171,6 +196,16 @@ public class DirectPodBasedNetworkGuru extends DirectNetworkGuru { } nic.setReservationId(String.valueOf(ip.getVlanTag())); nic.setMacAddress(ip.getMacAddress()); + + //save the 
placeholder nic if the vm is the Virtual router + if (vm.getType() == VirtualMachine.Type.DomainRouter) { + Nic placeholderNic = _networkModel.getPlaceholderNic(network, null); + if (placeholderNic == null) { + s_logger.debug("Saving placeholder nic with ip4 address " + nic.getIp4Address() + " for the network " + network + " with the gateway " + podRangeGateway); + _networkMgr.savePlaceholderNic(network, nic.getIp4Address()); + } + } + txn.commit(); } nic.setDns1(dc.getDns1()); nic.setDns2(dc.getDns2()); diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java b/server/src/com/cloud/network/lb/LBHealthCheckManager.java similarity index 79% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java rename to server/src/com/cloud/network/lb/LBHealthCheckManager.java index fdf5958f1ab..2e24965aa35 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java +++ b/server/src/com/cloud/network/lb/LBHealthCheckManager.java @@ -14,10 +14,11 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -package org.apache.cloudstack.storage.datastore.provider; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +package com.cloud.network.lb; -public interface PrimaryDataStoreProvider extends DataStoreProvider { +public interface LBHealthCheckManager { + + void updateLBHealthCheck(); + } diff --git a/server/src/com/cloud/network/lb/LBHealthCheckManagerImpl.java b/server/src/com/cloud/network/lb/LBHealthCheckManagerImpl.java new file mode 100644 index 00000000000..90547328714 --- /dev/null +++ b/server/src/com/cloud/network/lb/LBHealthCheckManagerImpl.java @@ -0,0 +1,110 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.network.lb; + +import static java.lang.String.format; + +import java.util.Map; + +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.configuration.Config; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.component.Manager; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.concurrency.NamedThreadFactory; + +@Component +@Local(value = {LBHealthCheckManager.class}) +public class LBHealthCheckManagerImpl extends ManagerBase implements LBHealthCheckManager, Manager { + private static final Logger s_logger = Logger.getLogger(LBHealthCheckManagerImpl.class); + + @Inject + ConfigurationDao _configDao; + @Inject + LoadBalancingRulesService _lbService; + + private String name; + private Map _configs; + ScheduledExecutorService _executor; + + private long _interval; + + @Override + public boolean configure(String name, Map params) throws 
ConfigurationException { + _configs = _configDao.getConfiguration("management-server", params); + if (s_logger.isInfoEnabled()) { + s_logger.info(format("Configuring LBHealthCheck Manager %1$s", name)); + } + this.name = name; + _interval = NumbersUtil.parseLong(_configs.get(Config.LBHealthCheck.key()), 600); + _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("LBHealthCheck")); + return true; + } + + @Override + public boolean start() { + s_logger.debug("LB HealthCheckmanager is getting Started"); + _executor.scheduleAtFixedRate(new UpdateLBHealthCheck(), 10, _interval, TimeUnit.SECONDS); + return true; + } + + @Override + public boolean stop() { + s_logger.debug("HealthCheckmanager is getting Stopped"); + _executor.shutdown(); + return true; + } + + @Override + public String getName() { + return this.name; + } + + protected class UpdateLBHealthCheck implements Runnable { + @Override + public void run() { + try { + updateLBHealthCheck(); + } catch (Exception e) { + s_logger.error("Exception in LB HealthCheck Update Checker", e); + } + } + } + + @Override + public void updateLBHealthCheck() { + try { + _lbService.updateLBHealthChecks(); + } catch (ResourceUnavailableException e) { + s_logger.debug("Error while updating the LB HealtCheck ", e); + } + s_logger.debug("LB HealthCheck Manager is running and getting the updates from LB providers and updating service status"); + } + +} diff --git a/server/src/com/cloud/network/lb/LoadBalancingRulesManager.java b/server/src/com/cloud/network/lb/LoadBalancingRulesManager.java index 9d7d22fdad7..d98872a0906 100644 --- a/server/src/com/cloud/network/lb/LoadBalancingRulesManager.java +++ b/server/src/com/cloud/network/lb/LoadBalancingRulesManager.java @@ -20,6 +20,7 @@ import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.Network; import com.cloud.network.lb.LoadBalancingRule.LbDestination; +import 
com.cloud.network.lb.LoadBalancingRule.LbHealthCheckPolicy; import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; import com.cloud.network.rules.FirewallRule; import com.cloud.network.rules.LbStickinessMethod; @@ -38,6 +39,7 @@ public interface LoadBalancingRulesManager extends LoadBalancingRulesService { List getExistingDestinations(long lbId); List getStickinessPolicies(long lbId); List getStickinessMethods(long networkid); + List getHealthCheckPolicies(long lbId); /** * Remove vm from all load balancers @@ -49,4 +51,5 @@ public interface LoadBalancingRulesManager extends LoadBalancingRulesService { boolean applyLoadBalancersForNetwork(long networkId) throws ResourceUnavailableException; String getLBCapability(long networkid, String capabilityName); boolean configureLbAutoScaleVmGroup(long vmGroupid, String currentState) throws ResourceUnavailableException; + boolean revokeLoadBalancersForNetwork(long networkId) throws ResourceUnavailableException; } diff --git a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java index 531a42805b6..80e75cd3d66 100755 --- a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java +++ b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java @@ -33,8 +33,11 @@ import javax.inject.Inject; import com.cloud.event.UsageEventUtils; import org.apache.log4j.Logger; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.command.user.loadbalancer.CreateLBHealthCheckPolicyCmd; import org.apache.cloudstack.api.command.user.loadbalancer.CreateLBStickinessPolicyCmd; import org.apache.cloudstack.api.command.user.loadbalancer.CreateLoadBalancerRuleCmd; +import org.apache.cloudstack.api.command.user.loadbalancer.ListLBHealthCheckPoliciesCmd; import org.apache.cloudstack.api.command.user.loadbalancer.ListLBStickinessPoliciesCmd; import 
org.apache.cloudstack.api.command.user.loadbalancer.ListLoadBalancerRuleInstancesCmd; import org.apache.cloudstack.api.command.user.loadbalancer.ListLoadBalancerRulesCmd; @@ -42,6 +45,7 @@ import org.apache.cloudstack.api.command.user.loadbalancer.UpdateLoadBalancerRul import org.apache.cloudstack.api.response.ServiceResponse; import org.springframework.stereotype.Component; +import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.dao.ConfigurationDao; @@ -61,6 +65,7 @@ import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.ExternalLoadBalancerUsageManager; import com.cloud.network.IpAddress; +import com.cloud.network.LBHealthCheckPolicyVO; import com.cloud.network.Network; import com.cloud.network.Network.Capability; import com.cloud.network.Network.Provider; @@ -87,6 +92,7 @@ import com.cloud.network.dao.FirewallRulesCidrsDao; import com.cloud.network.dao.FirewallRulesDao; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.IPAddressVO; +import com.cloud.network.dao.LBHealthCheckPolicyDao; import com.cloud.network.dao.LBStickinessPolicyDao; import com.cloud.network.dao.LBStickinessPolicyVO; import com.cloud.network.dao.LoadBalancerDao; @@ -97,17 +103,20 @@ import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkServiceMapDao; import com.cloud.network.dao.NetworkVO; import com.cloud.network.element.LoadBalancingServiceProvider; +import com.cloud.network.element.NetworkElement; import com.cloud.network.lb.LoadBalancingRule.LbAutoScalePolicy; import com.cloud.network.lb.LoadBalancingRule.LbAutoScaleVmGroup; import com.cloud.network.lb.LoadBalancingRule.LbAutoScaleVmProfile; import com.cloud.network.lb.LoadBalancingRule.LbCondition; import com.cloud.network.lb.LoadBalancingRule.LbDestination; +import 
com.cloud.network.lb.LoadBalancingRule.LbHealthCheckPolicy; import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; import com.cloud.network.rules.FirewallManager; import com.cloud.network.rules.FirewallRule; import com.cloud.network.rules.FirewallRule.FirewallRuleType; import com.cloud.network.rules.FirewallRule.Purpose; import com.cloud.network.rules.FirewallRuleVO; +import com.cloud.network.rules.HealthCheckPolicy; import com.cloud.network.rules.LbStickinessMethod; import com.cloud.network.rules.LbStickinessMethod.LbStickinessMethodParam; import com.cloud.network.rules.LoadBalancer; @@ -151,7 +160,8 @@ import com.google.gson.reflect.TypeToken; @Component @Local(value = { LoadBalancingRulesManager.class, LoadBalancingRulesService.class }) -public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancingRulesManager, LoadBalancingRulesService, NetworkRuleApplier { +public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancingRulesManager, + LoadBalancingRulesService, NetworkRuleApplier { private static final Logger s_logger = Logger.getLogger(LoadBalancingRulesManagerImpl.class); @Inject @@ -175,6 +185,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Inject LBStickinessPolicyDao _lb2stickinesspoliciesDao; @Inject + LBHealthCheckPolicyDao _lb2healthcheckDao; + @Inject UserVmDao _vmDao; @Inject AccountDao _accountDao; @@ -199,7 +211,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Inject ExternalLoadBalancerUsageManager _externalLBUsageMgr; - @Inject + @Inject NetworkServiceMapDao _ntwkSrvcDao; @Inject ResourceTagDao _resourceTagDao; @@ -229,9 +241,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements DataCenterDao _dcDao = null; @Inject UserDao _userDao; - @Inject List _lbProviders; + @Inject + List _lbProviders; - // Will return a string. 
For LB Stickiness this will be a json, for autoscale this will be "," separated values + // Will return a string. For LB Stickiness this will be a json, for + // autoscale this will be "," separated values @Override public String getLBCapability(long networkid, String capabilityName) { Map> serviceCapabilitiesMap = _networkModel.getNetworkCapabilities(networkid); @@ -240,11 +254,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements ServiceResponse serviceResponse = new ServiceResponse(); serviceResponse.setName(service.getName()); if ("Lb".equalsIgnoreCase(service.getName())) { - Map serviceCapabilities = serviceCapabilitiesMap - .get(service); + Map serviceCapabilities = serviceCapabilitiesMap.get(service); if (serviceCapabilities != null) { - for (Capability capability : serviceCapabilities - .keySet()) { + for (Capability capability : serviceCapabilities.keySet()) { if (capabilityName.equals(capability.getName())) { return serviceCapabilities.get(capability); } @@ -255,14 +267,17 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } return null; } + private LbAutoScaleVmGroup getLbAutoScaleVmGroup(AutoScaleVmGroupVO vmGroup, String currentState, LoadBalancerVO lb) { long lbNetworkId = lb.getNetworkId(); String lbName = lb.getName(); - List vmGroupPolicyMapList = _autoScaleVmGroupPolicyMapDao.listByVmGroupId(vmGroup.getId()); + List vmGroupPolicyMapList = _autoScaleVmGroupPolicyMapDao.listByVmGroupId(vmGroup + .getId()); List autoScalePolicies = new ArrayList(); for (AutoScaleVmGroupPolicyMapVO vmGroupPolicyMap : vmGroupPolicyMapList) { AutoScalePolicy autoScalePolicy = _autoScalePolicyDao.findById(vmGroupPolicyMap.getPolicyId()); - List autoScalePolicyConditionMapList = _autoScalePolicyConditionMapDao.listByAll(autoScalePolicy.getId(), null); + List autoScalePolicyConditionMapList = _autoScalePolicyConditionMapDao + .listByAll(autoScalePolicy.getId(), null); List lbConditions = new ArrayList(); for 
(AutoScalePolicyConditionMapVO autoScalePolicyConditionMap : autoScalePolicyConditionMapList) { Condition condition = _conditionDao.findById(autoScalePolicyConditionMap.getConditionId()); @@ -296,32 +311,40 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } } - if (apiKey == null) { - throw new InvalidParameterValueException("apiKey for user: " + user.getUsername() + " is empty. Please generate it"); + throw new InvalidParameterValueException("apiKey for user: " + user.getUsername() + + " is empty. Please generate it"); } if (secretKey == null) { - throw new InvalidParameterValueException("secretKey for user: " + user.getUsername() + " is empty. Please generate it"); + throw new InvalidParameterValueException("secretKey for user: " + user.getUsername() + + " is empty. Please generate it"); } if (csUrl == null || csUrl.contains("localhost")) { - throw new InvalidParameterValueException("Global setting endpointe.url has to be set to the Management Server's API end point"); + throw new InvalidParameterValueException( + "Global setting endpointe.url has to be set to the Management Server's API end point"); } - - LbAutoScaleVmProfile lbAutoScaleVmProfile = new LbAutoScaleVmProfile(autoScaleVmProfile, apiKey, secretKey, csUrl, zoneId, domainId, serviceOfferingId, templateId, vmName, lbNetworkUuid); + LbAutoScaleVmProfile lbAutoScaleVmProfile = new LbAutoScaleVmProfile(autoScaleVmProfile, apiKey, secretKey, + csUrl, zoneId, domainId, serviceOfferingId, templateId, vmName, lbNetworkUuid); return new LbAutoScaleVmGroup(vmGroup, autoScalePolicies, lbAutoScaleVmProfile, currentState); } - private boolean applyAutoScaleConfig(LoadBalancerVO lb, AutoScaleVmGroupVO vmGroup, String currentState) throws ResourceUnavailableException { + private boolean applyAutoScaleConfig(LoadBalancerVO lb, AutoScaleVmGroupVO vmGroup, String currentState) + throws ResourceUnavailableException { LbAutoScaleVmGroup lbAutoScaleVmGroup = getLbAutoScaleVmGroup(vmGroup, 
currentState, lb); - /* Regular config like destinations need not be packed for applying autoscale config as of today.*/ - LoadBalancingRule rule = new LoadBalancingRule(lb, null, null); + /* + * Regular config like destinations need not be packed for applying + * autoscale config as of today. + */ + List policyList = getStickinessPolicies(lb.getId()); + LoadBalancingRule rule = new LoadBalancingRule(lb, null, policyList, null); rule.setAutoScaleVmGroup(lbAutoScaleVmGroup); if (!isRollBackAllowedForProvider(lb)) { - // this is for Netscaler type of devices. if their is failure the db entries will be rollbacked. + // this is for Netscaler type of devices. if their is failure the db + // entries will be rollbacked. return false; } @@ -348,9 +371,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (vmGroup.getState().equals(AutoScaleVmGroup.State_New)) { loadBalancer.setState(FirewallRule.State.Add); _lbDao.persist(loadBalancer); - } - else if (loadBalancer.getState() == FirewallRule.State.Active && - vmGroup.getState().equals(AutoScaleVmGroup.State_Revoke)) { + } else if (loadBalancer.getState() == FirewallRule.State.Active + && vmGroup.getState().equals(AutoScaleVmGroup.State_Revoke)) { loadBalancer.setState(FirewallRule.State.Add); _lbDao.persist(loadBalancer); } @@ -358,11 +380,13 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements try { success = applyAutoScaleConfig(loadBalancer, vmGroup, currentState); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to configure AutoScaleVmGroup to the lb rule: " + loadBalancer.getId() + " because resource is unavaliable:", e); + s_logger.warn("Unable to configure AutoScaleVmGroup to the lb rule: " + loadBalancer.getId() + + " because resource is unavaliable:", e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state 
rolback while creating AutoscaleVmGroup"); + s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + + " lb state rolback while creating AutoscaleVmGroup"); } throw e; } finally { @@ -387,15 +411,24 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return success; } + private boolean validateHealthCheck(CreateLBHealthCheckPolicyCmd cmd) { + LoadBalancerVO loadBalancer = _lbDao.findById(cmd.getLbRuleId()); + String capability = getLBCapability(loadBalancer.getNetworkId(), Capability.HealthCheckPolicy.getName()); + if (capability != null) { + return true; + } + return false; + } + private boolean genericValidator(CreateLBStickinessPolicyCmd cmd) throws InvalidParameterValueException { LoadBalancerVO loadBalancer = _lbDao.findById(cmd.getLbRuleId()); /* Validation : check for valid Method name and params */ - List stickinessMethodList = getStickinessMethods(loadBalancer - .getNetworkId()); + List stickinessMethodList = getStickinessMethods(loadBalancer.getNetworkId()); boolean methodMatch = false; if (stickinessMethodList == null) { - throw new InvalidParameterValueException("Failed: No Stickiness method available for LB rule:" + cmd.getLbRuleId()); + throw new InvalidParameterValueException("Failed: No Stickiness method available for LB rule:" + + cmd.getLbRuleId()); } for (LbStickinessMethod method : stickinessMethodList) { if (method.getMethodName().equalsIgnoreCase(cmd.getStickinessMethodName())) { @@ -422,14 +455,16 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements for (LbStickinessMethodParam param : methodParamList) { if (param.getParamName().equalsIgnoreCase(paramName)) { if ((param.getIsflag() == false) && (paramValue == null)) { - throw new InvalidParameterValueException("Failed : Value expected for the Param :" + param.getParamName()); + throw new InvalidParameterValueException("Failed : Value expected for the Param :" + + param.getParamName()); } found = true; break; } } if (!found) { - throw 
new InvalidParameterValueException("Failed : Stickiness policy does not support param name :" + paramName); + throw new InvalidParameterValueException( + "Failed : Stickiness policy does not support param name :" + paramName); } } } @@ -438,7 +473,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements for (LbStickinessMethodParam param : methodParamList) { if (param.getRequired()) { if (tempParamList.get(param.getParamName()) == null) { - throw new InvalidParameterValueException("Failed : Missing Manadatory Param :" + param.getParamName()); + throw new InvalidParameterValueException("Failed : Missing Manadatory Param :" + + param.getParamName()); } } } @@ -447,13 +483,16 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } } if (methodMatch == false) { - throw new InvalidParameterValueException("Failed to match Stickiness method name for LB rule:" + cmd.getLbRuleId()); + throw new InvalidParameterValueException("Failed to match Stickiness method name for LB rule:" + + cmd.getLbRuleId()); } /* Validation : check for the multiple policies to the rule id */ - List stickinessPolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId(cmd.getLbRuleId(), false); + List stickinessPolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId( + cmd.getLbRuleId(), false); if (stickinessPolicies.size() > 0) { - throw new InvalidParameterValueException("Failed to create Stickiness policy: Already policy attached " + cmd.getLbRuleId()); + throw new InvalidParameterValueException("Failed to create Stickiness policy: Already policy attached " + + cmd.getLbRuleId()); } return true; } @@ -462,7 +501,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Override @DB @ActionEvent(eventType = EventTypes.EVENT_LB_STICKINESSPOLICY_CREATE, eventDescription = "create lb stickinesspolicy to load balancer", create = true) - public StickinessPolicy createLBStickinessPolicy(CreateLBStickinessPolicyCmd cmd) throws 
NetworkRuleConflictException { + public StickinessPolicy createLBStickinessPolicy(CreateLBStickinessPolicyCmd cmd) + throws NetworkRuleConflictException { UserContext caller = UserContext.current(); /* Validation : check corresponding load balancer rule exist */ @@ -473,30 +513,108 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _accountMgr.checkAccess(caller.getCaller(), null, true, loadBalancer); if (loadBalancer.getState() == FirewallRule.State.Revoke) { - throw new InvalidParameterValueException("Failed: LB rule id: " + cmd.getLbRuleId() + " is in deleting state: "); + throw new InvalidParameterValueException("Failed: LB rule id: " + cmd.getLbRuleId() + + " is in deleting state: "); } /* Generic validations */ if (!genericValidator(cmd)) { - throw new InvalidParameterValueException("Failed to create Stickiness policy: Validation Failed " + cmd.getLbRuleId()); + throw new InvalidParameterValueException("Failed to create Stickiness policy: Validation Failed " + + cmd.getLbRuleId()); } - /* Specific validations using network element validator for specific validations */ - LBStickinessPolicyVO lbpolicy = new LBStickinessPolicyVO(loadBalancer.getId(), cmd.getLBStickinessPolicyName(), cmd.getStickinessMethodName(), cmd.getparamList(), cmd.getDescription()); + /* + * Specific validations using network element validator for specific + * validations + */ + LBStickinessPolicyVO lbpolicy = new LBStickinessPolicyVO(loadBalancer.getId(), cmd.getLBStickinessPolicyName(), + cmd.getStickinessMethodName(), cmd.getparamList(), cmd.getDescription()); List policyList = new ArrayList(); policyList.add(new LbStickinessPolicy(cmd.getStickinessMethodName(), lbpolicy.getParams())); - LoadBalancingRule lbRule = new LoadBalancingRule(loadBalancer, getExistingDestinations(lbpolicy.getId()), policyList); + LoadBalancingRule lbRule = new LoadBalancingRule(loadBalancer, getExistingDestinations(lbpolicy.getId()), + policyList, null); if (!validateRule(lbRule)) { - 
throw new InvalidParameterValueException("Failed to create Stickiness policy: Validation Failed " + cmd.getLbRuleId()); + throw new InvalidParameterValueException("Failed to create Stickiness policy: Validation Failed " + + cmd.getLbRuleId()); } /* Finally Insert into DB */ - LBStickinessPolicyVO policy = new LBStickinessPolicyVO(loadBalancer.getId(), cmd.getLBStickinessPolicyName(), cmd.getStickinessMethodName(), cmd.getparamList(), cmd.getDescription()); + LBStickinessPolicyVO policy = new LBStickinessPolicyVO(loadBalancer.getId(), cmd.getLBStickinessPolicyName(), + cmd.getStickinessMethodName(), cmd.getparamList(), cmd.getDescription()); policy = _lb2stickinesspoliciesDao.persist(policy); return policy; } + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_LB_HEALTHCHECKPOLICY_CREATE, eventDescription = "create load balancer health check to load balancer", create = true) + public HealthCheckPolicy createLBHealthCheckPolicy(CreateLBHealthCheckPolicyCmd cmd) { + UserContext caller = UserContext.current(); + + /* + * Validation of cmd Monitor interval must be greater than response + * timeout + */ + Map paramMap = cmd.getFullUrlParams(); + + if (paramMap.containsKey(ApiConstants.HEALTHCHECK_RESPONSE_TIMEOUT) + && paramMap.containsKey(ApiConstants.HEALTHCHECK_INTERVAL_TIME)) { + if (cmd.getResponsTimeOut() > cmd.getHealthCheckInterval()) + throw new InvalidParameterValueException( + "Failed to create HealthCheck policy : Monitor interval must be greater than response timeout"); + } + /* Validation : check corresponding load balancer rule exist */ + LoadBalancerVO loadBalancer = _lbDao.findById(cmd.getLbRuleId()); + if (loadBalancer == null) { + throw new InvalidParameterValueException("Failed: LB rule id: " + cmd.getLbRuleId() + " not present "); + } + + _accountMgr.checkAccess(caller.getCaller(), null, true, loadBalancer); + + if (loadBalancer.getState() == FirewallRule.State.Revoke) { + throw new InvalidParameterValueException("Failed: LB rule id: " + 
cmd.getLbRuleId() + + " is in deleting state: "); + } + + /* + * Validate Whether LB Provider has the capabilities to support Health + * Checks + */ + if (!validateHealthCheck(cmd)) { + throw new InvalidParameterValueException( + "Failed to create HealthCheck policy: Validation Failed (HealthCheck Policy is not supported by LB Provider for the LB rule id :)" + + cmd.getLbRuleId()); + } + + /* Validation : check for the multiple hc policies to the rule id */ + List hcPolicies = _lb2healthcheckDao.listByLoadBalancerId(cmd.getLbRuleId(), false); + if (hcPolicies.size() > 0) { + throw new InvalidParameterValueException( + "Failed to create HealthCheck policy: Already policy attached for the LB Rule id :" + + cmd.getLbRuleId()); + } + /* + * Specific validations using network element validator for specific + * validations + */ + LBHealthCheckPolicyVO hcpolicy = new LBHealthCheckPolicyVO(loadBalancer.getId(), cmd.getPingPath(), + cmd.getDescription(), cmd.getResponsTimeOut(), cmd.getHealthCheckInterval(), cmd.getHealthyThreshold(), + cmd.getUnhealthyThreshold()); + + List hcPolicyList = new ArrayList(); + hcPolicyList.add(new LbHealthCheckPolicy(hcpolicy.getpingpath(), hcpolicy.getDescription(), hcpolicy + .getResponseTime(), hcpolicy.getHealthcheckInterval(), hcpolicy.getHealthcheckThresshold(), hcpolicy + .getUnhealthThresshold())); + + // Finally Insert into DB + LBHealthCheckPolicyVO policy = new LBHealthCheckPolicyVO(loadBalancer.getId(), cmd.getPingPath(), + cmd.getDescription(), cmd.getResponsTimeOut(), cmd.getHealthCheckInterval(), cmd.getHealthyThreshold(), + cmd.getUnhealthyThreshold()); + + policy = _lb2healthcheckDao.persist(policy); + return policy; + } private boolean validateRule(LoadBalancingRule lbRule) { Network network = _networkDao.findById(lbRule.getNetworkId()); @@ -506,7 +624,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return false; } for (LoadBalancingServiceProvider ne : _lbProviders) { - boolean validated = 
ne.validateLBRule(network, lbRule); + boolean validated = ne.validateLBRule(network, lbRule); if (!validated) return false; } @@ -521,7 +639,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancerVO loadBalancer = _lbDao.findById(cmd.getLbRuleId()); if (loadBalancer == null) { - throw new InvalidParameterException("Invalid Load balancer Id:" + cmd.getLbRuleId()); + throw new InvalidParameterException("Invalid Load balancer Id:" + cmd.getLbRuleId()); } FirewallRule.State backupState = loadBalancer.getState(); loadBalancer.setState(FirewallRule.State.Add); @@ -529,11 +647,13 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements try { applyLoadBalancerConfig(cmd.getLbRuleId()); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to apply Stickiness policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavaliable:", e); + s_logger.warn("Unable to apply Stickiness policy to the lb rule: " + cmd.getLbRuleId() + + " because resource is unavaliable:", e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating sticky policy"); + s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + + " lb state rolback while creating sticky policy"); } deleteLBStickinessPolicy(cmd.getEntityId(), false); success = false; @@ -542,6 +662,36 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return success; } + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_LB_HEALTHCHECKPOLICY_CREATE, eventDescription = "Apply HealthCheckPolicy to load balancer ", async = true) + public boolean applyLBHealthCheckPolicy(CreateLBHealthCheckPolicyCmd cmd) { + boolean success = true; + + LoadBalancerVO loadBalancer = _lbDao.findById(cmd.getLbRuleId()); + if (loadBalancer == null) { + throw new 
InvalidParameterException("Invalid Load balancer Id:" + cmd.getLbRuleId()); + } + FirewallRule.State backupState = loadBalancer.getState(); + loadBalancer.setState(FirewallRule.State.Add); + _lbDao.persist(loadBalancer); + try { + applyLoadBalancerConfig(cmd.getLbRuleId()); + } catch (ResourceUnavailableException e) { + s_logger.warn("Unable to apply healthcheck policy to the lb rule: " + cmd.getLbRuleId() + + " because resource is unavaliable:", e); + if (isRollBackAllowedForProvider(loadBalancer)) { + loadBalancer.setState(backupState); + _lbDao.persist(loadBalancer); + s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + + " lb state rolback while creating healthcheck policy"); + } + deleteLBHealthCheckPolicy(cmd.getEntityId(), false); + success = false; + } + return success; + } + @Override @ActionEvent(eventType = EventTypes.EVENT_LB_STICKINESSPOLICY_DELETE, eventDescription = "revoking LB Stickiness policy ", async = true) public boolean deleteLBStickinessPolicy(long stickinessPolicyId, boolean apply) { @@ -555,13 +705,13 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(stickinessPolicy.getLoadBalancerId())); if (loadBalancer == null) { - throw new InvalidParameterException("Invalid Load balancer : " + stickinessPolicy.getLoadBalancerId() + " for Stickiness policy id: " + stickinessPolicyId); + throw new InvalidParameterException("Invalid Load balancer : " + stickinessPolicy.getLoadBalancerId() + + " for Stickiness policy id: " + stickinessPolicyId); } long loadBalancerId = loadBalancer.getId(); FirewallRule.State backupState = loadBalancer.getState(); _accountMgr.checkAccess(caller.getCaller(), null, true, loadBalancer); - if (apply) { if (loadBalancer.getState() == FirewallRule.State.Active) { loadBalancer.setState(FirewallRule.State.Add); @@ -571,12 +721,15 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements boolean backupStickyState 
= stickinessPolicy.isRevoke(); stickinessPolicy.setRevoke(true); _lb2stickinesspoliciesDao.persist(stickinessPolicy); - s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", stickinesspolicyID " + stickinessPolicyId); + s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", stickinesspolicyID " + + stickinessPolicyId); try { if (!applyLoadBalancerConfig(loadBalancerId)) { - s_logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for stickinesspolicyID " + stickinessPolicyId); - throw new CloudRuntimeException("Failed to remove load balancer rule id " + loadBalancerId + " for stickinesspolicyID " + stickinessPolicyId); + s_logger.warn("Failed to remove load balancer rule id " + loadBalancerId + + " for stickinesspolicyID " + stickinessPolicyId); + throw new CloudRuntimeException("Failed to remove load balancer rule id " + loadBalancerId + + " for stickinesspolicyID " + stickinessPolicyId); } } catch (ResourceUnavailableException e) { if (isRollBackAllowedForProvider(loadBalancer)) { @@ -584,7 +737,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _lb2stickinesspoliciesDao.persist(stickinessPolicy); loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting sticky policy: " + stickinessPolicyId); + s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting sticky policy: " + + stickinessPolicyId); } s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); success = false; @@ -592,9 +746,149 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } else { _lb2stickinesspoliciesDao.remove(stickinessPolicy.getLoadBalancerId()); } - return success; - } + } + + @DB + @Override + @ActionEvent(eventType = EventTypes.EVENT_LB_HEALTHCHECKPOLICY_DELETE, eventDescription = "revoking LB HealthCheck policy 
", async = true) + public boolean deleteLBHealthCheckPolicy(long healthCheckPolicyId, boolean apply) { + boolean success = true; + + UserContext caller = UserContext.current(); + LBHealthCheckPolicyVO healthCheckPolicy = _lb2healthcheckDao.findById(healthCheckPolicyId); + + if (healthCheckPolicy == null) { + throw new InvalidParameterException("Invalid HealthCheck policy id value: " + healthCheckPolicyId); + } + LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(healthCheckPolicy.getLoadBalancerId())); + if (loadBalancer == null) { + throw new InvalidParameterException("Invalid Load balancer : " + healthCheckPolicy.getLoadBalancerId() + + " for HealthCheck policy id: " + healthCheckPolicyId); + } + long loadBalancerId = loadBalancer.getId(); + FirewallRule.State backupState = loadBalancer.getState(); + _accountMgr.checkAccess(caller.getCaller(), null, true, loadBalancer); + + if (apply) { + if (loadBalancer.getState() == FirewallRule.State.Active) { + loadBalancer.setState(FirewallRule.State.Add); + _lbDao.persist(loadBalancer); + } + + boolean backupStickyState = healthCheckPolicy.isRevoke(); + healthCheckPolicy.setRevoke(true); + _lb2healthcheckDao.persist(healthCheckPolicy); + s_logger.debug("Set health check policy to revoke for loadbalancing rule id : " + loadBalancerId + + ", healthCheckpolicyID " + healthCheckPolicyId); + + // removing the state of services set by the monitor. 
+ List maps = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId); + if (maps != null) { + Transaction txn = Transaction.currentTxn(); + txn.start(); + s_logger.debug("Resetting health state policy for services in loadbalancing rule id : " + + loadBalancerId); + for (LoadBalancerVMMapVO map : maps) { + map.setState(null); + _lb2VmMapDao.persist(map); + } + txn.commit(); + } + + try { + if (!applyLoadBalancerConfig(loadBalancerId)) { + s_logger.warn("Failed to remove load balancer rule id " + loadBalancerId + + " for healthCheckpolicyID " + healthCheckPolicyId); + throw new CloudRuntimeException("Failed to remove load balancer rule id " + loadBalancerId + + " for healthCheckpolicyID " + healthCheckPolicyId); + } + } catch (ResourceUnavailableException e) { + if (isRollBackAllowedForProvider(loadBalancer)) { + healthCheckPolicy.setRevoke(backupStickyState); + _lb2healthcheckDao.persist(healthCheckPolicy); + loadBalancer.setState(backupState); + _lbDao.persist(loadBalancer); + s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + + " while deleting healthcheck policy: " + healthCheckPolicyId); + } + s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); + success = false; + } + } else { + _lb2healthcheckDao.remove(healthCheckPolicy.getLoadBalancerId()); + } + return success; + } + + // This method will check the status of services which has monitors created + // by CloudStack and update them in lbvmmap table + @DB + @Override + public void updateLBHealthChecks() throws ResourceUnavailableException { + List rules = _lbDao.listAll(); + List networks = _networkDao.listAll(); + List stateRules = null; + boolean isHandled = false; + for (NetworkVO ntwk : networks) { + Network network = _networkDao.findById(ntwk.getId()); + String capability = getLBCapability(network.getId(), Capability.HealthCheckPolicy.getName()); + + if (capability != null && capability.equalsIgnoreCase("true")) { + /* + * s_logger.debug( + * "HealthCheck 
Manager :: LB Provider in the Network has the Healthcheck policy capability :: " + * + provider.get(0).getName()); + */ + rules = _lbDao.listByNetworkId(network.getId()); + if (rules != null && rules.size() > 0) { + List lbrules = new ArrayList(); + for (LoadBalancerVO lb : rules) { + List dstList = getExistingDestinations(lb.getId()); + List hcPolicyList = getHealthCheckPolicies(lb.getId()); + // adding to lbrules list only if the LB rule + // hashealtChecks + if (hcPolicyList != null && hcPolicyList.size() > 0) { + LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, null, hcPolicyList); + lbrules.add(loadBalancing); + } + } + if (lbrules.size() > 0) { + isHandled = false; + for (LoadBalancingServiceProvider lbElement : _lbProviders) { + stateRules = lbElement.updateHealthChecks(network, (List) lbrules); + if (stateRules != null && stateRules.size() > 0) { + for (LoadBalancerTO lbto : stateRules) { + LoadBalancerVO ulb = _lbDao.findByUuid(lbto.getUuid()); + List lbVmMaps = _lb2VmMapDao.listByLoadBalancerId(ulb.getId()); + for (LoadBalancerVMMapVO lbVmMap : lbVmMaps) { + UserVm vm = _vmDao.findById(lbVmMap.getInstanceId()); + Nic nic = _nicDao.findByInstanceIdAndNetworkIdIncludingRemoved( + ulb.getNetworkId(), vm.getId()); + String dstIp = nic.getIp4Address(); + for (int i = 0; i < lbto.getDestinations().length; i++) { + LoadBalancerTO.DestinationTO des = lbto.getDestinations()[i]; + if (dstIp.equalsIgnoreCase(lbto.getDestinations()[i].getDestIp())) { + lbVmMap.setState(des.getMonitorState()); + _lb2VmMapDao.persist(lbVmMap); + s_logger.debug("Updating the LB VM Map table with the service state"); + } + } + } + } + isHandled = true; + } + if (isHandled) { + break; + } + } + } + } + } else { + // s_logger.debug("HealthCheck Manager :: LB Provider in the Network DNOT the Healthcheck policy capability "); + } + } + } private boolean isRollBackAllowedForProvider(LoadBalancerVO loadBalancer) { Network network = 
_networkDao.findById(loadBalancer.getNetworkId()); @@ -607,6 +901,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } return false; } + @Override @DB @ActionEvent(eventType = EventTypes.EVENT_ASSIGN_TO_LOAD_BALANCER_RULE, eventDescription = "assigning to load balancer", async = true) @@ -616,7 +911,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancerVO loadBalancer = _lbDao.findById(loadBalancerId); if (loadBalancer == null) { - throw new InvalidParameterValueException("Failed to assign to load balancer " + loadBalancerId + ", the load balancer was not found."); + throw new InvalidParameterValueException("Failed to assign to load balancer " + loadBalancerId + + ", the load balancer was not found."); } List mappedInstances = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId, false); @@ -635,7 +931,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements UserVm vm = _vmDao.findById(instanceId); if (vm == null || vm.getState() == State.Destroyed || vm.getState() == State.Expunging) { InvalidParameterValueException ex = new InvalidParameterValueException("Invalid instance id specified"); - ex.addProxyObject(vm, instanceId, "instanceId"); + ex.addProxyObject(vm, instanceId, "instanceId"); throw ex; } @@ -645,7 +941,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements throw new PermissionDeniedException("Cannot add virtual machines that do not belong to the same owner."); } - // Let's check to make sure the vm has a nic in the same network as the load balancing rule. + // Let's check to make sure the vm has a nic in the same network as + // the load balancing rule. 
List nics = _networkModel.getNics(vm.getId()); Nic nicInSameNetwork = null; for (Nic nic : nics) { @@ -656,8 +953,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } if (nicInSameNetwork == null) { - InvalidParameterValueException ex = new InvalidParameterValueException("VM " + instanceId + " cannot be added because it doesn't belong in the same network."); - ex.addProxyObject(vm, instanceId, "instanceId"); + InvalidParameterValueException ex = new InvalidParameterValueException("VM " + instanceId + + " cannot be added because it doesn't belong in the same network."); + ex.addProxyObject(vm, instanceId, "instanceId"); throw ex; } @@ -698,7 +996,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements txn.commit(); if (!vmInstanceIds.isEmpty()) { _lb2VmMapDao.remove(loadBalancer.getId(), vmInstanceIds, null); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while attaching VM: " + vmInstanceIds); + s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while attaching VM: " + + vmInstanceIds); } loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); @@ -707,9 +1006,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } if (!success) { - CloudRuntimeException ex = new CloudRuntimeException("Failed to add specified loadbalancerruleid for vms " + instanceIds); - ex.addProxyObject(loadBalancer, loadBalancerId, "loadBalancerId"); - // TBD: Also pack in the instanceIds in the exception using the right VO object or table name. + CloudRuntimeException ex = new CloudRuntimeException("Failed to add specified loadbalancerruleid for vms " + + instanceIds); + ex.addProxyObject(loadBalancer, loadBalancerId, "loadBalancerId"); + // TBD: Also pack in the instanceIds in the exception using the + // right VO object or table name. 
throw ex; } @@ -748,15 +1049,17 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (_autoScaleVmGroupDao.isAutoScaleLoadBalancer(loadBalancerId)) { // For autoscaled loadbalancer, the rules need not be applied, // meaning the call need not reach the resource layer. - // We can consider the job done and only need to remove the rules in DB + // We can consider the job done and only need to remove the + // rules in DB _lb2VmMapDao.remove(loadBalancer.getId(), instanceIds, null); return true; } if (!applyLoadBalancerConfig(loadBalancerId)) { s_logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for vms " + instanceIds); - CloudRuntimeException ex = new CloudRuntimeException("Failed to remove specified load balancer rule id for vms " + instanceIds); - ex.addProxyObject(loadBalancer, loadBalancerId, "loadBalancerId"); + CloudRuntimeException ex = new CloudRuntimeException( + "Failed to remove specified load balancer rule id for vms " + instanceIds); + ex.addProxyObject(loadBalancer, loadBalancerId, "loadBalancerId"); throw ex; } success = true; @@ -777,8 +1080,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); } if (!success) { - CloudRuntimeException ex = new CloudRuntimeException("Failed to remove specified load balancer rule id for vms " + instanceIds); - ex.addProxyObject(loadBalancer, loadBalancerId, "loadBalancerId"); + CloudRuntimeException ex = new CloudRuntimeException( + "Failed to remove specified load balancer rule id for vms " + instanceIds); + ex.addProxyObject(loadBalancer, loadBalancerId, "loadBalancerId"); throw ex; } return success; @@ -806,7 +1110,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements map.setRevoke(true); _lb2VmMapDao.persist(map); - s_logger.debug("Set load balancer rule for revoke: rule id " + map.getLoadBalancerId() + ", vmId " + 
instanceId); + s_logger.debug("Set load balancer rule for revoke: rule id " + map.getLoadBalancerId() + ", vmId " + + instanceId); } // Reapply all lbs that had the vm assigned @@ -827,10 +1132,10 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements Account caller = ctx.getCaller(); LoadBalancerVO rule = _lbDao.findById(loadBalancerId); + if (rule == null) { throw new InvalidParameterValueException("Unable to find load balancer rule " + loadBalancerId); } - _accountMgr.checkAccess(caller, null, true, rule); boolean result = deleteLoadBalancerRule(loadBalancerId, apply, caller, ctx.getCallerUserId(), true); @@ -841,7 +1146,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } @DB - public boolean deleteLoadBalancerRule(long loadBalancerId, boolean apply, Account caller, long callerUserId, boolean rollBack) { + public boolean deleteLoadBalancerRule(long loadBalancerId, boolean apply, Account caller, long callerUserId, + boolean rollBack) { LoadBalancerVO lb = _lbDao.findById(loadBalancerId); Transaction txn = Transaction.currentTxn(); boolean generateUsageEvent = false; @@ -865,10 +1171,17 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements for (LoadBalancerVMMapVO map : maps) { map.setRevoke(true); _lb2VmMapDao.persist(map); - s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + map.getInstanceId()); + s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + + map.getInstanceId()); } } + List hcPolicies = _lb2healthcheckDao.listByLoadBalancerId(loadBalancerId); + for (LBHealthCheckPolicyVO lbHealthCheck : hcPolicies) { + lbHealthCheck.setRevoke(true); + _lb2healthcheckDao.persist(lbHealthCheck); + } + if (generateUsageEvent) { // Generate usage event right after all rules were marked for revoke UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_DELETE, lb.getAccountId(), 0, lb.getId(), @@ -887,18 +1200,9 
@@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (apply) { try { - if (_autoScaleVmGroupDao.isAutoScaleLoadBalancer(loadBalancerId)) { - // Get the associated VmGroup - AutoScaleVmGroupVO vmGroup = _autoScaleVmGroupDao.listByAll(loadBalancerId, null).get(0); - if (!applyAutoScaleConfig(lb, vmGroup,vmGroup.getState())) { - s_logger.warn("Unable to apply the autoscale config"); - return false; - } - } else { - if (!applyLoadBalancerConfig(loadBalancerId)) { - s_logger.warn("Unable to apply the load balancer config"); - return false; - } + if (!applyLoadBalancerConfig(loadBalancerId)) { + s_logger.warn("Unable to apply the load balancer config"); + return false; } } catch (ResourceUnavailableException e) { if (rollBack && isRollBackAllowedForProvider(lb)) { @@ -920,15 +1224,17 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements FirewallRuleVO relatedRule = _firewallDao.findByRelatedId(lb.getId()); if (relatedRule != null) { - s_logger.warn("Unable to remove firewall rule id=" + lb.getId() + " as it has related firewall rule id=" + relatedRule.getId() + "; leaving it in Revoke state"); + s_logger.warn("Unable to remove firewall rule id=" + lb.getId() + " as it has related firewall rule id=" + + relatedRule.getId() + "; leaving it in Revoke state"); success = false; } else { _firewallMgr.removeRule(lb); } - // FIXME: breaking the dependency on ELB manager. This breaks functionality of ELB using virtual router + // FIXME: breaking the dependency on ELB manager. 
This breaks + // functionality of ELB using virtual router // Bug CS-15411 opened to document this - //_elbMgr.handleDeleteLoadBalancerRule(lb, callerUserId, caller); + // _elbMgr.handleDeleteLoadBalancerRule(lb, callerUserId, caller); if (success) { s_logger.debug("Load balancer with id " + lb.getId() + " is removed successfully"); @@ -939,7 +1245,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Override @ActionEvent(eventType = EventTypes.EVENT_LOAD_BALANCER_CREATE, eventDescription = "creating load balancer") - public LoadBalancer createLoadBalancerRule(CreateLoadBalancerRuleCmd lb, boolean openFirewall) throws NetworkRuleConflictException, InsufficientAddressCapacityException { + public LoadBalancer createLoadBalancerRule(CreateLoadBalancerRuleCmd lb, boolean openFirewall) + throws NetworkRuleConflictException, InsufficientAddressCapacityException { Account lbOwner = _accountMgr.getAccount(lb.getEntityOwnerId()); int defPortStart = lb.getDefaultPortStart(); @@ -949,7 +1256,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements throw new InvalidParameterValueException("privatePort is an invalid value: " + defPortEnd); } if (defPortStart > defPortEnd) { - throw new InvalidParameterValueException("private port range is invalid: " + defPortStart + "-" + defPortEnd); + throw new InvalidParameterValueException("private port range is invalid: " + defPortStart + "-" + + defPortEnd); } if ((lb.getAlgorithm() == null) || !NetUtils.isValidAlgorithm(lb.getAlgorithm())) { throw new InvalidParameterValueException("Invalid algorithm: " + lb.getAlgorithm()); @@ -963,9 +1271,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements Network network = _networkModel.getNetwork(lb.getNetworkId()); - // FIXME: breaking the dependency on ELB manager. This breaks functionality of ELB using virtual router + // FIXME: breaking the dependency on ELB manager. 
This breaks + // functionality of ELB using virtual router // Bug CS-15411 opened to document this - //LoadBalancer result = _elbMgr.handleCreateLoadBalancerRule(lb, lbOwner, lb.getNetworkId()); + // LoadBalancer result = _elbMgr.handleCreateLoadBalancerRule(lb, + // lbOwner, lb.getNetworkId()); LoadBalancer result = null; if (result == null) { IpAddress systemIp = null; @@ -978,7 +1288,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements // Validate ip address if (ipVO == null) { - throw new InvalidParameterValueException("Unable to create load balance rule; can't find/allocate source IP"); + throw new InvalidParameterValueException( + "Unable to create load balance rule; can't find/allocate source IP"); } else if (ipVO.isOneToOneNat()) { throw new NetworkRuleConflictException("Can't do load balance on ip address: " + ipVO.getAddress()); } @@ -986,13 +1297,14 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements boolean performedIpAssoc = false; try { if (ipVO.getAssociatedWithNetworkId() == null) { - boolean assignToVpcNtwk = network.getVpcId() != null - && ipVO.getVpcId() != null && ipVO.getVpcId().longValue() == network.getVpcId(); + boolean assignToVpcNtwk = network.getVpcId() != null && ipVO.getVpcId() != null + && ipVO.getVpcId().longValue() == network.getVpcId(); if (assignToVpcNtwk) { - //set networkId just for verification purposes + // set networkId just for verification purposes _networkModel.checkIpForService(ipVO, Service.Lb, lb.getNetworkId()); - s_logger.debug("The ip is not associated with the VPC network id="+ lb.getNetworkId() + " so assigning"); + s_logger.debug("The ip is not associated with the VPC network id=" + lb.getNetworkId() + + " so assigning"); ipVO = _networkMgr.associateIPToGuestNetwork(ipAddrId, lb.getNetworkId(), false); performedIpAssoc = true; } @@ -1000,8 +1312,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _networkModel.checkIpForService(ipVO, 
Service.Lb, null); } - if (ipVO.getAssociatedWithNetworkId() == null) { - throw new InvalidParameterValueException("Ip address " + ipVO + " is not assigned to the network " + network); + if (ipVO.getAssociatedWithNetworkId() == null) { + throw new InvalidParameterValueException("Ip address " + ipVO + " is not assigned to the network " + + network); } if (lb.getSourceIpAddressId() == null) { @@ -1015,7 +1328,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } } finally { if (result == null && systemIp != null) { - s_logger.debug("Releasing system IP address " + systemIp + " as corresponding lb rule failed to create"); + s_logger.debug("Releasing system IP address " + systemIp + + " as corresponding lb rule failed to create"); _networkMgr.handleSystemIpRelease(systemIp); } // release ip address if ipassoc was perfored @@ -1035,7 +1349,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Override @DB - public LoadBalancer createLoadBalancer(CreateLoadBalancerRuleCmd lb, boolean openFirewall) throws NetworkRuleConflictException { + public LoadBalancer createLoadBalancer(CreateLoadBalancerRuleCmd lb, boolean openFirewall) + throws NetworkRuleConflictException { UserContext caller = UserContext.current(); int srcPortStart = lb.getSourcePortStart(); int defPortStart = lb.getDefaultPortStart(); @@ -1045,45 +1360,48 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements IPAddressVO ipAddr = _ipAddressDao.findById(sourceIpId); // make sure ip address exists if (ipAddr == null || !ipAddr.readyToUse()) { - InvalidParameterValueException ex = new InvalidParameterValueException("Unable to create load balancer rule, invalid IP address id specified"); - ex.addProxyObject(ipAddr, sourceIpId, "sourceIpId"); + InvalidParameterValueException ex = new InvalidParameterValueException( + "Unable to create load balancer rule, invalid IP address id specified"); + ex.addProxyObject(ipAddr, sourceIpId, 
"sourceIpId"); throw ex; } else if (ipAddr.isOneToOneNat()) { - InvalidParameterValueException ex = new InvalidParameterValueException("Unable to create load balancer rule; specified sourceip id has static nat enabled"); - ex.addProxyObject(ipAddr, sourceIpId, "sourceIpId"); + InvalidParameterValueException ex = new InvalidParameterValueException( + "Unable to create load balancer rule; specified sourceip id has static nat enabled"); + ex.addProxyObject(ipAddr, sourceIpId, "sourceIpId"); throw ex; } Long networkId = ipAddr.getAssociatedWithNetworkId(); if (networkId == null) { - InvalidParameterValueException ex = new InvalidParameterValueException("Unable to create load balancer rule ; specified sourceip id is not associated with any network"); - ex.addProxyObject(ipAddr, sourceIpId, "sourceIpId"); + InvalidParameterValueException ex = new InvalidParameterValueException( + "Unable to create load balancer rule ; specified sourceip id is not associated with any network"); + ex.addProxyObject(ipAddr, sourceIpId, "sourceIpId"); throw ex; - } _firewallMgr.validateFirewallRule(caller.getCaller(), ipAddr, srcPortStart, srcPortEnd, lb.getProtocol(), Purpose.LoadBalancing, FirewallRuleType.User, networkId, null); - NetworkVO network = _networkDao.findById(networkId); - _accountMgr.checkAccess(caller.getCaller(), null, true, ipAddr); // verify that lb service is supported by the network if (!_networkModel.areServicesSupportedInNetwork(network.getId(), Service.Lb)) { - InvalidParameterValueException ex = new InvalidParameterValueException("LB service is not supported in specified network id"); - ex.addProxyObject(network, networkId, "networkId"); + InvalidParameterValueException ex = new InvalidParameterValueException( + "LB service is not supported in specified network id"); + ex.addProxyObject(network, networkId, "networkId"); throw ex; } Transaction txn = Transaction.currentTxn(); txn.start(); - LoadBalancerVO newRule = new LoadBalancerVO(lb.getXid(), lb.getName(), 
lb.getDescription(), lb.getSourceIpAddressId(), lb.getSourcePortEnd(), lb.getDefaultPortStart(), - lb.getAlgorithm(), network.getId(), ipAddr.getAllocatedToAccountId(), ipAddr.getAllocatedInDomainId()); + LoadBalancerVO newRule = new LoadBalancerVO(lb.getXid(), lb.getName(), lb.getDescription(), + lb.getSourceIpAddressId(), lb.getSourcePortEnd(), lb.getDefaultPortStart(), lb.getAlgorithm(), + network.getId(), ipAddr.getAllocatedToAccountId(), ipAddr.getAllocatedInDomainId()); // verify rule is supported by Lb provider of the network - LoadBalancingRule loadBalancing = new LoadBalancingRule(newRule, new ArrayList(), new ArrayList()); + LoadBalancingRule loadBalancing = new LoadBalancingRule(newRule, new ArrayList(), + new ArrayList(), new ArrayList()); if (!validateRule(loadBalancing)) { throw new InvalidParameterValueException("LB service provider cannot support this rule"); } @@ -1091,7 +1409,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements newRule = _lbDao.persist(newRule); if (openFirewall) { - _firewallMgr.createRuleForAllCidrs(sourceIpId, caller.getCaller(), lb.getSourcePortStart(), + _firewallMgr.createRuleForAllCidrs(sourceIpId, caller.getCaller(), lb.getSourcePortStart(), lb.getSourcePortEnd(), lb.getProtocol(), null, null, newRule.getId(), networkId); } @@ -1102,10 +1420,12 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (!_firewallDao.setStateToAdd(newRule)) { throw new CloudRuntimeException("Unable to update the state to add for " + newRule); } - s_logger.debug("Load balancer " + newRule.getId() + " for Ip address id=" + sourceIpId + ", public port " + srcPortStart + ", private port " + defPortStart + " is added successfully."); + s_logger.debug("Load balancer " + newRule.getId() + " for Ip address id=" + sourceIpId + ", public port " + + srcPortStart + ", private port " + defPortStart + " is added successfully."); UserContext.current().setEventDetails("Load balancer Id: " + newRule.getId()); 
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_CREATE, ipAddr.getAllocatedToAccountId(), - ipAddr.getDataCenterId(), newRule.getId(), null, LoadBalancingRule.class.getName(), newRule.getUuid()); + ipAddr.getDataCenterId(), newRule.getId(), null, LoadBalancingRule.class.getName(), + newRule.getUuid()); txn.commit(); return newRule; @@ -1131,8 +1451,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements public boolean applyLoadBalancerConfig(long lbRuleId) throws ResourceUnavailableException { LoadBalancerVO lb = _lbDao.findById(lbRuleId); List lbs; - if (isRollBackAllowedForProvider(lb)) { - // this is for Netscalar type of devices. if their is failure the db entries will be rollbacked. + if (isRollBackAllowedForProvider(lb)) { + // this is for Netscalar type of devices. if their is failure the db + // entries will be rollbacked. lbs = Arrays.asList(lb); } else { // get all rules in transition state @@ -1141,6 +1462,20 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return applyLoadBalancerRules(lbs, true); } + @Override + public boolean revokeLoadBalancersForNetwork(long networkId) throws ResourceUnavailableException { + List lbs = _lbDao.listByNetworkId(networkId); + if (lbs != null) { + for(LoadBalancerVO lb : lbs) { // called during restart, not persisting state in db + lb.setState(FirewallRule.State.Revoke); + } + return applyLoadBalancerRules(lbs, false); // called during restart, not persisting state in db + } else { + s_logger.info("Network id=" + networkId + " doesn't have load balancer rules, nothing to revoke"); + return true; + } + } + @Override public boolean applyLoadBalancersForNetwork(long networkId) throws ResourceUnavailableException { List lbs = _lbDao.listByNetworkId(networkId); @@ -1153,13 +1488,14 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } @Override - public boolean applyRules(Network network, Purpose purpose, List rules) + public boolean 
applyRules(Network network, Purpose purpose, List rules) throws ResourceUnavailableException { - assert(purpose == Purpose.LoadBalancing): "LB Manager asked to handle non-LB rules"; + assert (purpose == Purpose.LoadBalancing) : "LB Manager asked to handle non-LB rules"; boolean handled = false; - for (LoadBalancingServiceProvider lbElement: _lbProviders) { + for (LoadBalancingServiceProvider lbElement : _lbProviders) { Provider provider = lbElement.getProvider(); - boolean isLbProvider = _networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.Lb, provider); + boolean isLbProvider = _networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.Lb, + provider); if (!isLbProvider) { continue; } @@ -1170,16 +1506,33 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return handled; } + private LoadBalancingRule getLoadBalancerRuleToApply(LoadBalancerVO lb) { + + List policyList = getStickinessPolicies(lb.getId()); + LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, null, policyList, null); + + if (_autoScaleVmGroupDao.isAutoScaleLoadBalancer(lb.getId())) { + // Get the associated VmGroup + AutoScaleVmGroupVO vmGroup = _autoScaleVmGroupDao.listByAll(lb.getId(), null).get(0); + LbAutoScaleVmGroup lbAutoScaleVmGroup = getLbAutoScaleVmGroup(vmGroup, vmGroup.getState(), lb); + loadBalancing.setAutoScaleVmGroup(lbAutoScaleVmGroup); + } else { + List dstList = getExistingDestinations(lb.getId()); + loadBalancing.setDestinations(dstList); + List hcPolicyList = getHealthCheckPolicies(lb.getId()); + loadBalancing.setHealthCheckPolicies(hcPolicyList); + } + + return loadBalancing; + } + @DB - protected boolean applyLoadBalancerRules(List lbs, boolean updateRulesInDB) throws ResourceUnavailableException { + protected boolean applyLoadBalancerRules(List lbs, boolean updateRulesInDB) + throws ResourceUnavailableException { Transaction txn = Transaction.currentTxn(); List rules = new ArrayList(); for (LoadBalancerVO lb : 
lbs) { - List dstList = getExistingDestinations(lb.getId()); - List policyList = getStickinessPolicies(lb.getId()); - - LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList); - rules.add(loadBalancing); + rules.add(getLoadBalancerRuleToApply(lb)); } if (!_networkMgr.applyRules(rules, FirewallRule.Purpose.LoadBalancing, this, false)) { @@ -1217,18 +1570,28 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (_lb2VmMapDao.listByLoadBalancerId(lb.getId()).isEmpty()) { lb.setState(FirewallRule.State.Add); _lbDao.persist(lb); - s_logger.debug("LB rule " + lb.getId() + " state is set to Add as there are no more active LB-VM mappings"); + s_logger.debug("LB rule " + lb.getId() + + " state is set to Add as there are no more active LB-VM mappings"); } // remove LB-Stickiness policy mapping that were state to revoke - List stickinesspolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId(lb.getId(), true); + List stickinesspolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId( + lb.getId(), true); if (!stickinesspolicies.isEmpty()) { _lb2stickinesspoliciesDao.remove(lb.getId(), true); s_logger.debug("Load balancer rule id " + lb.getId() + " is removed stickiness policies"); } - txn.commit(); + // remove LB-HealthCheck policy mapping that were state to + // revoke + List healthCheckpolicies = _lb2healthcheckDao.listByLoadBalancerId(lb.getId(), + true); + if (!healthCheckpolicies.isEmpty()) { + _lb2healthcheckDao.remove(lb.getId(), true); + s_logger.debug("Load balancer rule id " + lb.getId() + " is removed health check monitors policies"); + } + txn.commit(); if (checkForReleaseElasticIp) { boolean success = true; long count = _firewallDao.countRulesByIpId(lb.getSourceIpAddressId()); @@ -1236,7 +1599,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements try { success = handleSystemLBIpRelease(lb); } catch (Exception ex) { - s_logger.warn("Failed to release system ip as a part of lb rule " + 
lb + " deletion due to exception ", ex); + s_logger.warn("Failed to release system ip as a part of lb rule " + lb + + " deletion due to exception ", ex); success = false; } finally { if (!success) { @@ -1245,7 +1609,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } } } - // if the rule is the last one for the ip address assigned to VPC, unassign it from the network + // if the rule is the last one for the ip address assigned to + // VPC, unassign it from the network IpAddress ip = _ipAddressDao.findById(lb.getSourceIpAddressId()); _vpcMgr.unassignIPFromVpcNetwork(ip.getId(), lb.getNetworkId()); } @@ -1259,14 +1624,16 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements boolean success = true; if (ip.getSystem()) { s_logger.debug("Releasing system ip address " + lb.getSourceIpAddressId() + " as a part of delete lb rule"); - if (!_networkMgr.disassociatePublicIpAddress(lb.getSourceIpAddressId(), UserContext.current().getCallerUserId(), UserContext.current().getCaller())) { - s_logger.warn("Unable to release system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); + if (!_networkMgr.disassociatePublicIpAddress(lb.getSourceIpAddressId(), UserContext.current() + .getCallerUserId(), UserContext.current().getCaller())) { + s_logger.warn("Unable to release system ip address id=" + lb.getSourceIpAddressId() + + " as a part of delete lb rule"); success = false; } else { - s_logger.warn("Successfully released system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); + s_logger.warn("Successfully released system ip address id=" + lb.getSourceIpAddressId() + + " as a part of delete lb rule"); } } - return success; } @@ -1287,7 +1654,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Override public boolean removeAllLoadBalanacersForNetwork(long networkId, Account caller, long callerUserId) { - List rules = 
_firewallDao.listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.LoadBalancing); + List rules = _firewallDao + .listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.LoadBalancing); if (rules != null) s_logger.debug("Found " + rules.size() + " lb rules to cleanup"); for (FirewallRule rule : rules) { @@ -1306,12 +1674,28 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements List sDbpolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId(lbId); for (LBStickinessPolicyVO sDbPolicy : sDbpolicies) { - LbStickinessPolicy sPolicy = new LbStickinessPolicy(sDbPolicy.getMethodName(), sDbPolicy.getParams(), sDbPolicy.isRevoke()); + LbStickinessPolicy sPolicy = new LbStickinessPolicy(sDbPolicy.getMethodName(), sDbPolicy.getParams(), + sDbPolicy.isRevoke()); stickinessPolicies.add(sPolicy); } return stickinessPolicies; } + @Override + public List getHealthCheckPolicies(long lbId) { + List healthCheckPolicies = new ArrayList(); + List hcDbpolicies = _lb2healthcheckDao.listByLoadBalancerId(lbId); + + for (LBHealthCheckPolicyVO policy : hcDbpolicies) { + String pingpath = policy.getpingpath(); + LbHealthCheckPolicy hDbPolicy = new LbHealthCheckPolicy(pingpath, policy.getDescription(), + policy.getResponseTime(), policy.getHealthcheckInterval(), policy.getHealthcheckThresshold(), + policy.getUnhealthThresshold(), policy.isRevoke()); + healthCheckPolicies.add(hDbPolicy); + } + return healthCheckPolicies; + } + @Override public List getExistingDestinations(long lbId) { List dstList = new ArrayList(); @@ -1323,7 +1707,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements UserVm vm = _vmDao.findById(lbVmMap.getInstanceId()); Nic nic = _nicDao.findByInstanceIdAndNetworkIdIncludingRemoved(lb.getNetworkId(), vm.getId()); dstIp = nic.getIp4Address(); - LbDestination lbDst = new LbDestination(lb.getDefaultPortStart(), lb.getDefaultPortEnd(), dstIp, lbVmMap.isRevoke()); + LbDestination lbDst = new LbDestination(lb.getDefaultPortStart(), 
lb.getDefaultPortEnd(), dstIp, + lbVmMap.isRevoke()); dstList.add(lbDst); } return dstList; @@ -1369,16 +1754,19 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements applyLoadBalancerConfig(lbRuleId); } catch (ResourceUnavailableException e) { if (isRollBackAllowedForProvider(lb)) { - /* NOTE : We use lb object to update db instead of lbBackup object since db layer will fail to update if there is no change in the object. + /* + * NOTE : We use lb object to update db instead of lbBackup + * object since db layer will fail to update if there is no + * change in the object. */ if (lbBackup.getName() != null) { - lb.setName(lbBackup.getName()); + lb.setName(lbBackup.getName()); } if (lbBackup.getDescription() != null) { lb.setDescription(lbBackup.getDescription()); } if (lbBackup.getAlgorithm() != null) { - lb.setAlgorithm(lbBackup.getAlgorithm()); + lb.setAlgorithm(lbBackup.getAlgorithm()); } lb.setState(lbBackup.getState()); _lbDao.update(lb.getId(), lb); @@ -1399,7 +1787,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } @Override - public List listLoadBalancerInstances(ListLoadBalancerRuleInstancesCmd cmd) throws PermissionDeniedException { + public List listLoadBalancerInstances(ListLoadBalancerRuleInstancesCmd cmd) + throws PermissionDeniedException { Account caller = UserContext.current().getCaller(); Long loadBalancerId = cmd.getId(); Boolean applied = cmd.isApplied(); @@ -1428,10 +1817,12 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } IPAddressVO addr = _ipAddressDao.findById(loadBalancer.getSourceIpAddressId()); - List userVms = _vmDao.listVirtualNetworkInstancesByAcctAndZone(loadBalancer.getAccountId(), addr.getDataCenterId(), loadBalancer.getNetworkId()); + List userVms = _vmDao.listVirtualNetworkInstancesByAcctAndZone(loadBalancer.getAccountId(), + addr.getDataCenterId(), loadBalancer.getNetworkId()); for (UserVmVO userVm : userVms) { - // if the VM is destroyed, being 
expunged, in an error state, or in an unknown state, skip it + // if the VM is destroyed, being expunged, in an error state, or in + // an unknown state, skip it switch (userVm.getState()) { case Destroyed: case Expunging: @@ -1449,10 +1840,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return loadBalancerInstances; } - @Override - public List getStickinessMethods(long networkid) - { + public List getStickinessMethods(long networkid) { String capability = getLBCapability(networkid, Capability.SupportedStickinessMethods.getName()); if (capability == null) { return null; @@ -1465,7 +1854,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } @Override - public List searchForLBStickinessPolicies(ListLBStickinessPoliciesCmd cmd) throws PermissionDeniedException { + public List searchForLBStickinessPolicies(ListLBStickinessPoliciesCmd cmd) + throws PermissionDeniedException { Account caller = UserContext.current().getCaller(); Long loadBalancerId = cmd.getLbRuleId(); LoadBalancerVO loadBalancer = _lbDao.findById(loadBalancerId); @@ -1480,6 +1870,20 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return sDbpolicies; } + @Override + public List searchForLBHealthCheckPolicies(ListLBHealthCheckPoliciesCmd cmd) + throws PermissionDeniedException { + Account caller = UserContext.current().getCaller(); + Long loadBalancerId = cmd.getLbRuleId(); + LoadBalancerVO loadBalancer = _lbDao.findById(loadBalancerId); + if (loadBalancer == null) { + return null; + } + _accountMgr.checkAccess(caller, null, true, loadBalancer); + List hcDbpolicies = _lb2healthcheckDao.listByLoadBalancerId(cmd.getLbRuleId()); + return hcDbpolicies; + } + @Override public Pair, Integer> searchForLoadBalancers(ListLoadBalancerRulesCmd cmd) { Long ipId = cmd.getPublicIpId(); @@ -1493,8 +1897,10 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements Account caller = UserContext.current().getCaller(); 
List permittedAccounts = new ArrayList(); - Ternary domainIdRecursiveListProject = new Ternary(cmd.getDomainId(), cmd.isRecursive(), null); - _accountMgr.buildACLSearchParameters(caller, id, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, domainIdRecursiveListProject, cmd.listAll(), false); + Ternary domainIdRecursiveListProject = new Ternary( + cmd.getDomainId(), cmd.isRecursive(), null); + _accountMgr.buildACLSearchParameters(caller, id, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, + domainIdRecursiveListProject, cmd.listAll(), false); Long domainId = domainIdRecursiveListProject.first(); Boolean isRecursive = domainIdRecursiveListProject.second(); ListProjectResourcesCriteria listProjectResourcesCriteria = domainIdRecursiveListProject.third(); @@ -1510,13 +1916,15 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (instanceId != null) { SearchBuilder lbVMSearch = _lb2VmMapDao.createSearchBuilder(); lbVMSearch.and("instanceId", lbVMSearch.entity().getInstanceId(), SearchCriteria.Op.EQ); - sb.join("lbVMSearch", lbVMSearch, sb.entity().getId(), lbVMSearch.entity().getLoadBalancerId(), JoinBuilder.JoinType.INNER); + sb.join("lbVMSearch", lbVMSearch, sb.entity().getId(), lbVMSearch.entity().getLoadBalancerId(), + JoinBuilder.JoinType.INNER); } if (zoneId != null) { SearchBuilder ipSearch = _ipAddressDao.createSearchBuilder(); ipSearch.and("zoneId", ipSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); - sb.join("ipSearch", ipSearch, sb.entity().getSourceIpAddressId(), ipSearch.entity().getId(), JoinBuilder.JoinType.INNER); + sb.join("ipSearch", ipSearch, sb.entity().getSourceIpAddressId(), ipSearch.entity().getId(), + JoinBuilder.JoinType.INNER); } if (tags != null && !tags.isEmpty()) { @@ -1528,7 +1936,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } tagSearch.and("resourceType", tagSearch.entity().getResourceType(), SearchCriteria.Op.EQ); sb.groupBy(sb.entity().getId()); 
- sb.join("tagSearch", tagSearch, sb.entity().getId(), tagSearch.entity().getResourceId(), JoinBuilder.JoinType.INNER); + sb.join("tagSearch", tagSearch, sb.entity().getId(), tagSearch.entity().getResourceId(), + JoinBuilder.JoinType.INNER); } SearchCriteria sc = sb.create(); @@ -1561,7 +1970,6 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements sc.setJoinParameters("ipSearch", "zoneId", zoneId); } - if (tags != null && !tags.isEmpty()) { int count = 0; sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.LoadBalancer.toString()); @@ -1583,7 +1991,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements for (LoadBalancerVO lb : lbs) { List dstList = getExistingDestinations(lb.getId()); List policyList = this.getStickinessPolicies(lb.getId()); - LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList); + List hcPolicyList = this.getHealthCheckPolicies(lb.getId()); + LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList, hcPolicyList); lbRules.add(loadBalancing); } return lbRules; @@ -1594,10 +2003,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return _lbDao.findById(lbId); } - protected void removeLBRule(LoadBalancerVO rule) { - //remove the rule + protected void removeLBRule(LoadBalancerVO rule) { + // remove the rule _lbDao.remove(rule.getId()); } - - } diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index ce65586ec6e..45b11f702d3 100755 --- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -27,11 +27,9 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Queue; import java.util.Set; import java.util.TimeZone; import 
java.util.concurrent.BlockingQueue; -import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; @@ -41,8 +39,8 @@ import java.util.concurrent.TimeUnit; import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; + import org.apache.cloudstack.api.command.admin.router.UpgradeRouterCmd; -import com.cloud.agent.api.to.*; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -167,13 +165,13 @@ import com.cloud.network.dao.VirtualRouterProviderDao; import com.cloud.network.dao.VpnUserDao; import com.cloud.network.lb.LoadBalancingRule; import com.cloud.network.lb.LoadBalancingRule.LbDestination; +import com.cloud.network.lb.LoadBalancingRule.LbHealthCheckPolicy; import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; import com.cloud.network.lb.LoadBalancingRulesManager; import com.cloud.network.router.VirtualRouter.RedundantState; import com.cloud.network.router.VirtualRouter.Role; import com.cloud.network.rules.FirewallRule; import com.cloud.network.rules.FirewallRule.Purpose; -import com.cloud.network.rules.FirewallRuleVO; import com.cloud.network.rules.PortForwardingRule; import com.cloud.network.rules.RulesManager; import com.cloud.network.rules.StaticNat; @@ -209,7 +207,6 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.PasswordGenerator; import com.cloud.utils.StringUtils; - import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; @@ -1702,15 +1699,30 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V String defaultNetworkStartIp = null, defaultNetworkStartIpv6 = null; if (!setupPublicNetwork) { if (guestNetwork.getCidr() != null) { - String startIp = _networkModel.getStartIpAddress(guestNetwork.getId()); - if (startIp 
!= null && _ipAddressDao.findByIpAndSourceNetworkId(guestNetwork.getId(), startIp).getAllocatedTime() == null) { - defaultNetworkStartIp = startIp; - } else if (s_logger.isDebugEnabled()){ - s_logger.debug("First ip " + startIp + " in network id=" + guestNetwork.getId() + - " is already allocated, can't use it for domain router; will get random ip address from the range"); - } + //Check the placeholder nic, and if it's ip address is not empty, allocate it from there + String requestedGateway = null; + if (guestNetwork.getGateway() != null) { + requestedGateway = guestNetwork.getGateway(); + } else if (plan != null && plan.getPodId() != null) { + Pod pod = _configMgr.getPod(plan.getPodId()); + requestedGateway = pod.getGateway(); + } + Nic placeholder = _networkModel.getPlaceholderNic(guestNetwork, null); + if (placeholder != null) { + s_logger.debug("Requesting ip address " + placeholder.getIp4Address() + " stored in placeholder nic for the network " + guestNetwork); + defaultNetworkStartIp = placeholder.getIp4Address(); + } else { + String startIp = _networkModel.getStartIpAddress(guestNetwork.getId()); + if (startIp != null && _ipAddressDao.findByIpAndSourceNetworkId(guestNetwork.getId(), startIp).getAllocatedTime() == null) { + defaultNetworkStartIp = startIp; + } else if (s_logger.isDebugEnabled()){ + s_logger.debug("First ip " + startIp + " in network id=" + guestNetwork.getId() + + " is already allocated, can't use it for domain router; will get random ip address from the range"); + } + } } + //FIXME - get ipv6 stored in the placeholder if (guestNetwork.getIp6Cidr() != null) { String startIpv6 = _networkModel.getStartIpv6Address(guestNetwork.getId()); if (startIpv6 != null && _ipv6Dao.findByNetworkIdAndIp(guestNetwork.getId(), startIpv6) == null) { @@ -2382,11 +2394,12 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V for (LoadBalancerVO lb : lbs) { List dstList = _lbMgr.getExistingDestinations(lb.getId()); List policyList = 
_lbMgr.getStickinessPolicies(lb.getId()); - LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList); + List hcPolicyList = _lbMgr.getHealthCheckPolicies(lb.getId()); + LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList, hcPolicyList); lbRules.add(loadBalancing); } } - + s_logger.debug("Found " + lbRules.size() + " load balancing rule(s) to apply as a part of domR " + router + " start."); if (!lbRules.isEmpty()) { createApplyLoadBalancingRulesCommands(lbRules, router, cmds, guestNetworkId); @@ -3284,7 +3297,8 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V for (LoadBalancerVO lb : lbs) { List dstList = _lbMgr.getExistingDestinations(lb.getId()); List policyList = _lbMgr.getStickinessPolicies(lb.getId()); - LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList,policyList); + List hcPolicyList = _lbMgr.getHealthCheckPolicies(lb.getId() ); + LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList, hcPolicyList); lbRules.add(loadBalancing); } return sendLBRules(router, lbRules, network.getId()); diff --git a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java index 82c0015e317..d64a0212b46 100755 --- a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java +++ b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java @@ -511,13 +511,13 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); sb.and("username", sb.entity().getUsername(), SearchCriteria.Op.EQ); - sb.and("state", sb.entity().getState(), SearchCriteria.Op.EQ); + sb.and("state", sb.entity().getState(), Op.IN); SearchCriteria sc = sb.create(); _accountMgr.buildACLSearchCriteria(sc, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria); //list only active users - 
sc.setParameters("state", State.Active); + sc.setParameters("state", State.Active, State.Add); if (id != null) { sc.setParameters("id", id); diff --git a/server/src/com/cloud/server/ConfigurationServerImpl.java b/server/src/com/cloud/server/ConfigurationServerImpl.java index 8c665ad1eee..53df3b970a8 100755 --- a/server/src/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/com/cloud/server/ConfigurationServerImpl.java @@ -106,10 +106,7 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.utils.script.Script; import com.cloud.uuididentity.dao.IdentityDao; -import org.apache.cloudstack.region.RegionVO; -import org.apache.cloudstack.region.dao.RegionDao; -import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; + @Component public class ConfigurationServerImpl extends ManagerBase implements ConfigurationServer { @@ -152,7 +149,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio public void persistDefaultValues() throws InternalErrorException { fixupScriptFileAttribute(); - + // Create system user and admin user saveUser(); @@ -337,23 +334,20 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio @DB protected void saveUser() { - int region_id = _configDao.getRegionId(); // insert system account - String insertSql = "INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, region_id) VALUES (1, UUID(), 'system', '1', '1', ?)"; + String insertSql = "INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id) VALUES (1, UUID(), 'system', '1', '1')"; Transaction txn = Transaction.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); - stmt.setInt(1, region_id); stmt.executeUpdate(); } catch (SQLException ex) { } // insert system user - insertSql = "INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, lastname, created, region_id)" 
+ - " VALUES (1, UUID(), 'system', RAND(), 1, 'system', 'cloud', now(), ?)"; + insertSql = "INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, lastname, created)" + + " VALUES (1, UUID(), 'system', RAND(), 1, 'system', 'cloud', now())"; txn = Transaction.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); - stmt.setInt(1, region_id); stmt.executeUpdate(); } catch (SQLException ex) { } @@ -366,23 +360,21 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio String lastname = "cloud"; // create an account for the admin user first - insertSql = "INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, region_id) VALUES (" + id + ", UUID(), '" + username + "', '1', '1', ?)"; + insertSql = "INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id) VALUES (" + id + ", UUID(), '" + username + "', '1', '1')"; txn = Transaction.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); - stmt.setInt(1, region_id); stmt.executeUpdate(); } catch (SQLException ex) { } // now insert the user - insertSql = "INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, lastname, created, state, region_id) " + - "VALUES (" + id + ", UUID(), '" + username + "', RAND(), 2, '" + firstname + "','" + lastname + "',now(), 'disabled', ?)"; + insertSql = "INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, lastname, created, state) " + + "VALUES (" + id + ", UUID(), '" + username + "', RAND(), 2, '" + firstname + "','" + lastname + "',now(), 'disabled')"; txn = Transaction.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); - stmt.setInt(1, region_id); stmt.executeUpdate(); } catch (SQLException ex) { } @@ -708,16 +700,16 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } } - + private void fixupScriptFileAttribute() { - // 
TODO : this is a hacking fix to workaround that executable bit is not preserved in WAR package + // TODO : this is a hacking fix to workaround that executable bit is not preserved in WAR package String scriptPath = Script.findScript("", "scripts/vm/systemvm/injectkeys.sh"); if(scriptPath != null) { File file = new File(scriptPath); if(!file.canExecute()) { s_logger.info("Some of the shell script files may not have executable bit set. Fixup..."); - - String cmd = "chmod ugo+x " + scriptPath; + + String cmd = "sudo chmod ugo+x " + scriptPath; s_logger.info("Executing " + cmd); String result = Script.runSimpleBashScript(cmd); if (result != null) { diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index dd9490441a8..8d20ce4de10 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -414,13 +414,6 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Inject S3Manager _s3Mgr; -/* - @Inject - ComponentContext _forceContextRef; // create a dependency to ComponentContext so that it can be loaded beforehead - - @Inject - EventUtils _forceEventUtilsRef; -*/ private final ScheduledExecutorService _eventExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("EventChecker")); private final ScheduledExecutorService _alertExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("AlertChecker")); private KeystoreManager _ksMgr; @@ -429,7 +422,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe private Map _availableIdsMap; - @Inject List _userAuthenticators; + List _userAuthenticators; @Inject ClusterManager _clusterMgr; private String _hashKey = null; @@ -437,6 +430,14 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe public ManagementServerImpl() { 
setRunLevel(ComponentLifecycle.RUN_LEVEL_APPLICATION_MAINLOOP); } + + public List getUserAuthenticators() { + return _userAuthenticators; + } + + public void setUserAuthenticators(List authenticators) { + _userAuthenticators = authenticators; + } @Override public boolean configure(String name, Map params) @@ -2103,10 +2104,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(QueryAsyncJobResultCmd.class); cmdList.add(AssignToLoadBalancerRuleCmd.class); cmdList.add(CreateLBStickinessPolicyCmd.class); + cmdList.add(CreateLBHealthCheckPolicyCmd.class); cmdList.add(CreateLoadBalancerRuleCmd.class); cmdList.add(DeleteLBStickinessPolicyCmd.class); + cmdList.add(DeleteLBHealthCheckPolicyCmd.class); cmdList.add(DeleteLoadBalancerRuleCmd.class); cmdList.add(ListLBStickinessPoliciesCmd.class); + cmdList.add(ListLBHealthCheckPoliciesCmd.class); cmdList.add(ListLoadBalancerRuleInstancesCmd.class); cmdList.add(ListLoadBalancerRulesCmd.class); cmdList.add(RemoveFromLoadBalancerRuleCmd.class); @@ -2239,6 +2243,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(DeleteAlertsCmd.class); cmdList.add(ArchiveEventsCmd.class); cmdList.add(DeleteEventsCmd.class); + cmdList.add(ListStorageProvidersCmd.class); return cmdList; } diff --git a/server/src/com/cloud/server/StatsCollector.java b/server/src/com/cloud/server/StatsCollector.java index 76bae5b4aca..7dcf091f3e3 100755 --- a/server/src/com/cloud/server/StatsCollector.java +++ b/server/src/com/cloud/server/StatsCollector.java @@ -27,10 +27,15 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import javax.annotation.PostConstruct; import javax.inject.Inject; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + +import com.cloud.configuration.dao.ConfigurationDao; +import 
com.cloud.resource.ResourceManager; + import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -88,6 +93,7 @@ public class StatsCollector { @Inject private StoragePoolHostDao _storagePoolHostDao; @Inject private SecondaryStorageVmManager _ssvmMgr; @Inject private ResourceManager _resourceMgr; + @Inject private ConfigurationDao _configDao; private ConcurrentHashMap _hostStats = new ConcurrentHashMap(); private final ConcurrentHashMap _VmStats = new ConcurrentHashMap(); @@ -107,6 +113,7 @@ public class StatsCollector { } public static StatsCollector getInstance(Map configs) { + s_instance.init(configs); return s_instance; } @@ -114,6 +121,11 @@ public class StatsCollector { s_instance = this; } + @PostConstruct + private void init(){ + init(_configDao.getConfiguration()); + } + private void init(Map configs) { _executor = Executors.newScheduledThreadPool(3, new NamedThreadFactory("StatsCollector")); diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index b0a1da14eb8..f37654bb317 100755 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -712,7 +712,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } DataStoreProvider provider = this.dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider(); - DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); + DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); if (pool == null) { Map params = new HashMap(); String name = (host.getName() + " Local Storage"); @@ -724,7 +724,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C params.put("localStorage", true); params.put("details", pInfo.getDetails()); params.put("uuid", pInfo.getUuid()); - params.put("providerId", provider.getId()); + params.put("providerName", provider.getName()); store = lifeCycle.initialize(params); } else { @@ 
-748,15 +748,15 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException, ResourceUnavailableException { - String providerUuid = cmd.getStorageProviderUuid(); + String providerName = cmd.getStorageProviderName(); DataStoreProvider storeProvider = dataStoreProviderMgr - .getDataStoreProviderByUuid(providerUuid); + .getDataStoreProvider(providerName); if (storeProvider == null) { storeProvider = dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider(); if (storeProvider == null) { throw new InvalidParameterValueException( - "can't find storage provider: " + providerUuid); + "can't find storage provider: " + providerName); } } @@ -821,9 +821,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C params.put("tags", cmd.getTags()); params.put("name", cmd.getStoragePoolName()); params.put("details", details); - params.put("providerId", storeProvider.getId()); + params.put("providerName", storeProvider.getName()); - DataStoreLifeCycle lifeCycle = storeProvider.getLifeCycle(); + DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle(); DataStore store = null; try { store = lifeCycle.initialize(params); @@ -948,9 +948,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C s_logger.trace("Released lock for storage pool " + id); DataStoreProvider storeProvider = dataStoreProviderMgr - .getDataStoreProviderById(sPool.getStorageProviderId()); - DataStoreLifeCycle lifeCycle = storeProvider.getLifeCycle(); - lifeCycle.deleteDataStore(id); + .getDataStoreProvider(sPool.getStorageProviderName()); + DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle(); + DataStore store = dataStoreMgr.getDataStore( + sPool.getId(), DataStoreRole.Primary); + lifeCycle.deleteDataStore(store); return false; } @@ -963,8 +965,8 @@ public class 
StorageManagerImpl extends ManagerBase implements StorageManager, C s_logger.debug("Adding pool " + pool.getName() + " to host " + hostId); DataStoreProvider provider = dataStoreProviderMgr - .getDataStoreProviderById(pool.getStorageProviderId()); - HypervisorHostListener listener = hostListeners.get(provider.getUuid()); + .getDataStoreProvider(pool.getStorageProviderName()); + HypervisorHostListener listener = hostListeners.get(provider.getName()); listener.hostConnect(hostId, pool.getId()); } @@ -1415,19 +1417,16 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } DataStoreProvider provider = dataStoreProviderMgr - .getDataStoreProviderById(primaryStorage.getStorageProviderId()); - DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); - lifeCycle.maintain(primaryStorage.getId()); + .getDataStoreProvider(primaryStorage.getStorageProviderName()); + DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); + DataStore store = dataStoreMgr.getDataStore( + primaryStorage.getId(), DataStoreRole.Primary); + lifeCycle.maintain(store); return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore( primaryStorage.getId(), DataStoreRole.Primary); } - private void setPoolStateToError(StoragePoolVO primaryStorage) { - primaryStorage.setStatus(StoragePoolStatus.ErrorInMaintenance); - _storagePoolDao.update(primaryStorage.getId(), primaryStorage); - } - @Override @DB public PrimaryDataStoreInfo cancelPrimaryStorageForMaintenance( @@ -1457,29 +1456,16 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } DataStoreProvider provider = dataStoreProviderMgr - .getDataStoreProviderById(primaryStorage.getStorageProviderId()); - DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); - lifeCycle.cancelMaintain(primaryStorage.getId()); + .getDataStoreProvider(primaryStorage.getStorageProviderName()); + DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); + DataStore store = dataStoreMgr.getDataStore( + 
primaryStorage.getId(), DataStoreRole.Primary); + lifeCycle.cancelMaintain(store); + return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore( primaryStorage.getId(), DataStoreRole.Primary); } - private boolean sendToVmResidesOn(StoragePoolVO PrimaryDataStoreVO, - Command cmd) { - ClusterVO cluster = _clusterDao.findById(PrimaryDataStoreVO - .getClusterId()); - if ((cluster.getHypervisorType() == HypervisorType.KVM || cluster - .getHypervisorType() == HypervisorType.VMware) - && ((cmd instanceof ManageSnapshotCommand) || (cmd instanceof BackupSnapshotCommand))) { - return true; - } else { - return false; - } - } - - - - protected class StorageGarbageCollector implements Runnable { public StorageGarbageCollector() { @@ -1845,9 +1831,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override - public synchronized boolean registerHostListener(String providerUuid, + public synchronized boolean registerHostListener(String providerName, HypervisorHostListener listener) { - hostListeners.put(providerUuid, listener); + hostListeners.put(providerName, listener); return true; } diff --git a/server/src/com/cloud/storage/StoragePoolAutomation.java b/server/src/com/cloud/storage/StoragePoolAutomation.java new file mode 100644 index 00000000000..e8eb9b79cd5 --- /dev/null +++ b/server/src/com/cloud/storage/StoragePoolAutomation.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.storage; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; + +public interface StoragePoolAutomation { + public boolean maintain(DataStore store); + public boolean cancelMaintain(DataStore store); +} diff --git a/server/src/com/cloud/storage/StoragePoolAutomationImpl.java b/server/src/com/cloud/storage/StoragePoolAutomationImpl.java new file mode 100644 index 00000000000..9bba979b9c0 --- /dev/null +++ b/server/src/com/cloud/storage/StoragePoolAutomationImpl.java @@ -0,0 +1,456 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package com.cloud.storage; + +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.alert.AlertManager; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.resource.ResourceManager; +import com.cloud.server.ManagementServer; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.StoragePoolWorkDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.Account; +import com.cloud.user.User; +import com.cloud.user.UserContext; +import com.cloud.user.dao.UserDao; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.exception.ExecutionException; +import com.cloud.vm.ConsoleProxyVO; +import com.cloud.vm.DomainRouterVO; +import com.cloud.vm.SecondaryStorageVmVO; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.dao.ConsoleProxyDao; +import com.cloud.vm.dao.DomainRouterDao; +import com.cloud.vm.dao.SecondaryStorageVmDao; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; + 
+@Component +public class StoragePoolAutomationImpl implements StoragePoolAutomation { + private static final Logger s_logger = Logger + .getLogger(StoragePoolAutomationImpl.class); + @Inject + protected VirtualMachineManager vmMgr; + @Inject + protected SecondaryStorageVmDao _secStrgDao; + @Inject + UserVmDao userVmDao; + @Inject + protected UserDao _userDao; + @Inject + protected DomainRouterDao _domrDao; + @Inject + protected StoragePoolHostDao _storagePoolHostDao; + @Inject + protected AlertManager _alertMgr; + @Inject + protected ConsoleProxyDao _consoleProxyDao; + + @Inject + protected StoragePoolWorkDao _storagePoolWorkDao; + @Inject + PrimaryDataStoreDao primaryDataStoreDao; + @Inject + DataStoreManager dataStoreMgr; + @Inject + protected ResourceManager _resourceMgr; + @Inject + AgentManager agentMgr; + @Inject + VolumeDao volumeDao; + @Inject + VMInstanceDao vmDao; + @Inject + ManagementServer server; + @Inject DataStoreProviderManager providerMgr; + + @Override + public boolean maintain(DataStore store) { + Long userId = UserContext.current().getCallerUserId(); + User user = _userDao.findById(userId); + Account account = UserContext.current().getCaller(); + StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId()); + try { + StoragePool storagePool = (StoragePool) store; + List hosts = _resourceMgr.listHostsInClusterByStatus( + pool.getClusterId(), Status.Up); + if (hosts == null || hosts.size() == 0) { + pool.setStatus(StoragePoolStatus.Maintenance); + primaryDataStoreDao.update(pool.getId(), pool); + return true; + } else { + // set the pool state to prepare for maintenance + pool.setStatus(StoragePoolStatus.PrepareForMaintenance); + primaryDataStoreDao.update(pool.getId(), pool); + } + // remove heartbeat + for (HostVO host : hosts) { + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand( + false, storagePool); + final Answer answer = agentMgr.easySend(host.getId(), cmd); + if (answer == null || !answer.getResult()) { + if 
(s_logger.isDebugEnabled()) { + s_logger.debug("ModifyStoragePool false failed due to " + + ((answer == null) ? "answer null" : answer + .getDetails())); + } + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("ModifyStoragePool false secceeded"); + } + } + } + // check to see if other ps exist + // if they do, then we can migrate over the system vms to them + // if they dont, then just stop all vms on this one + List upPools = primaryDataStoreDao + .listByStatusInZone(pool.getDataCenterId(), + StoragePoolStatus.Up); + boolean restart = true; + if (upPools == null || upPools.size() == 0) { + restart = false; + } + + // 2. Get a list of all the ROOT volumes within this storage pool + List allVolumes = this.volumeDao.findByPoolId(pool + .getId()); + + // 3. Enqueue to the work queue + for (VolumeVO volume : allVolumes) { + VMInstanceVO vmInstance = vmDao + .findById(volume.getInstanceId()); + + if (vmInstance == null) { + continue; + } + + // enqueue sp work + if (vmInstance.getState().equals(State.Running) + || vmInstance.getState().equals(State.Starting) + || vmInstance.getState().equals(State.Stopping)) { + + try { + StoragePoolWorkVO work = new StoragePoolWorkVO( + vmInstance.getId(), pool.getId(), false, false, + server.getId()); + _storagePoolWorkDao.persist(work); + } catch (Exception e) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Work record already exists, re-using by re-setting values"); + } + StoragePoolWorkVO work = _storagePoolWorkDao + .findByPoolIdAndVmId(pool.getId(), + vmInstance.getId()); + work.setStartedAfterMaintenance(false); + work.setStoppedForMaintenance(false); + work.setManagementServerId(server.getId()); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } + + // 4. 
Process the queue + List pendingWork = _storagePoolWorkDao + .listPendingWorkForPrepareForMaintenanceByPoolId(pool + .getId()); + + for (StoragePoolWorkVO work : pendingWork) { + // shut down the running vms + VMInstanceVO vmInstance = vmDao.findById(work.getVmId()); + + if (vmInstance == null) { + continue; + } + + // if the instance is of type consoleproxy, call the console + // proxy + if (vmInstance.getType().equals( + VirtualMachine.Type.ConsoleProxy)) { + // call the consoleproxymanager + ConsoleProxyVO consoleProxy = _consoleProxyDao + .findById(vmInstance.getId()); + if (!vmMgr.advanceStop(consoleProxy, true, user, account)) { + String errorMsg = "There was an error stopping the console proxy id: " + + vmInstance.getId() + + " ,cannot enable storage maintenance"; + s_logger.warn(errorMsg); + throw new CloudRuntimeException(errorMsg); + } else { + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + + if (restart) { + + if (this.vmMgr.advanceStart(consoleProxy, null, user, + account) == null) { + String errorMsg = "There was an error starting the console proxy id: " + + vmInstance.getId() + + " on another storage pool, cannot enable primary storage maintenance"; + s_logger.warn(errorMsg); + } else { + // update work status + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } + + // if the instance is of type uservm, call the user vm manager + if (vmInstance.getType().equals(VirtualMachine.Type.User)) { + UserVmVO userVm = userVmDao.findById(vmInstance.getId()); + if (!vmMgr.advanceStop(userVm, true, user, account)) { + String errorMsg = "There was an error stopping the user vm id: " + + vmInstance.getId() + + " ,cannot enable storage maintenance"; + s_logger.warn(errorMsg); + throw new CloudRuntimeException(errorMsg); + } else { + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + 
} + + // if the instance is of type secondary storage vm, call the + // secondary storage vm manager + if (vmInstance.getType().equals( + VirtualMachine.Type.SecondaryStorageVm)) { + SecondaryStorageVmVO secStrgVm = _secStrgDao + .findById(vmInstance.getId()); + if (!vmMgr.advanceStop(secStrgVm, true, user, account)) { + String errorMsg = "There was an error stopping the ssvm id: " + + vmInstance.getId() + + " ,cannot enable storage maintenance"; + s_logger.warn(errorMsg); + throw new CloudRuntimeException(errorMsg); + } else { + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + + if (restart) { + if (vmMgr.advanceStart(secStrgVm, null, user, account) == null) { + String errorMsg = "There was an error starting the ssvm id: " + + vmInstance.getId() + + " on another storage pool, cannot enable primary storage maintenance"; + s_logger.warn(errorMsg); + } else { + // update work status + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } + + // if the instance is of type domain router vm, call the network + // manager + if (vmInstance.getType().equals( + VirtualMachine.Type.DomainRouter)) { + DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); + if (!vmMgr.advanceStop(domR, true, user, account)) { + String errorMsg = "There was an error stopping the domain router id: " + + vmInstance.getId() + + " ,cannot enable primary storage maintenance"; + s_logger.warn(errorMsg); + throw new CloudRuntimeException(errorMsg); + } else { + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + + if (restart) { + if (vmMgr.advanceStart(domR, null, user, account) == null) { + String errorMsg = "There was an error starting the domain router id: " + + vmInstance.getId() + + " on another storage pool, cannot enable primary storage maintenance"; + s_logger.warn(errorMsg); + } else { + // update work 
status + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } + } + + } catch(Exception e) { + s_logger.error( + "Exception in enabling primary storage maintenance:", e); + pool.setStatus(StoragePoolStatus.ErrorInMaintenance); + this.primaryDataStoreDao.update(pool.getId(), pool); + throw new CloudRuntimeException(e.getMessage()); + } + return true; + } + + @Override + public boolean cancelMaintain(DataStore store) { + // Change the storage state back to up + Long userId = UserContext.current().getCallerUserId(); + User user = _userDao.findById(userId); + Account account = UserContext.current().getCaller(); + StoragePoolVO poolVO = this.primaryDataStoreDao + .findById(store.getId()); + StoragePool pool = (StoragePool)store; + + List hosts = _resourceMgr.listHostsInClusterByStatus( + pool.getClusterId(), Status.Up); + if (hosts == null || hosts.size() == 0) { + return true; + } + // add heartbeat + for (HostVO host : hosts) { + ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand( + true, pool); + final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd); + if (answer == null || !answer.getResult()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("ModifyStoragePool add failed due to " + + ((answer == null) ? "answer null" : answer + .getDetails())); + } + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("ModifyStoragePool add secceeded"); + } + } + } + + // 2. Get a list of pending work for this queue + List pendingWork = _storagePoolWorkDao + .listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId()); + + // 3. 
work through the queue + for (StoragePoolWorkVO work : pendingWork) { + try { + VMInstanceVO vmInstance = vmDao.findById(work.getVmId()); + + if (vmInstance == null) { + continue; + } + + // if the instance is of type consoleproxy, call the console + // proxy + if (vmInstance.getType().equals( + VirtualMachine.Type.ConsoleProxy)) { + + ConsoleProxyVO consoleProxy = _consoleProxyDao + .findById(vmInstance.getId()); + if (vmMgr.advanceStart(consoleProxy, null, user, account) == null) { + String msg = "There was an error starting the console proxy id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + + // if the instance is of type ssvm, call the ssvm manager + if (vmInstance.getType().equals( + VirtualMachine.Type.SecondaryStorageVm)) { + SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance + .getId()); + if (vmMgr.advanceStart(ssVm, null, user, account) == null) { + String msg = "There was an error starting the ssvm id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + + // if the instance is of type ssvm, call the ssvm manager + if (vmInstance.getType().equals( + VirtualMachine.Type.DomainRouter)) { + DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); + if (vmMgr.advanceStart(domR, null, user, account) == null) { + String msg = "There was an error starting the domR id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + 
work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + + // if the instance is of type user vm, call the user vm manager + if (vmInstance.getType().equals(VirtualMachine.Type.User)) { + UserVmVO userVm = userVmDao.findById(vmInstance.getId()); + + if (vmMgr.advanceStart(userVm, null, user, account) == null) { + + String msg = "There was an error starting the user vm id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + return true; + } catch (Exception e) { + s_logger.debug("Failed start vm", e); + throw new CloudRuntimeException(e.toString()); + } + } + return false; + } + +} diff --git a/server/src/com/cloud/storage/VolumeManagerImpl.java b/server/src/com/cloud/storage/VolumeManagerImpl.java index 4951975786f..737ed0a3bac 100644 --- a/server/src/com/cloud/storage/VolumeManagerImpl.java +++ b/server/src/com/cloud/storage/VolumeManagerImpl.java @@ -48,6 +48,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManag import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; @@ -1439,64 +1440,30 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { } private boolean needMoveVolume(VolumeVO rootVolumeOfVm, VolumeInfo volume) { - StoragePoolVO vmRootVolumePool = _storagePoolDao - 
.findById(rootVolumeOfVm.getPoolId()); - DiskOfferingVO volumeDiskOffering = _diskOfferingDao - .findById(volume.getDiskOfferingId()); - String[] volumeTags = volumeDiskOffering.getTagsArray(); - - boolean isVolumeOnSharedPool = !volumeDiskOffering - .getUseLocalStorage(); - StoragePoolVO sourcePool = _storagePoolDao.findById(volume - .getPoolId()); - List matchingVMPools = _storagePoolDao - .findPoolsByTags(vmRootVolumePool.getDataCenterId(), - vmRootVolumePool.getPodId(), - vmRootVolumePool.getClusterId(), volumeTags - ); + DataStore storeForRootVol = this.dataStoreMgr.getPrimaryDataStore(rootVolumeOfVm.getPoolId()); + DataStore storeForDataVol = this.dataStoreMgr.getPrimaryDataStore(volume.getPoolId()); - boolean moveVolumeNeeded = true; - if (matchingVMPools.size() == 0) { - String poolType; - if (vmRootVolumePool.getClusterId() != null) { - poolType = "cluster"; - } else if (vmRootVolumePool.getPodId() != null) { - poolType = "pod"; - } else { - poolType = "zone"; - } - throw new CloudRuntimeException( - "There are no storage pools in the VM's " + poolType - + " with all of the volume's tags (" - + volumeDiskOffering.getTags() + ")."); - } else { - long sourcePoolId = sourcePool.getId(); - Long sourcePoolDcId = sourcePool.getDataCenterId(); - Long sourcePoolPodId = sourcePool.getPodId(); - Long sourcePoolClusterId = sourcePool.getClusterId(); - for (StoragePoolVO vmPool : matchingVMPools) { - long vmPoolId = vmPool.getId(); - Long vmPoolDcId = vmPool.getDataCenterId(); - Long vmPoolPodId = vmPool.getPodId(); - Long vmPoolClusterId = vmPool.getClusterId(); - - // Moving a volume is not required if storage pools belongs - // to same cluster in case of shared volume or - // identical storage pool in case of local - if (sourcePoolDcId == vmPoolDcId - && sourcePoolPodId == vmPoolPodId - && sourcePoolClusterId == vmPoolClusterId - && (isVolumeOnSharedPool || sourcePoolId == vmPoolId)) { - moveVolumeNeeded = false; - break; - } - } + Scope storeForRootStoreScope = 
storeForRootVol.getScope(); + if (storeForRootStoreScope == null) { + throw new CloudRuntimeException("Can't get scope of data store: " + storeForRootVol.getId()); } - return moveVolumeNeeded; + Scope storeForDataStoreScope = storeForDataVol.getScope(); + if (storeForDataStoreScope == null) { + throw new CloudRuntimeException("Can't get scope of data store: " + storeForDataVol.getId()); + } + + if (storeForDataStoreScope.getScopeType() == ScopeType.ZONE) { + return false; + } + + if (storeForRootStoreScope.getScopeType() != storeForDataStoreScope.getScopeType()) { + throw new CloudRuntimeException("Can't move volume between scope: " + storeForDataStoreScope.getScopeType() + " and " + storeForRootStoreScope.getScopeType()); + } + + return !storeForRootStoreScope.isSameScope(storeForDataStoreScope); } - private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volume, Long deviceId) { String errorMsg = "Failed to attach volume: " + volume.getName() + " to VM: " + vm.getHostName(); @@ -2166,7 +2133,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { + assignedPool + " assigned by deploymentPlanner"); } - VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, null); + VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, assignedPool); tasks.add(task); } } else { diff --git a/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java b/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java index 5bd749fe842..8f9be0f5d57 100755 --- a/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -162,6 +162,10 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { _upgradeMap.put("4.0.0", new DbUpgrade[] { new Upgrade40to41(), new Upgrade410to420() }); + _upgradeMap.put("4.0.1", new DbUpgrade[] { new Upgrade40to41(), new Upgrade410to420() }); + + _upgradeMap.put("4.0.2", new DbUpgrade[] { new Upgrade40to41(), new Upgrade410to420() }); + 
_upgradeMap.put("4.1.0", new DbUpgrade[] { new Upgrade410to420() }); } diff --git a/server/src/com/cloud/upgrade/dao/Upgrade410to420.java b/server/src/com/cloud/upgrade/dao/Upgrade410to420.java index db562b1c17a..f210dc17de0 100644 --- a/server/src/com/cloud/upgrade/dao/Upgrade410to420.java +++ b/server/src/com/cloud/upgrade/dao/Upgrade410to420.java @@ -60,6 +60,7 @@ public class Upgrade410to420 implements DbUpgrade { @Override public void performDataMigration(Connection conn) { upgradeVmwareLabels(conn); + createPlaceHolderNics(conn); PreparedStatement sql = null; try { sql = conn.prepareStatement("update vm_template set image_data_store_id = 1 where type = 'SYSTEM' or type = 'BUILTIN'"); @@ -158,4 +159,41 @@ public class Upgrade410to420 implements DbUpgrade { } } } + + private void createPlaceHolderNics(Connection conn) { + PreparedStatement pstmt = null; + ResultSet rs = null; + + try { + pstmt = conn.prepareStatement("SELECT network_id, gateway, ip4_address FROM `cloud`.`nics` WHERE reserver_name IN ('DirectNetworkGuru','DirectPodBasedNetworkGuru') and vm_type='DomainRouter' AND removed IS null"); + rs = pstmt.executeQuery(); + while (rs.next()) { + Long networkId = rs.getLong(1); + String gateway = rs.getString(2); + String ip = rs.getString(3); + String uuid = UUID.randomUUID().toString(); + //Insert placeholder nic for each Domain router nic in Shared network + pstmt = conn.prepareStatement("INSERT INTO `cloud`.`nics` (uuid, ip4_address, gateway, network_id, state, strategy) VALUES (?, ?, ?, ?, 'Reserved', 'PlaceHolder')"); + pstmt.setString(1, uuid); + pstmt.setString(2, ip); + pstmt.setString(3, gateway); + pstmt.setLong(4, networkId); + pstmt.executeUpdate(); + s_logger.debug("Created placeholder nic for the ipAddress " + ip); + + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to create placeholder nics", e); + } finally { + try { + if (rs != null) { + rs.close(); + } + if (pstmt != null) { + pstmt.close(); + } + } catch 
(SQLException e) { + } + } + } } diff --git a/server/src/com/cloud/user/AccountManagerImpl.java b/server/src/com/cloud/user/AccountManagerImpl.java index be5f4f4d77d..b69f31464ba 100755 --- a/server/src/com/cloud/user/AccountManagerImpl.java +++ b/server/src/com/cloud/user/AccountManagerImpl.java @@ -48,7 +48,6 @@ import org.apache.cloudstack.api.command.admin.user.RegisterCmd; import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd; import org.apache.commons.codec.binary.Base64; import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; import com.cloud.api.query.dao.UserAccountJoinDao; @@ -140,7 +139,6 @@ import com.cloud.vm.dao.InstanceGroupDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; -@Component @Local(value = { AccountManager.class, AccountService.class }) public class AccountManagerImpl extends ManagerBase implements AccountManager, Manager { public static final Logger s_logger = Logger.getLogger(AccountManagerImpl.class); @@ -223,7 +221,6 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M private AutoScaleManager _autoscaleMgr; @Inject VolumeManager volumeMgr; - @Inject private List _userAuthenticators; private final ScheduledExecutorService _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("AccountChecker")); @@ -237,6 +234,14 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M List _securityCheckers; int _cleanupInterval; + public List getUserAuthenticators() { + return _userAuthenticators; + } + + public void setUserAuthenticators(List authenticators) { + _userAuthenticators = authenticators; + } + @Override public boolean configure(final String name, final Map params) throws ConfigurationException { _systemAccount = _accountDao.findById(AccountVO.ACCOUNT_ID_SYSTEM); diff --git a/server/src/com/cloud/vm/dao/NicDao.java b/server/src/com/cloud/vm/dao/NicDao.java index 
35d719131bb..eb38a8d7a1b 100644 --- a/server/src/com/cloud/vm/dao/NicDao.java +++ b/server/src/com/cloud/vm/dao/NicDao.java @@ -62,4 +62,7 @@ public interface NicDao extends GenericDao { List listByVmIdAndNicId(Long vmId, Long nicId); NicVO findByIp4AddressAndVmId(String ip4Address, long instance); + + List listPlaceholderNicsByNetworkId(long networkId); + } diff --git a/server/src/com/cloud/vm/dao/NicDaoImpl.java b/server/src/com/cloud/vm/dao/NicDaoImpl.java index b9ec72ee7c9..b427d4e23df 100644 --- a/server/src/com/cloud/vm/dao/NicDaoImpl.java +++ b/server/src/com/cloud/vm/dao/NicDaoImpl.java @@ -221,4 +221,12 @@ public class NicDaoImpl extends GenericDaoBase implements NicDao { return findOneBy(sc); } + @Override + public List listPlaceholderNicsByNetworkId(long networkId) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("network", networkId); + sc.setParameters("strategy", Nic.ReservationStrategy.PlaceHolder.toString()); + return listBy(sc); + } + } diff --git a/server/test/com/cloud/network/MockNetworkManagerImpl.java b/server/test/com/cloud/network/MockNetworkManagerImpl.java index eb43cce0b9e..6da48ece087 100755 --- a/server/test/com/cloud/network/MockNetworkManagerImpl.java +++ b/server/test/com/cloud/network/MockNetworkManagerImpl.java @@ -67,6 +67,7 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.vm.Nic; import com.cloud.vm.NicProfile; import com.cloud.vm.NicSecondaryIp; +import com.cloud.vm.NicVO; import com.cloud.vm.ReservationContext; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; @@ -881,4 +882,10 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage // TODO Auto-generated method stub return false; } + + @Override + public NicVO savePlaceholderNic(Network network, String ip4Address) { + // TODO Auto-generated method stub + return null; + } } diff --git a/server/test/com/cloud/network/MockNetworkModelImpl.java 
b/server/test/com/cloud/network/MockNetworkModelImpl.java index 83dddf4e735..b926519d45b 100644 --- a/server/test/com/cloud/network/MockNetworkModelImpl.java +++ b/server/test/com/cloud/network/MockNetworkModelImpl.java @@ -34,16 +34,13 @@ import com.cloud.network.Network.GuestType; import com.cloud.network.Network.Provider; import com.cloud.network.Network.Service; import com.cloud.network.Networks.TrafficType; -import com.cloud.network.addr.PublicIp; import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.NetworkVO; import com.cloud.network.element.NetworkElement; import com.cloud.network.element.UserDataServiceProvider; -import com.cloud.network.rules.FirewallRule; import com.cloud.offering.NetworkOffering; import com.cloud.offerings.NetworkOfferingVO; import com.cloud.user.Account; -import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.vm.Nic; import com.cloud.vm.NicProfile; @@ -841,4 +838,10 @@ public class MockNetworkModelImpl extends ManagerBase implements NetworkModel { // TODO Auto-generated method stub return null; } + + @Override + public Nic getPlaceholderNic(Network network, Long podId) { + // TODO Auto-generated method stub + return null; + } } diff --git a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java index c798cdf7810..ead0051b860 100644 --- a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java +++ b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java @@ -83,6 +83,7 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.vm.Nic; import com.cloud.vm.NicProfile; import com.cloud.vm.NicSecondaryIp; +import com.cloud.vm.NicVO; import com.cloud.vm.ReservationContext; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; @@ -1411,4 +1412,16 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage // TODO Auto-generated method stub return false; } + + + + + + @Override + 
public NicVO savePlaceholderNic(Network network, String ip4Address) { + // TODO Auto-generated method stub + return null; + } + + } diff --git a/server/test/com/cloud/vpc/MockNetworkModelImpl.java b/server/test/com/cloud/vpc/MockNetworkModelImpl.java index 7f1f945f630..b05e8eaba6c 100644 --- a/server/test/com/cloud/vpc/MockNetworkModelImpl.java +++ b/server/test/com/cloud/vpc/MockNetworkModelImpl.java @@ -41,17 +41,14 @@ import com.cloud.network.Networks.TrafficType; import com.cloud.network.PhysicalNetwork; import com.cloud.network.PhysicalNetworkSetupInfo; import com.cloud.network.PublicIpAddress; -import com.cloud.network.addr.PublicIp; import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.NetworkVO; import com.cloud.network.element.NetworkElement; import com.cloud.network.element.UserDataServiceProvider; -import com.cloud.network.rules.FirewallRule; import com.cloud.offering.NetworkOffering; import com.cloud.offerings.NetworkOfferingVO; import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; import com.cloud.user.Account; -import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.vm.Nic; import com.cloud.vm.NicProfile; @@ -854,4 +851,10 @@ public class MockNetworkModelImpl extends ManagerBase implements NetworkModel { return null; } + @Override + public Nic getPlaceholderNic(Network network, Long podId) { + // TODO Auto-generated method stub + return null; + } + } diff --git a/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java b/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java new file mode 100644 index 00000000000..895a5d4416d --- /dev/null +++ b/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java @@ -0,0 +1,333 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.networkoffering; + +import java.io.IOException; + +import org.apache.cloudstack.acl.SecurityChecker; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDaoImpl; +import org.mockito.Mockito; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.ComponentScan.Filter; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.FilterType; +import org.springframework.core.type.classreading.MetadataReader; +import org.springframework.core.type.classreading.MetadataReaderFactory; +import org.springframework.core.type.filter.TypeFilter; + +import com.cloud.agent.AgentManager; +import com.cloud.alert.AlertManager; +import com.cloud.api.query.dao.UserAccountJoinDaoImpl; +import com.cloud.capacity.dao.CapacityDaoImpl; +import com.cloud.cluster.agentlb.dao.HostTransferMapDaoImpl; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.dc.dao.AccountVlanMapDaoImpl; +import com.cloud.dc.dao.ClusterDaoImpl; +import com.cloud.dc.dao.DataCenterDaoImpl; +import com.cloud.dc.dao.DataCenterIpAddressDaoImpl; +import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDaoImpl; +import 
com.cloud.dc.dao.DataCenterVnetDaoImpl; +import com.cloud.dc.dao.DcDetailsDaoImpl; +import com.cloud.dc.dao.HostPodDaoImpl; +import com.cloud.dc.dao.PodVlanDaoImpl; +import com.cloud.dc.dao.PodVlanMapDaoImpl; +import com.cloud.dc.dao.VlanDaoImpl; +import com.cloud.domain.dao.DomainDaoImpl; +import com.cloud.event.dao.UsageEventDaoImpl; +import com.cloud.host.dao.HostDaoImpl; +import com.cloud.host.dao.HostDetailsDaoImpl; +import com.cloud.host.dao.HostTagsDaoImpl; +import com.cloud.network.Ipv6AddressManager; +import com.cloud.network.NetworkManager; +import com.cloud.network.NetworkModel; +import com.cloud.network.NetworkService; +import com.cloud.network.StorageNetworkManager; +import com.cloud.network.dao.FirewallRulesCidrsDaoImpl; +import com.cloud.network.dao.FirewallRulesDaoImpl; +import com.cloud.network.dao.IPAddressDaoImpl; +import com.cloud.network.dao.LoadBalancerDaoImpl; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkDomainDaoImpl; +import com.cloud.network.dao.NetworkServiceMapDaoImpl; +import com.cloud.network.dao.PhysicalNetworkDaoImpl; +import com.cloud.network.dao.PhysicalNetworkServiceProviderDaoImpl; +import com.cloud.network.dao.PhysicalNetworkTrafficTypeDaoImpl; +import com.cloud.network.dao.UserIpv6AddressDaoImpl; +import com.cloud.network.element.DhcpServiceProvider; +import com.cloud.network.element.IpDeployer; +import com.cloud.network.element.NetworkElement; +import com.cloud.network.guru.NetworkGuru; +import com.cloud.network.lb.LoadBalancingRulesManager; +import com.cloud.network.rules.FirewallManager; +import com.cloud.network.rules.RulesManager; +import com.cloud.network.rules.dao.PortForwardingRulesDaoImpl; +import com.cloud.network.vpc.NetworkACLManager; +import com.cloud.network.vpc.VpcManager; +import com.cloud.network.vpc.dao.PrivateIpDaoImpl; +import com.cloud.network.vpn.RemoteAccessVpnService; +import com.cloud.offerings.dao.NetworkOfferingDao; +import 
com.cloud.offerings.dao.NetworkOfferingServiceMapDao; +import com.cloud.offerings.dao.NetworkOfferingServiceMapDaoImpl; +import com.cloud.projects.ProjectManager; +import com.cloud.service.dao.ServiceOfferingDaoImpl; +import com.cloud.storage.dao.DiskOfferingDaoImpl; +import com.cloud.storage.dao.S3DaoImpl; +import com.cloud.storage.dao.SnapshotDaoImpl; +import com.cloud.storage.dao.StoragePoolDetailsDaoImpl; +import com.cloud.storage.dao.SwiftDaoImpl; +import com.cloud.storage.dao.VolumeDaoImpl; +import com.cloud.storage.s3.S3Manager; +import com.cloud.storage.secondary.SecondaryStorageVmManager; +import com.cloud.storage.swift.SwiftManager; +import com.cloud.tags.dao.ResourceTagsDaoImpl; +import com.cloud.user.AccountManager; +import com.cloud.user.ResourceLimitService; +import com.cloud.user.UserContext; +import com.cloud.user.UserContextInitializer; +import com.cloud.user.dao.AccountDaoImpl; +import com.cloud.user.dao.UserDaoImpl; +import com.cloud.utils.component.SpringComponentScanUtils; +import com.cloud.vm.dao.InstanceGroupDaoImpl; +import com.cloud.vm.dao.NicDaoImpl; +import com.cloud.vm.dao.NicSecondaryIpDaoImpl; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDaoImpl; + +@Configuration +@ComponentScan(basePackageClasses={ + AccountVlanMapDaoImpl.class, + VolumeDaoImpl.class, + HostPodDaoImpl.class, + DomainDaoImpl.class, + SwiftDaoImpl.class, + ServiceOfferingDaoImpl.class, + VlanDaoImpl.class, + IPAddressDaoImpl.class, + ResourceTagsDaoImpl.class, + AccountDaoImpl.class, + InstanceGroupDaoImpl.class, + UserAccountJoinDaoImpl.class, + CapacityDaoImpl.class, + SnapshotDaoImpl.class, + HostDaoImpl.class, + VMInstanceDaoImpl.class, + HostTransferMapDaoImpl.class, + PortForwardingRulesDaoImpl.class, + PrivateIpDaoImpl.class, + UsageEventDaoImpl.class, + PodVlanMapDaoImpl.class, + DiskOfferingDaoImpl.class, + DataCenterDaoImpl.class, + DataCenterIpAddressDaoImpl.class, + DataCenterLinkLocalIpAddressDaoImpl.class, + 
DataCenterVnetDaoImpl.class, + PodVlanDaoImpl.class, + DcDetailsDaoImpl.class, + NicSecondaryIpDaoImpl.class, + UserIpv6AddressDaoImpl.class, + S3DaoImpl.class, + UserDaoImpl.class, + NicDaoImpl.class, + NetworkDomainDaoImpl.class, + HostDetailsDaoImpl.class, + HostTagsDaoImpl.class, + ClusterDaoImpl.class, + FirewallRulesDaoImpl.class, + FirewallRulesCidrsDaoImpl.class, + PhysicalNetworkDaoImpl.class, + PhysicalNetworkTrafficTypeDaoImpl.class, + PhysicalNetworkServiceProviderDaoImpl.class, + LoadBalancerDaoImpl.class, + NetworkServiceMapDaoImpl.class, + PrimaryDataStoreDaoImpl.class, + StoragePoolDetailsDaoImpl.class + }, +includeFilters={@Filter(value=ChildTestConfiguration.Library.class, type=FilterType.CUSTOM)}, +useDefaultFilters=false +) + +public class ChildTestConfiguration { + + @Bean + public AccountManager acctMgr() { + return Mockito.mock(AccountManager.class); + } + + @Bean + public NetworkService ntwkSvc() { + return Mockito.mock(NetworkService.class); + } + + @Bean + public NetworkModel ntwkMdl() { + return Mockito.mock(NetworkModel.class); + } + + @Bean + public AlertManager alertMgr() { + return Mockito.mock(AlertManager.class); + } + + @Bean + public SecurityChecker securityChkr() { + return Mockito.mock(SecurityChecker.class); + } + + @Bean + public ResourceLimitService resourceSvc() { + return Mockito.mock(ResourceLimitService.class); + } + + @Bean + public ProjectManager projectMgr() { + return Mockito.mock(ProjectManager.class); + } + + @Bean + public SecondaryStorageVmManager ssvmMgr() { + return Mockito.mock(SecondaryStorageVmManager.class); + } + + @Bean + public SwiftManager swiftMgr() { + return Mockito.mock(SwiftManager.class); + } + + @Bean + public S3Manager s3Mgr() { + return Mockito.mock(S3Manager.class); + } + + @Bean + public VpcManager vpcMgr() { + return Mockito.mock(VpcManager.class); + } + + @Bean + public UserVmDao userVMDao() { + return Mockito.mock(UserVmDao.class); + } + + @Bean + public RulesManager rulesMgr() { + return 
Mockito.mock(RulesManager.class); + } + + @Bean + public LoadBalancingRulesManager lbRulesMgr() { + return Mockito.mock(LoadBalancingRulesManager.class); + } + + @Bean + public RemoteAccessVpnService vpnMgr() { + return Mockito.mock(RemoteAccessVpnService.class); + } + + @Bean + public NetworkGuru ntwkGuru() { + return Mockito.mock(NetworkGuru.class); + } + + @Bean + public NetworkElement ntwkElement() { + return Mockito.mock(NetworkElement.class); + } + + @Bean + public IpDeployer ipDeployer() { + return Mockito.mock(IpDeployer.class); + } + + @Bean + public DhcpServiceProvider dhcpProvider() { + return Mockito.mock(DhcpServiceProvider.class); + } + + @Bean + public FirewallManager firewallMgr() { + return Mockito.mock(FirewallManager.class); + } + + @Bean + public AgentManager agentMgr() { + return Mockito.mock(AgentManager.class); + } + + @Bean + public StorageNetworkManager storageNtwkMgr() { + return Mockito.mock(StorageNetworkManager.class); + } + + @Bean + public NetworkACLManager ntwkAclMgr() { + return Mockito.mock(NetworkACLManager.class); + } + + @Bean + public Ipv6AddressManager ipv6Mgr() { + return Mockito.mock(Ipv6AddressManager.class); + } + + @Bean + public ConfigurationDao configDao() { + return Mockito.mock(ConfigurationDao.class); + } + + @Bean + public UserContext userContext() { + return Mockito.mock(UserContext.class); + } + + @Bean + public UserContextInitializer userContextInitializer() { + return Mockito.mock(UserContextInitializer.class); + } + + @Bean + public NetworkManager networkManager() { + return Mockito.mock(NetworkManager.class); + } + + @Bean + public NetworkOfferingDao networkOfferingDao() { + return Mockito.mock(NetworkOfferingDao.class); + } + + @Bean + public NetworkDao networkDao() { + return Mockito.mock(NetworkDao.class); + } + + @Bean + public NetworkOfferingServiceMapDao networkOfferingServiceMapDao() { + return Mockito.mock(NetworkOfferingServiceMapDao.class); + } + + public static class Library implements TypeFilter { 
+ + @Override + public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { + mdr.getClassMetadata().getClassName(); + ComponentScan cs = ChildTestConfiguration.class.getAnnotation(ComponentScan.class); + return SpringComponentScanUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs); + } + + } + +} diff --git a/server/test/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java b/server/test/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java new file mode 100644 index 00000000000..bb829917977 --- /dev/null +++ b/server/test/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java @@ -0,0 +1,179 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.networkoffering; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import javax.inject.Inject; + +import junit.framework.TestCase; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import com.cloud.configuration.ConfigurationManager; +import com.cloud.configuration.ConfigurationVO; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.network.Network; +import com.cloud.network.Network.Provider; +import com.cloud.network.Network.Service; +import com.cloud.network.Networks.TrafficType; +import com.cloud.offering.NetworkOffering.Availability; +import com.cloud.offerings.NetworkOfferingServiceMapVO; +import com.cloud.offerings.NetworkOfferingVO; +import com.cloud.offerings.dao.NetworkOfferingDao; +import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; +import com.cloud.user.UserContext; +import com.cloud.user.UserContextInitializer; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(locations="classpath:/createNetworkOffering.xml") + +public class CreateNetworkOfferingTest extends TestCase{ + + @Inject + ConfigurationManager configMgr; + + @Inject + ConfigurationDao configDao; + + @Inject + NetworkOfferingDao offDao; + + @Inject + UserContext usrCtx; + + @Inject + UserContextInitializer usrCtxInit; + + @Inject + NetworkOfferingServiceMapDao mapDao; + + @Before + public void setUp() { + ConfigurationVO configVO = new ConfigurationVO("200", "200","200","200","200","200"); + Mockito.when(configDao.findByName(Mockito.anyString())).thenReturn(configVO); + + Mockito.when(offDao.persist(Mockito.any(NetworkOfferingVO.class))).thenReturn(new NetworkOfferingVO()); + 
Mockito.when(mapDao.persist(Mockito.any(NetworkOfferingServiceMapVO.class))).thenReturn(new NetworkOfferingServiceMapVO()); + Mockito.when(usrCtx.current()).thenReturn(new UserContext()); + } + + //Test Shared network offerings + @Test + public void createSharedNtwkOffWithVlan() { + NetworkOfferingVO off = configMgr.createNetworkOffering("shared", "shared", TrafficType.Guest, null, true, + Availability.Optional, 200, null, false, Network.GuestType.Shared, false, + null, false, null, true, false); + + assertNotNull("Shared network offering with specifyVlan=true failed to create ", off); + } + + @Test + public void createSharedNtwkOffWithNoVlan() { + try { + NetworkOfferingVO off = configMgr.createNetworkOffering("shared", "shared", TrafficType.Guest, null, false, + Availability.Optional, 200, null, false, Network.GuestType.Shared, false, + null, false, null, true, false); + assertNull("Shared network offering with specifyVlan=false was created", off); + } catch (InvalidParameterValueException ex) { + } + } + + @Test + public void createSharedNtwkOffWithSpecifyIpRanges() { + NetworkOfferingVO off = configMgr.createNetworkOffering("shared", "shared", TrafficType.Guest, null, true, + Availability.Optional, 200, null, false, Network.GuestType.Shared, false, + null, false, null, true, false); + + assertNotNull("Shared network offering with specifyIpRanges=true failed to create ", off); + } + + @Test + public void createSharedNtwkOffWithoutSpecifyIpRanges() { + try { + NetworkOfferingVO off = configMgr.createNetworkOffering("shared", "shared", TrafficType.Guest, null, true, + Availability.Optional, 200, null, false, Network.GuestType.Shared, false, + null, false, null, false, false); + assertNull("Shared network offering with specifyIpRanges=false was created", off); + } catch (InvalidParameterValueException ex) { + } + } + + //Test Isolated network offerings + @Test + public void createIsolatedNtwkOffWithNoVlan() { + Map<Network.Service, Set<Network.Provider>> serviceProviderMap = new HashMap<Network.Service, Set<Network.Provider>>(); + Set<Network.Provider>
vrProvider = new HashSet<Network.Provider>(); + vrProvider.add(Provider.VirtualRouter); + serviceProviderMap.put(Network.Service.SourceNat, vrProvider); + NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, false, + Availability.Optional, 200, serviceProviderMap, false, Network.GuestType.Isolated, false, + null, false, null, false, false); + + assertNotNull("Isolated network offering with specifyIpRanges=false failed to create ", off); + } + + @Test + public void createIsolatedNtwkOffWithVlan() { + Map<Network.Service, Set<Network.Provider>> serviceProviderMap = new HashMap<Network.Service, Set<Network.Provider>>(); + Set<Network.Provider> vrProvider = new HashSet<Network.Provider>(); + vrProvider.add(Provider.VirtualRouter); + serviceProviderMap.put(Network.Service.SourceNat, vrProvider); + NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, true, + Availability.Optional, 200, serviceProviderMap, false, Network.GuestType.Isolated, false, + null, false, null, false, false); + assertNotNull("Isolated network offering with specifyVlan=true wasn't created", off); + + } + + @Test + public void createIsolatedNtwkOffWithSpecifyIpRangesAndSourceNat() { + try { + Map<Network.Service, Set<Network.Provider>> serviceProviderMap = new HashMap<Network.Service, Set<Network.Provider>>(); + Set<Network.Provider> vrProvider = new HashSet<Network.Provider>(); + vrProvider.add(Provider.VirtualRouter); + serviceProviderMap.put(Network.Service.SourceNat, vrProvider); + NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, false, + Availability.Optional, 200, serviceProviderMap, false, Network.GuestType.Isolated, false, + null, false, null, true, false); + assertNull("Isolated network offering with specifyIpRanges=true and source nat service enabled, was created", off); + } catch (InvalidParameterValueException ex) { + } + } + + @Test + public void createIsolatedNtwkOffWithSpecifyIpRangesAndNoSourceNat() { + + Map<Network.Service, Set<Network.Provider>> serviceProviderMap = new HashMap<Network.Service, Set<Network.Provider>>(); + Set<Network.Provider> vrProvider = new HashSet<Network.Provider>(); + NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated",
TrafficType.Guest, null, false, + Availability.Optional, 200, serviceProviderMap, false, Network.GuestType.Isolated, false, + null, false, null, true, false); + assertNotNull("Isolated network offering with specifyIpRanges=true and with no sourceNatService, failed to create", off); + + } +} diff --git a/server/test/resources/createNetworkOffering.xml b/server/test/resources/createNetworkOffering.xml new file mode 100644 index 00000000000..f3faaa8a80a --- /dev/null +++ b/server/test/resources/createNetworkOffering.xml @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java b/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java index 6815b0d43bc..297e71118ad 100644 --- a/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java +++ b/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java @@ -53,7 +53,7 @@ public class ConsoleProxyHttpHandlerHelper { ConsoleProxyClientParam param = encryptor.decryptObject(ConsoleProxyClientParam.class, map.get("token")); // make sure we get information from token only - map.clear(); + guardUserInput(map); if(param != null) { if(param.getClientHostAddress() != null) map.put("host", param.getClientHostAddress()); @@ -72,9 +72,19 @@ public class ConsoleProxyHttpHandlerHelper { } } else { // we no longer accept information from parameter other than token - map.clear(); + guardUserInput(map); } return map; } + + private static void guardUserInput(Map map) { + map.remove("host"); + map.remove("port"); + map.remove("tag"); + map.remove("sid"); + map.remove("consoleurl"); + map.remove("sessionref"); + map.remove("ticket"); + } } diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql index 865fbd3181c..60ad025c60e 100644 --- a/setup/db/db/schema-40to410.sql +++ b/setup/db/db/schema-40to410.sql @@ 
-261,7 +261,7 @@ CREATE TABLE `cloud`.`region` ( PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -INSERT INTO `cloud`.`region` values ('1','Local','http://localhost:8080/client/api'); +INSERT INTO `cloud`.`region` values ('1','Local','http://localhost:8080/client/'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Account Defaults', 'DEFAULT', 'management-server', 'max.account.cpus', '40', 'The default maximum number of cpu cores that can be used for an account'); @@ -572,6 +572,7 @@ CREATE VIEW `cloud`.`user_vm_view` AS vpc.id vpc_id, vpc.uuid vpc_uuid, networks.uuid network_uuid, + networks.name network_name, networks.traffic_type traffic_type, networks.guest_type guest_type, user_ip_address.id public_ip_id, @@ -750,7 +751,7 @@ CREATE VIEW `cloud`.`domain_router_view` AS left join `cloud`.`networks` ON nics.network_id = networks.id left join - `cloud`.`vpc` ON networks.vpc_id = vpc.id + `cloud`.`vpc` ON domain_router.vpc_id = vpc.id left join `cloud`.`async_job` ON async_job.instance_id = vm_instance.id and async_job.instance_type = 'DomainRouter' diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql index ca15bdaf781..eb650cc9da1 100644 --- a/setup/db/db/schema-410to420.sql +++ b/setup/db/db/schema-410to420.sql @@ -27,7 +27,9 @@ UPDATE `cloud`.`hypervisor_capabilities` SET `max_hosts_per_cluster`=32 WHERE `h INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '5.1', 128, 0, 32); DELETE FROM `cloud`.`configuration` where name='vmware.percluster.host.max'; INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'AgentManager', 'xen.nics.max', '7', 'Maximum allowed nics for Vms created on Xen'); +ALTER TABLE `cloud`.`load_balancer_vm_map` ADD state VARCHAR(40) NULL COMMENT 'service status updated by LB healthcheck manager'; +alter table storage_pool change storage_provider_id 
storage_provider_name varchar(255); alter table template_host_ref add state varchar(255); alter table template_host_ref add update_count bigint unsigned; alter table template_host_ref add updated datetime; @@ -69,13 +71,12 @@ CREATE TABLE `cloud`.`data_store_provider` ( CREATE TABLE `cloud`.`image_data_store` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', `name` varchar(255) NOT NULL COMMENT 'name of data store', - `image_provider_id` bigint unsigned NOT NULL COMMENT 'id of image_data_store_provider', + `image_provider_name` varchar(255) NOT NULL COMMENT 'id of image_data_store_provider', `protocol` varchar(255) NOT NULL COMMENT 'protocol of data store', `data_center_id` bigint unsigned COMMENT 'datacenter id of data store', `scope` varchar(255) COMMENT 'scope of data store', `uuid` varchar(255) COMMENT 'uuid of data store', - PRIMARY KEY(`id`), - CONSTRAINT `fk_tags__image_data_store_provider_id` FOREIGN KEY(`image_provider_id`) REFERENCES `data_store_provider`(`id`) + PRIMARY KEY(`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; ALTER TABLE `cloud`.`vm_template` ADD COLUMN `image_data_store_id` bigint unsigned; @@ -97,6 +98,21 @@ CREATE TABLE `vpc_service_map` ( UNIQUE (`vpc_id`, `service`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; +CREATE TABLE `cloud`.`load_balancer_healthcheck_policies` ( + `id` bigint(20) NOT NULL auto_increment, + `uuid` varchar(40), + `load_balancer_id` bigint unsigned NOT NULL, + `pingpath` varchar(225) NULL DEFAULT '/', + `description` varchar(4096) NULL, + `response_time` int(11) DEFAULT 5, + `healthcheck_interval` int(11) DEFAULT 5, + `healthcheck_thresshold` int(11) DEFAULT 2, + `unhealth_thresshold` int(11) DEFAULT 10, + `revoke` tinyint(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 is when rule is set for Revoke', + PRIMARY KEY (`id`), + UNIQUE KEY `id_UNIQUE` (`id`), + CONSTRAINT `fk_load_balancer_healthcheck_policies_loadbalancer_id` FOREIGN KEY(`load_balancer_id`) REFERENCES `load_balancing_rules`(`id`) ON DELETE CASCADE +) 
ENGINE=InnoDB DEFAULT CHARSET=utf8; INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.instancename.flag', 'false', 'Append guest VM display Name (if set) to the internal name of the VM'); diff --git a/setup/db/templates.simulator.sql b/setup/db/templates.simulator.sql index 437e8f5357e..13246233c16 100755 --- a/setup/db/templates.simulator.sql +++ b/setup/db/templates.simulator.sql @@ -16,7 +16,7 @@ -- under the License. -INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type) - VALUES (10, UUID(), 'simulator-domR', 'SystemVM Template (simulator)', 0, now(), 'SYSTEM', 0, 64, 1, 'http://nfs1.lab.vmops.com/templates/routing/debian/latest/systemvm.vhd.bz2', '', 0, 'SystemVM Template (simulator)', 'VHD', 15, 0, 1, 'Simulator'); -INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type) - VALUES (11, UUID(), 'simulator-Centos', 'CentOS 5.3(64-bit) no GUI (Simulator)', 1, now(), 'BUILTIN', 0, 64, 1, 'http://nfs1.lab.vmops.com/templates/centos53-x86_64/latest/f59f18fb-ae94-4f97-afd2-f84755767aca.vhd.bz2', '', 0, 'CentOS 5.3(64-bit) no GUI (Simulator)', 'VHD', 11, 1, 1, 'Simulator'); +INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type, image_data_store_id) + VALUES (10, UUID(), 'simulator-domR', 'SystemVM Template (simulator)', 0, now(), 'SYSTEM', 0, 64, 1, 'http://nfs1.lab.vmops.com/templates/routing/debian/latest/systemvm.vhd.bz2', '', 0, 'SystemVM Template (simulator)', 'VHD', 15, 0, 1, 'Simulator', 1); +INSERT INTO `cloud`.`vm_template` (id, 
uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type, image_data_store_id) + VALUES (11, UUID(), 'simulator-Centos', 'CentOS 5.3(64-bit) no GUI (Simulator)', 1, now(), 'BUILTIN', 0, 64, 1, 'http://nfs1.lab.vmops.com/templates/centos53-x86_64/latest/f59f18fb-ae94-4f97-afd2-f84755767aca.vhd.bz2', '', 0, 'CentOS 5.3(64-bit) no GUI (Simulator)', 'VHD', 11, 1, 1, 'Simulator', 1); diff --git a/test/integration/component/test_project_usage.py b/test/integration/component/test_project_usage.py index 16d51068deb..9f0488d20ce 100644 --- a/test/integration/component/test_project_usage.py +++ b/test/integration/component/test_project_usage.py @@ -82,7 +82,7 @@ class Services: "iso": { "displaytext": "Test ISO", "name": "Test ISO", - "url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "isextractable": True, "isfeatured": True, diff --git a/test/integration/component/test_usage.py b/test/integration/component/test_usage.py index 4251eab9555..82d13e5a9ff 100644 --- a/test/integration/component/test_usage.py +++ b/test/integration/component/test_usage.py @@ -78,7 +78,7 @@ class Services: "iso": { "displaytext": "Test ISO", "name": "Test ISO", - "url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "isextractable": True, "isfeatured": True, diff --git a/test/integration/component/test_volumes.py b/test/integration/component/test_volumes.py index 0a7813065ae..bedf6efd8b4 100644 --- a/test/integration/component/test_volumes.py +++ b/test/integration/component/test_volumes.py @@ -77,7 +77,7 @@ class Services: { "displaytext": "Test ISO", "name": "testISO", - "url": 
"http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "ostype": 'CentOS 5.3 (64-bit)', }, diff --git a/test/integration/smoke/test_iso.py b/test/integration/smoke/test_iso.py index 8228a278cc9..5bd7bb358be 100644 --- a/test/integration/smoke/test_iso.py +++ b/test/integration/smoke/test_iso.py @@ -50,7 +50,7 @@ class Services: { "displaytext": "Test ISO 1", "name": "ISO 1", - "url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "isextractable": True, "isfeatured": True, @@ -61,7 +61,7 @@ class Services: { "displaytext": "Test ISO 2", "name": "ISO 2", - "url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "isextractable": True, "isfeatured": True, diff --git a/test/integration/smoke/test_nic.py b/test/integration/smoke/test_nic.py index b9dfdde8fe5..ad30122cd47 100644 --- a/test/integration/smoke/test_nic.py +++ b/test/integration/smoke/test_nic.py @@ -88,7 +88,7 @@ class Services: "iso": { "displaytext": "Test ISO", "name": "testISO", - "url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "ostype": 'CentOS 5.3 (64-bit)', "mode": 'HTTP_DOWNLOAD', # Downloading existing ISO diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py index 8d65c00c896..0a5fbad8376 100644 --- a/test/integration/smoke/test_vm_life_cycle.py +++ b/test/integration/smoke/test_vm_life_cycle.py @@ -107,7 +107,7 @@ class Services: { "displaytext": "Test ISO", "name": "testISO", - "url": 
"http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "ostype": 'CentOS 5.3 (64-bit)', "mode": 'HTTP_DOWNLOAD', # Downloading existing ISO diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index 6292c536a9d..ab2456dd3eb 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -95,6 +95,7 @@ known_categories = { 'InstanceGroup': 'VM Group', 'StorageMaintenance': 'Storage Pool', 'StoragePool': 'Storage Pool', + 'StorageProvider': 'Storage Pool', 'SecurityGroup': 'Security Group', 'SSH': 'SSH', 'register': 'Registration', @@ -123,6 +124,7 @@ known_categories = { 'Pool': 'Pool', 'VPC': 'VPC', 'PrivateGateway': 'VPC', + 'Simulator': 'simulator', 'StaticRoute': 'VPC', 'Tags': 'Resource tags', 'NiciraNvpDevice': 'Nicira NVP', diff --git a/tools/appliance/README.md b/tools/appliance/README.md index 559f79c6adb..bb28829a366 100644 --- a/tools/appliance/README.md +++ b/tools/appliance/README.md @@ -26,11 +26,11 @@ under the License. export PATH=~/.rvm/bin:$PATH - Install Ruby 1.9.3, if it installed some other version: rvm install 1.9.3 + - Install bundler: (if you get any openssl issue see https://rvm.io/packages/openssl) + gem install bundler All the dependencies will be fetched automatically. 
-Vagrant: https://github.com/chipchilders/vagrant.git - To save some time if you've downloaded iso of your distro, put the isos in: tools/appliance/iso/ diff --git a/tools/appliance/definitions/devcloud/base.sh b/tools/appliance/definitions/devcloud/base.sh new file mode 100644 index 00000000000..122b3893c92 --- /dev/null +++ b/tools/appliance/definitions/devcloud/base.sh @@ -0,0 +1,12 @@ +# Update the box +apt-get -y update +#below are needed for ruby perhaps +#apt-get -y install linux-headers-$(uname -r) build-essential +#apt-get -y install zlib1g-dev libssl-dev libreadline-gplv2-dev +apt-get -y install curl unzip +apt-get clean + +echo 'cloud ALL=NOPASSWD:ALL' > /etc/sudoers.d/cloud + +# Tweak sshd to prevent DNS resolution (speed up logins) +echo 'UseDNS no' >> /etc/ssh/sshd_config diff --git a/tools/appliance/definitions/devcloud/cleanup.sh b/tools/appliance/definitions/devcloud/cleanup.sh new file mode 100644 index 00000000000..9e98ab03531 --- /dev/null +++ b/tools/appliance/definitions/devcloud/cleanup.sh @@ -0,0 +1,21 @@ +# Clean up +#apt-get -y remove linux-headers-$(uname -r) build-essential +apt-get -y remove dictionaries-common busybox +apt-get -y autoremove +apt-get autoclean +apt-get clean + +# Removing leftover leases and persistent rules +echo "cleaning up dhcp leases" +rm /var/lib/dhcp/* + +# Make sure Udev doesn't block our network +echo "cleaning up udev rules" +rm /etc/udev/rules.d/70-persistent-net.rules +mkdir /etc/udev/rules.d/70-persistent-net.rules +rm -rf /dev/.udev/ +rm /lib/udev/rules.d/75-persistent-net-generator.rules + +echo "Adding a 2 sec delay to the interface up, to make the dhclient happy" +echo "pre-up sleep 2" >> /etc/network/interfaces + diff --git a/tools/appliance/definitions/devcloud/definition.rb b/tools/appliance/definitions/devcloud/definition.rb new file mode 100644 index 00000000000..d4f48ec1ce5 --- /dev/null +++ b/tools/appliance/definitions/devcloud/definition.rb @@ -0,0 +1,45 @@ +Veewee::Definition.declare({ + 
:cpu_count => '1', + :memory_size=> '2048', + :disk_size => '32000', :disk_format => 'VDI', :hostiocache => 'off', + :os_type_id => 'Debian', + :iso_file => "debian-wheezy-DI-rc1-i386-netinst.iso", + :iso_src => "http://cdimage.debian.org/cdimage/wheezy_di_rc1/i386/iso-cd/debian-wheezy-DI-rc1-i386-netinst.iso", + :iso_md5 => "db12ca9554bb8f121c98e268682a55d0", + :iso_download_timeout => "1000", + :boot_wait => "10", :boot_cmd_sequence => [ + '', + 'install ', + 'preseed/url=http://%IP%:%PORT%/preseed.cfg ', + 'debian-installer=en_US ', + 'auto ', + 'locale=en_US ', + 'kbd-chooser/method=us ', + 'netcfg/get_hostname=systemvm ', + 'netcfg/get_domain=apache.org ', + 'fb=false ', + 'debconf/frontend=noninteractive ', + 'console-setup/ask_detect=false ', + 'console-keymaps-at/keymap=us ', + 'keyboard-configuration/xkb-keymap=us ', + '' + ], + :kickstart_port => "7122", + :kickstart_timeout => "10000", + :kickstart_file => "preseed.cfg", + :ssh_login_timeout => "10000", + :ssh_user => "root", + :ssh_password => "password", + :ssh_key => "", + :ssh_host_port => "7222", + :ssh_guest_port => "22", + :sudo_cmd => "echo '%p'|sudo -S sh '%f'", + :shutdown_cmd => "halt -p", + :postinstall_files => [ + "base.sh", + "postinstall.sh", + "cleanup.sh", + "zerodisk.sh" + ], + :postinstall_timeout => "10000" +}) diff --git a/tools/appliance/definitions/devcloud/postinstall.sh b/tools/appliance/definitions/devcloud/postinstall.sh new file mode 100644 index 00000000000..9ec1240d369 --- /dev/null +++ b/tools/appliance/definitions/devcloud/postinstall.sh @@ -0,0 +1,60 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +set -x + +install_packages() { + DEBIAN_FRONTEND=noninteractive + DEBIAN_PRIORITY=critical + + # utlities + apt-get --no-install-recommends -q -y --force-yes install python bzip2 sed gawk diffutils grep gzip less tar telnet wget zip unzip sudo + + # dev tools, ssh, nfs + apt-get --no-install-recommends -q -y --force-yes install git vim tcpdump ebtables iptables openssl openssh-server openjdk-6-jdk genisoimage python-pip nfs-kernel-server + + # mysql with root password=password + debconf-set-selections <<< 'mysql-server- mysql-server/root_password password password' + debconf-set-selections <<< 'mysql-server- mysql-server/root_password_again password password' + apt-get --no-install-recommends -q -y --force-yes install mysql-server + + # xen and xcp + apt-get --no-install-recommends -q -y --force-yes install linux-headers-3.2.0-4-686-pae xen-hypervisor-4.1-i386 xcp-xapi xcp-xe xcp-guest-templates xcp-vncterm xen-tools blktap-utils blktap-dkms qemu-keymaps qemu-utils + +} + +fix_locale() { + cat >> /etc/default/locale << EOF +LANG=en_US.UTF-8 +LC_ALL=en_US.UTF-8 +EOF + cat >> /etc/locale.gen << EOF +en_US.UTF-8 UTF-8 +EOF + + locale-gen en_US.UTF-8 +} + +begin=$(date +%s) + +install_packages +fix_locale + +fin=$(date +%s) +t=$((fin-begin)) + +echo "DevCloud baked in $t seconds" diff --git a/tools/appliance/definitions/devcloud/preseed.cfg b/tools/appliance/definitions/devcloud/preseed.cfg new file mode 100644 index 00000000000..ac9edd31213 --- /dev/null +++ b/tools/appliance/definitions/devcloud/preseed.cfg @@ -0,0 +1,357 @@ +#### Contents of the 
preconfiguration file (for squeeze) +### Localization +# Locale sets language and country. +d-i debian-installer/locale string en_US + +# Keyboard selection. +#d-i console-tools/archs select at +d-i console-keymaps-at/keymap select us +# Example for a different keyboard architecture +#d-i console-keymaps-usb/keymap select mac-usb-us + +### Network configuration +# netcfg will choose an interface that has link if possible. This makes it +# skip displaying a list if there is more than one interface. +d-i netcfg/choose_interface select auto + +# To pick a particular interface instead: +#d-i netcfg/choose_interface select eth1 + +# If you have a slow dhcp server and the installer times out waiting for +# it, this might be useful. +#d-i netcfg/dhcp_timeout string 60 + +# If you prefer to configure the network manually, uncomment this line and +# the static network configuration below. +#d-i netcfg/disable_dhcp boolean true + +# If you want the preconfiguration file to work on systems both with and +# without a dhcp server, uncomment these lines and the static network +# configuration below. +#d-i netcfg/dhcp_failed note +#d-i netcfg/dhcp_options select Configure network manually + +# Static network configuration. +#d-i netcfg/get_nameservers string 192.168.1.1 +#d-i netcfg/get_ipaddress string 192.168.1.42 +#d-i netcfg/get_netmask string 255.255.255.0 +#d-i netcfg/get_gateway string 192.168.1.1 +#d-i netcfg/confirm_static boolean true + +# Any hostname and domain names assigned from dhcp take precedence over +# values set here. However, setting the values still prevents the questions +# from being shown, even if values come from dhcp. +d-i netcfg/get_hostname string systemvm +d-i netcfg/get_domain string cloudstack.org + +# Disable that annoying WEP key dialog. +d-i netcfg/wireless_wep string +# The wacky dhcp hostname that some ISPs use as a password of sorts. 
+#d-i netcfg/dhcp_hostname string radish + +# If non-free firmware is needed for the network or other hardware, you can +# configure the installer to always try to load it, without prompting. Or +# change to false to disable asking. +#d-i hw-detect/load_firmware boolean true + +### Network console +# Use the following settings if you wish to make use of the network-console +# component for remote installation over SSH. This only makes sense if you +# intend to perform the remainder of the installation manually. +#d-i anna/choose_modules string network-console +#d-i network-console/password password r00tme +#d-i network-console/password-again password r00tme + +### Mirror settings +# If you select ftp, the mirror/country string does not need to be set. +#d-i mirror/protocol string ftp +d-i mirror/country string manual +d-i mirror/http/hostname string http.us.debian.org +d-i mirror/http/directory string /debian +d-i mirror/http/proxy string + +# Suite to install. +#d-i mirror/suite string testing +# Suite to use for loading installer components (optional). +#d-i mirror/udeb/suite string testing + +### Clock and time zone setup +# Controls whether or not the hardware clock is set to UTC. +d-i clock-setup/utc boolean true + +# You may set this to any valid setting for $TZ; see the contents of +# /usr/share/zoneinfo/ for valid values. +d-i time/zone string UTC + +# Controls whether to use NTP to set the clock during the install +d-i clock-setup/ntp boolean true +# NTP server to use. The default is almost always fine here. +#d-i clock-setup/ntp-server string ntp.example.com + +### Partitioning +# If the system has free space you can choose to only partition that space. +#d-i partman-auto/init_automatically_partition select biggest_free + +# Alternatively, you can specify a disk to partition. The device name must +# be given in traditional non-devfs format. +# Note: A disk must be specified, unless the system has only one disk. 
+# For example, to use the first SCSI/SATA hard disk: +d-i partman-auto/disk string /dev/sda +# In addition, you'll need to specify the method to use. +# The presently available methods are: "regular", "lvm" and "crypto" +d-i partman-auto/method string regular + +# If one of the disks that are going to be automatically partitioned +# contains an old LVM configuration, the user will normally receive a +# warning. This can be preseeded away... +#d-i partman-lvm/device_remove_lvm boolean true +# The same applies to pre-existing software RAID array: +#d-i partman-md/device_remove_md boolean true + +# And the same goes for the confirmation to write the lvm partitions. +#d-i partman-lvm/confirm boolean true +#d-i partman-lvm/confirm_nooverwrite boolean true + +#d-i partman/choose_partition select finish +#d-i partman-auto-lvm/guided_size string max + +# You can choose one of the three predefined partitioning recipes: +# - atomic: all files in one partition +# - home: separate /home partition +# - multi: separate /home, /usr, /var, and /tmp partitions +d-i partman-auto/choose_recipe select atomic +#d-i partman/default_filesystem string ext3 + +# Or provide a recipe of your own... +# The recipe format is documented in the file devel/partman-auto-recipe.txt. +# If you have a way to get a recipe file into the d-i environment, you can +# just point at it. +#d-i partman-auto/expert_recipe_file string /hd-media/recipe + +d-i partman-auto/expert_recipe string \ + boot-root :: \ + 40 50 100 ext4 \ + $primary{ } $bootable{ } \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ /boot } \ + . \ + 400 40 500 ext4 \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ / } \ + . \ + 60 100 200 ext4 \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ /home } \ + . \ + 500 30 1000 ext4 \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ /usr } \ + . 
\ + 400 40 500 ext4 \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ /opt } \ + . \ + 500 60 1000 ext4 \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ /var } \ + . \ + 100 70 400 ext4 \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ /tmp } \ + . \ + 64 512 300% linux-swap \ + method{ swap } format{ } \ + . + +# If not, you can put an entire recipe into the preconfiguration file in one +# (logical) line. This example creates a small /boot partition, suitable +# swap, and uses the rest of the space for the root partition: +#d-i partman-auto/expert_recipe string \ +# boot-root :: \ +# 40 50 100 ext3 \ +# $primary{ } $bootable{ } \ +# method{ format } format{ } \ +# use_filesystem{ } filesystem{ ext3 } \ +# mountpoint{ /boot } \ +# . \ +# 500 10000 1000000000 ext3 \ +# method{ format } format{ } \ +# use_filesystem{ } filesystem{ ext3 } \ +# mountpoint{ / } \ +# . \ +# 64 512 300% linux-swap \ +# method{ swap } format{ } \ +# . + +#The preseed line that "selects finish" needs to be in a certain order in your preseed, the example-preseed does not follow this. +#http://ubuntuforums.org/archive/index.php/t-1504045.html + +# This makes partman automatically partition without confirmation, provided +# that you told it what to do using one of the methods above. +#d-i partman-partitioning/confirm_write_new_label boolean true +d-i partman/confirm_write_new_label boolean true +d-i partman/choose_partition select finish +d-i partman/confirm boolean true +d-i partman/confirm_nooverwrite boolean true + +### Base system installation +# Select the initramfs generator used to generate the initrd for 2.6 kernels. +#d-i base-installer/kernel/linux/initramfs-generators string yaird + +# The kernel image (meta) package to be installed; "none" can be used if no +# kernel is to be installed. 
+#d-i base-installer/kernel/image string linux-image-2.6-486 + +### Account setup +# Skip creation of a root account (normal user account will be able to +# use sudo). +d-i passwd/root-login boolean true +# Alternatively, to skip creation of a normal user account. +#d-i passwd/make-user boolean false + +# Root password, either in clear text +d-i passwd/root-password password password +d-i passwd/root-password-again password password +# or encrypted using an MD5 hash. +#d-i passwd/root-password-crypted password [MD5 hash] + +# To create a normal user account. +d-i passwd/user-fullname string Cloud Stack +d-i passwd/username string cloud +# Normal user's password, either in clear text +d-i passwd/user-password password cloud +d-i passwd/user-password-again password cloud +# or encrypted using an MD5 hash. +#d-i passwd/user-password-crypted password [MD5 hash] +# Create the first user with the specified UID instead of the default. +#d-i passwd/user-uid string 1010 +d-i user-setup/encrypt-home boolean false +d-i user-setup/allow-password-weak boolean true + +# The user account will be added to some standard initial groups. To +# override that, use this. +d-i passwd/user-default-groups string audio cdrom video admin + +### Apt setup +# You can choose to install non-free and contrib software. +#d-i apt-setup/non-free boolean true +#d-i apt-setup/contrib boolean true +# Uncomment this if you don't want to use a network mirror. +#d-i apt-setup/use_mirror boolean false +# Select which update services to use; define the mirrors to be used. +# Values shown below are the normal defaults. +#d-i apt-setup/services-select multiselect security, volatile +#d-i apt-setup/security_host string security.debian.org +#d-i apt-setup/volatile_host string volatile.debian.org + + +# By default the installer requires that repositories be authenticated +# using a known gpg key. This setting can be used to disable that +# authentication. Warning: Insecure, not recommended. 
+#d-i debian-installer/allow_unauthenticated string true + +### Package selection +tasksel tasksel/first multiselect ssh-server +# If the desktop task is selected, install the kde and xfce desktops +# instead of the default gnome desktop. +#tasksel tasksel/desktop multiselect kde, xfce + +# Individual additional packages to install +d-i pkgsel/include string openssh-server ntp acpid sudo bzip2 + +# Whether to upgrade packages after debootstrap. +# Allowed values: none, safe-upgrade, full-upgrade +d-i pkgsel/upgrade select none + +# Some versions of the installer can report back on what software you have +# installed, and what software you use. The default is not to report back, +# but sending reports helps the project determine what software is most +# popular and include it on CDs. +popularity-contest popularity-contest/participate boolean false + +### Boot loader installation +# Grub is the default boot loader (for x86). If you want lilo installed +# instead, uncomment this: +#d-i grub-installer/skip boolean true +# To also skip installing lilo, and install no bootloader, uncomment this +# too: +#d-i lilo-installer/skip boolean true + +# This is fairly safe to set, it makes grub install automatically to the MBR +# if no other operating system is detected on the machine. +d-i grub-installer/only_debian boolean true + +# This one makes grub-installer install to the MBR if it also finds some other +# OS, which is less safe as it might not be able to boot that other OS. 
+#d-i grub-installer/with_other_os boolean true + +# Alternatively, if you want to install to a location other than the mbr, +# uncomment and edit these lines: +#d-i grub-installer/only_debian boolean false +#d-i grub-installer/with_other_os boolean false +#d-i grub-installer/bootdev string (hd0,0) +# To install grub to multiple disks: +#d-i grub-installer/bootdev string (hd0,0) (hd1,0) (hd2,0) + +# Optional password for grub, either in clear text +#d-i grub-installer/password password r00tme +#d-i grub-installer/password-again password r00tme +# or encrypted using an MD5 hash, see grub-md5-crypt(8). +#d-i grub-installer/password-crypted password [MD5 hash] + +### Finishing up the installation +# During installations from serial console, the regular virtual consoles +# (VT1-VT6) are normally disabled in /etc/inittab. Uncomment the next +# line to prevent this. +#d-i finish-install/keep-consoles boolean true + +# Avoid that last message about the install being complete. +d-i finish-install/reboot_in_progress note + +# This will prevent the installer from ejecting the CD during the reboot, +# which is useful in some situations. +#d-i cdrom-detect/eject boolean false + +# This is how to make the installer shutdown when finished, but not +# reboot into the installed system. +#d-i debian-installer/exit/halt boolean true +# This will power off the machine instead of just halting it. +#d-i debian-installer/exit/poweroff boolean true + +### Preseeding other packages +# Depending on what software you choose to install, or if things go wrong +# during the installation process, it's possible that other questions may +# be asked. You can preseed those too, of course. 
To get a list of every +# possible question that could be asked during an install, do an +# installation, and then run these commands: +# debconf-get-selections --installer > file +# debconf-get-selections >> file + + +#### Advanced options +### Running custom commands during the installation +# d-i preseeding is inherently not secure. Nothing in the installer checks +# for attempts at buffer overflows or other exploits of the values of a +# preconfiguration file like this one. Only use preconfiguration files from +# trusted locations! To drive that home, and because it's generally useful, +# here's a way to run any shell command you'd like inside the installer, +# automatically. + +# This first command is run as early as possible, just after +# preseeding is read. +# Prevent packaged version of VirtualBox Guest Additions being installed: +d-i preseed/early_command string sed -i \ + '/in-target/idiscover(){/sbin/discover|grep -v VirtualBox;}' \ + /usr/lib/pre-pkgsel.d/20install-hwpackages + +# This command is run just before the install finishes, but when there is +# still a usable /target directory. You can chroot to /target and use it +# directly, or use the apt-install and in-target commands to easily install +# packages and run commands in the target system. 
diff --git a/tools/appliance/definitions/devcloud/zerodisk.sh b/tools/appliance/definitions/devcloud/zerodisk.sh new file mode 100644 index 00000000000..a70d3e6ee89 --- /dev/null +++ b/tools/appliance/definitions/devcloud/zerodisk.sh @@ -0,0 +1,11 @@ +# Clean up stuff copied in by veewee +rm -fv /root/*.iso +rm -fv /root/base.sh /root/cleanup.sh /root/postinstall.sh /root/zerodisk.sh +rm -fv .veewee_version .veewee_params .vbox_version + +echo "Cleaning up" + +# Zero out the free space to save space in the final image: +dd if=/dev/zero of=/zero bs=1M +sync +rm -fv /zero diff --git a/tools/appliance/definitions/systemvmtemplate/postinstall.sh b/tools/appliance/definitions/systemvmtemplate/postinstall.sh index 8e745eb1ba9..5d529de038a 100644 --- a/tools/appliance/definitions/systemvmtemplate/postinstall.sh +++ b/tools/appliance/definitions/systemvmtemplate/postinstall.sh @@ -168,8 +168,8 @@ configure_services() { mkdir -p /var/lib/haproxy # Get config files from master - snapshot_url="https://git-wip-us.apache.org/repos/asf?p=incubator-cloudstack.git;a=snapshot;h=HEAD;sf=tgz" - snapshot_dir="/opt/incubator-cloudstack*" + snapshot_url="https://git-wip-us.apache.org/repos/asf?p=cloudstack.git;a=snapshot;h=HEAD;sf=tgz" + snapshot_dir="/opt/cloudstack*" cd /opt wget $snapshot_url -O cloudstack.tar.gz tar -zxvf cloudstack.tar.gz diff --git a/tools/appliance/definitions/systemvmtemplate/zerodisk.sh b/tools/appliance/definitions/systemvmtemplate/zerodisk.sh index 25bd8c4af2d..b00f7ae7ccc 100644 --- a/tools/appliance/definitions/systemvmtemplate/zerodisk.sh +++ b/tools/appliance/definitions/systemvmtemplate/zerodisk.sh @@ -6,7 +6,7 @@ rm -fv .veewee_version .veewee_params .vbox_version echo "Cleaning up" # Zero out the free space to save space in the final image: -for path in / /boot /usr /var /opt /tmp +for path in / /boot /usr /var /opt /tmp /home do dd if=/dev/zero of=$path/zero bs=1M sync diff --git a/tools/appliance/definitions/systemvmtemplate64/postinstall.sh 
b/tools/appliance/definitions/systemvmtemplate64/postinstall.sh index 8e745eb1ba9..5d529de038a 100644 --- a/tools/appliance/definitions/systemvmtemplate64/postinstall.sh +++ b/tools/appliance/definitions/systemvmtemplate64/postinstall.sh @@ -168,8 +168,8 @@ configure_services() { mkdir -p /var/lib/haproxy # Get config files from master - snapshot_url="https://git-wip-us.apache.org/repos/asf?p=incubator-cloudstack.git;a=snapshot;h=HEAD;sf=tgz" - snapshot_dir="/opt/incubator-cloudstack*" + snapshot_url="https://git-wip-us.apache.org/repos/asf?p=cloudstack.git;a=snapshot;h=HEAD;sf=tgz" + snapshot_dir="/opt/cloudstack*" cd /opt wget $snapshot_url -O cloudstack.tar.gz tar -zxvf cloudstack.tar.gz diff --git a/tools/cli/cloudmonkey/__init__.py b/tools/cli/cloudmonkey/__init__.py index e4c4e6d24f1..cf689e79480 100644 --- a/tools/cli/cloudmonkey/__init__.py +++ b/tools/cli/cloudmonkey/__init__.py @@ -16,6 +16,8 @@ # under the License. try: - from config import __version__ + from config import __version__, __description__ + from config import __maintainer__, __maintaineremail__ + from config import __project__, __projecturl__, __projectemail__ except ImportError, e: print e diff --git a/tools/cli/cloudmonkey/cachemaker.py b/tools/cli/cloudmonkey/cachemaker.py index 42a077ad928..a625b014d38 100644 --- a/tools/cli/cloudmonkey/cachemaker.py +++ b/tools/cli/cloudmonkey/cachemaker.py @@ -21,7 +21,7 @@ try: import os import types - from config import cache_file + from config import config_fields except ImportError, e: import sys print "ImportError", e @@ -100,7 +100,11 @@ def monkeycache(apis): cache['count'] = getvalue(apis[responsekey], 'count') cache['asyncapis'] = [] - for api in getvalue(apis[responsekey], 'api'): + apilist = getvalue(apis[responsekey], 'api') + if apilist == None: + print "[monkeycache] Server response issue, no apis found" + + for api in apilist: name = getvalue(api, 'name') verb, subject = splitverbsubject(name) @@ -168,6 +172,7 @@ def main(json_file): 
f.close() if __name__ == "__main__": + cache_file = config_fields['core']['cache_file'] print "[cachemaker] Pre-caching using user's cloudmonkey cache", cache_file if os.path.exists(cache_file): main(cache_file) diff --git a/tools/cli/cloudmonkey/cloudmonkey.py b/tools/cli/cloudmonkey/cloudmonkey.py index 25422412613..94006c9577a 100644 --- a/tools/cli/cloudmonkey/cloudmonkey.py +++ b/tools/cli/cloudmonkey/cloudmonkey.py @@ -29,8 +29,9 @@ try: import types from cachemaker import loadcache, savecache, monkeycache, splitverbsubject - from config import __version__, cache_file - from config import read_config, write_config + from config import __version__, __description__, __projecturl__ + from config import read_config, write_config, config_file + from optparse import OptionParser from prettytable import PrettyTable from printer import monkeyprint from requester import monkeyrequest @@ -63,13 +64,14 @@ class CloudMonkeyShell(cmd.Cmd, object): intro = ("☁ Apache CloudStack 🐵 cloudmonkey " + __version__ + ". Type help or ? 
to list commands.\n") ruler = "=" - cache_file = cache_file config_options = [] verbs = [] - def __init__(self, pname): + def __init__(self, pname, cfile): self.program_name = pname - self.config_options = read_config(self.get_attr, self.set_attr) + self.config_file = cfile + self.config_options = read_config(self.get_attr, self.set_attr, + self.config_file) self.loadcache() self.prompt = self.prompt.strip() + " " # Cosmetic fix for prompt @@ -364,7 +366,7 @@ class CloudMonkeyShell(cmd.Cmd, object): key, value = (args[0], args[2]) setattr(self, key, value) # keys and attributes should have same names self.prompt = self.prompt.strip() + " " # prompt fix - write_config(self.get_attr) + write_config(self.get_attr, self.config_file) def complete_set(self, text, line, begidx, endidx): mline = line.partition(" ")[2] @@ -458,10 +460,38 @@ class CloudMonkeyShell(cmd.Cmd, object): return self.do_EOF(args) +class MonkeyParser(OptionParser): + def format_help(self, formatter=None): + if formatter is None: + formatter = self.formatter + result = [] + if self.usage: + result.append("Usage: cloudmonkey [options] [cmds] [params]\n\n") + if self.description: + result.append(self.format_description(formatter) + "\n") + result.append(self.format_option_help(formatter)) + result.append("\nTry cloudmonkey [help|?]\n") + return "".join(result) + + def main(): - shell = CloudMonkeyShell(sys.argv[0]) - if len(sys.argv) > 1: - shell.onecmd(' '.join(sys.argv[1:])) + parser = MonkeyParser() + parser.add_option("-c", "--config-file", + dest="cfile", default=config_file, + help="config file for cloudmonkey", metavar="FILE") + parser.add_option("-v", "--version", + action="store_true", dest="version", default=False, + help="prints cloudmonkey version information") + + (options, args) = parser.parse_args() + if options.version: + print "cloudmonkey", __version__ + print __description__, "(%s)" % __projecturl__ + sys.exit(0) + + shell = CloudMonkeyShell(sys.argv[0], options.cfile) + if len(args) 
> 0: + shell.onecmd(' '.join(args)) else: shell.cmdloop() diff --git a/tools/cli/cloudmonkey/config.py b/tools/cli/cloudmonkey/config.py index 6a5feab8d12..75605df93b7 100644 --- a/tools/cli/cloudmonkey/config.py +++ b/tools/cli/cloudmonkey/config.py @@ -19,6 +19,12 @@ # Use following rules for versioning: # - __version__ = "4.1.0-0" +__description__ = "Command Line Interface for Apache CloudStack" +__maintainer__ = "Rohit Yadav" +__maintaineremail__ = "bhaisaab@apache.org" +__project__ = "The Apache CloudStack Team" +__projectemail__ = "dev@cloudstack.apache.org" +__projecturl__ = "http://cloudstack.apache.org" try: import os @@ -36,14 +42,14 @@ iterable_type = ['set', 'list', 'object'] config_dir = expanduser('~/.cloudmonkey') config_file = expanduser(config_dir + '/config') -cache_file = expanduser(config_dir + '/cache') # cloudmonkey config fields -config_fields = {'core': {}, 'ui': {}, 'server': {}, 'user': {}} +config_fields = {'core': {}, 'server': {}, 'user': {}, 'ui': {}} # core config_fields['core']['asyncblock'] = 'true' config_fields['core']['paramcompletion'] = 'false' +config_fields['core']['cache_file'] = expanduser(config_dir + '/cache') config_fields['core']['history_file'] = expanduser(config_dir + '/history') config_fields['core']['log_file'] = expanduser(config_dir + '/log') @@ -64,8 +70,8 @@ config_fields['user']['apikey'] = '' config_fields['user']['secretkey'] = '' -def write_config(get_attr, first_time=False): - global config_fields, config_file +def write_config(get_attr, config_file, first_time=False): + global config_fields config = ConfigParser() for section in config_fields.keys(): config.add_section(section) @@ -79,8 +85,8 @@ def write_config(get_attr, first_time=False): return config -def read_config(get_attr, set_attr): - global config_fields, config_dir, config_file +def read_config(get_attr, set_attr, config_file): + global config_fields, config_dir if not os.path.exists(config_dir): os.makedirs(config_dir) @@ -95,7 +101,7 @@ def 
read_config(get_attr, set_attr): except IOError, e: print "Error: config_file not found", e else: - config = write_config(get_attr, True) + config = write_config(get_attr, config_file, True) print "Welcome! Using `set` configure the necessary settings:" print " ".join(sorted(config_options)) print "Config file:", config_file diff --git a/tools/cli/setup.py b/tools/cli/setup.py index 9624115ed5f..4c7b2978b2f 100644 --- a/tools/cli/setup.py +++ b/tools/cli/setup.py @@ -22,13 +22,9 @@ except ImportError: use_setuptools() from setuptools import setup, find_packages -from cloudmonkey import __version__ - -name = 'cloudmonkey' -version = __version__ -requires = ['Pygments>=1.5', - 'prettytable>=0.6', - ] +from cloudmonkey import __version__, __description__ +from cloudmonkey import __maintainer__, __maintaineremail__ +from cloudmonkey import __project__, __projecturl__, __projectemail__ try: import readline @@ -36,20 +32,22 @@ except ImportError: requires.append('readline') setup( - name = name, - version = version, - author = "The Apache CloudStack Team", - author_email = "cloudstack-dev@incubator.apache.org", - maintainer = "Rohit Yadav", - maintainer_email = "bhaisaab@apache.org", - url = "http://incubator.apache.org/cloudstack", - description = "Command Line Interface for Apache CloudStack", - long_description = "cloudmonkey is a command line interface for Apache " - "CloudStack powered by CloudStack Marvin", + name = 'cloudmonkey', + version = __version__, + author = __project__, + author_email = __projectemail__, + maintainer = __maintainer__, + maintainer_email = __maintaineremail__, + url = __projecturl__, + description = __description__, + long_description = "cloudmonkey is a CLI for Apache CloudStack", platforms = ("Any",), license = 'ASL 2.0', packages = find_packages(), - install_requires = requires, + install_requires = [ + 'Pygments>=1.5', + 'prettytable>=0.6', + ], include_package_data = True, zip_safe = False, classifiers = [ diff --git 
a/tools/marvin/marvin/sandbox/demo/simulator/simulator.cfg b/tools/marvin/marvin/sandbox/demo/simulator/simulator.cfg index 7c733ade256..ca794605540 100644 --- a/tools/marvin/marvin/sandbox/demo/simulator/simulator.cfg +++ b/tools/marvin/marvin/sandbox/demo/simulator/simulator.cfg @@ -112,11 +112,11 @@ "logger": [ { "name": "TestClient", - "file": "/var/log/testclient.log" + "file": "/tmp/testclient.log" }, { "name": "TestCase", - "file": "/var/log/testcase.log" + "file": "/tmp/testcase.log" } ], "globalConfig": [ diff --git a/tools/marvin/pom.xml b/tools/marvin/pom.xml index 80099be1ecb..a3bd5460fd5 100644 --- a/tools/marvin/pom.xml +++ b/tools/marvin/pom.xml @@ -9,112 +9,119 @@ OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> - 4.0.0 - cloud-marvin - Apache CloudStack marvin - pom - - org.apache.cloudstack - cloud-tools - 4.2.0-SNAPSHOT - ../pom.xml - - - install - - - maven-antrun-plugin - 1.7 - - - clean - clean - - run - - - - - Deleting ${project.artifactId} API sources - - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - compile - compile - - exec - - - ${basedir}/marvin - python - - codegenerator.py - -s - ${basedir}/../apidoc/target/commands.xml - Generating ${project.artifactId} API classes} - - - - - package - package - - exec - - - ${exec.workingdir} - python - - setup.py - sdist - - - - + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + 4.0.0 + cloud-marvin + Apache CloudStack marvin + pom + + org.apache.cloudstack + cloud-tools + 4.2.0-SNAPSHOT + ../pom.xml + + + install + + + maven-antrun-plugin + 1.7 + + + clean + clean + + run + + + + + Deleting ${project.artifactId} API sources + + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + compile + compile + + exec + + + ${basedir}/marvin + python + + codegenerator.py + -s + ${basedir}/../apidoc/target/commands.xml + Generating 
${project.artifactId} API classes} + + + + + package + package + + exec + + + ${exec.workingdir} + python + + setup.py + sdist + + + + - + - - - - marvin - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - package - - exec - - - - - ${basedir}/marvin - python - - deployDataCenter.py - -i - ${user.dir}/${marvin.config} - - - - - - - + + + + marvin + + marvin.config + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + ${basedir}/marvin + python + + deployAndRun.py + -c + ${user.dir}/${marvin.config} + -t + /tmp/t.log + -r + /tmp/r.log + -f + ${basedir}/marvin/testSetupSuccess.py + + + + + test + + exec + + + + + + + + diff --git a/ui/css/cloudstack3.css b/ui/css/cloudstack3.css index 7381715d693..9b77b9f3a4f 100644 --- a/ui/css/cloudstack3.css +++ b/ui/css/cloudstack3.css @@ -467,6 +467,13 @@ div.list-view div.toolbar div.section-switcher div.section-select label { opacity: 0.7; } +.loading-overlay span { + display: block; + text-align: center; + margin: 155px 0 0 5px; + color: #4B4B4B; +} + .detail-view .ui-tabs-panel .loading-overlay { background-position: 50% 250px; } @@ -1766,6 +1773,37 @@ div.list-view td.state.off span { background-position: 100% -431px; } +/*List-view: subselect dropdown*/ +.list-view .subselect { + width: 116px; + display: block; + float: left; + background: url(../images/bg-gradients.png) 0px -42px; + padding: 0; + margin: 8px 0 1px 7px; + clear: both; + border: 1px solid #A8A7A7; + /*+border-radius:4px;*/ + -moz-border-radius: 4px; + -webkit-border-radius: 4px; + -khtml-border-radius: 4px; + border-radius: 4px; +} + +.list-view .subselect span { + margin: 4px 0 0 12px; +} + +.list-view .subselect select { + width: 85%; + margin: 5px 0 4px; + font-size: 10px; +} + +.detail-group .main-groups table td.value .view-all:hover { + background-position: 100% -431px; +} + .panel.always-maximized .detail-group .main-groups table td.value span { width: 565px; } @@ -7743,6 +7781,23 @@ div.ui-dialog div.multi-edit-add-list div.view 
div.data-table table.body tbody t margin: 0 22px 0 0; } +/** Fix long table overflow*/ +.detail-view .multi-edit { + width: 100%; +} + +.detail-view .multi-edit table { + width: 97%; + max-width: inherit; +} + +.detail-view .multi-edit table tr th, +.detail-view .multi-edit table tr td { + width: 87px !important; + min-width: 87px !important; + max-width: 87px !important; +} + /** Header fields*/ .multi-edit .header-fields { position: relative; @@ -10561,6 +10616,37 @@ div.ui-dialog div.acl div.multi-edit div.data div.data-body div.data-item table width: 65px; } +/*HEALTH CHECK*/ +.ui-dialog .health-check { + height: 295px !important; + padding-bottom: 93px; +} + +div.ui-dialog div.health-check div.health-check-description { + color: #808080; +} + +div.ui-dialog div.health-check div.form-container form div.form-item { + width: 58% margin-left:116px; + margin-top: -16px; + margin-bottom: 30px; +} + +div.ui-dialog div.health-check div.health-check-config-title { + float: left; + color: #808080; + font-size: 17px; + margin-left: 15px; +} + +div.ui-dialog div.health-check div.health-check-advanced-title { + float: left; + color: #808080; + font-size: 17px; + margin-left: 15px; + margin-top: -70px; +} + /*Autoscaler*/ .ui-dialog div.autoscaler { overflow: auto; diff --git a/ui/dictionary.jsp b/ui/dictionary.jsp index 6c06a10ed98..cd8d7323300 100644 --- a/ui/dictionary.jsp +++ b/ui/dictionary.jsp @@ -25,6 +25,7 @@ under the License. 
<% long now = System.currentTimeMillis(); %> + diff --git a/ui/scripts/accounts.js b/ui/scripts/accounts.js index 3727f8ee0b1..3403337a834 100644 --- a/ui/scripts/accounts.js +++ b/ui/scripts/accounts.js @@ -112,32 +112,7 @@ label: 'label.last.name', validation: { required: true }, docID: 'helpAccountLastName' - }, - regionid: { - label: 'label.region', - select: function(args) { - $.ajax({ - url: createURL('listRegions&listAll=true'), - success: function(json) { - var regions = json.listregionsresponse.region; - var regionOptions; - - if (!regions) { - regionOptions = [{ id: 0, description: '0 - Default' }]; - } else { - regionOptions = $(regions).map(function(index, region) { - return { - id: region.id, - description: region.id + ' - ' + region.name - }; - }); - } - - args.response.success({ data: regionOptions }); - } - }); - } - }, + }, domainid: { label: 'label.domain', docID: 'helpAccountDomain', @@ -220,8 +195,7 @@ $.extend(data, { email: args.data.email, firstname: args.data.firstname, - lastname: args.data.lastname, - regionid: args.data.regionid, + lastname: args.data.lastname, domainid: args.data.domainid }); @@ -316,8 +290,7 @@ domainid: accountObj.domainid, account: accountObj.name, newname: args.data.name, - networkdomain: args.data.networkdomain, - regionid: accountObj.regionid ? 
accountObj.regionid : 0 + networkdomain: args.data.networkdomain }; $.ajax({ @@ -660,8 +633,7 @@ converter: function(args){ return cloudStack.converters.toRole(args); } - }, - regionid: { label: 'label.region' }, + }, domain: { label: 'label.domain' }, state: { label: 'label.state' }, networkdomain: { diff --git a/ui/scripts/cloud.core.callbacks.js b/ui/scripts/cloud.core.callbacks.js index 857c247d9d1..1a9e0456d0b 100644 --- a/ui/scripts/cloud.core.callbacks.js +++ b/ui/scripts/cloud.core.callbacks.js @@ -52,17 +52,29 @@ Below is a sample login attempt var clientApiUrl = "/client/api"; var clientConsoleUrl = "/client/console"; -$(document).ready(function() { +$(document).ready(function() { + /* + condition 1: If window.location.href contains parameter 'loginUrl', save the parameter's value to a cookie, then reload the page without any URL parameter. + (After the page is reloaded without any URL parameter, it will fall in condition 2.) + */ + if ($.urlParam('loginUrl') != 0) { + $.cookie('loginUrl', $.urlParam('loginUrl'), { expires: 1}); + document.location.href = window.location.href.substring(0, window.location.href.indexOf('?')); + } - var url = $.urlParam("loginUrl"); - if (url != undefined && url != null && url.length > 0) { - url = unescape(clientApiUrl+"?"+url); + /* + condition 2: If window.location.href does not contain parameter 'loginUrl' but cookie 'loginUrl' exists, + save the cookie's value to g_regionUrlParam (a global variable for switching regions), + then call login API to set g_loginResponse (a global variable for single-sign-on). + */ + else if($.cookie('loginUrl') != null) { + g_regionUrlParam = '?loginUrl=' + $.cookie('loginUrl'); $.ajax({ - url: url, + url: unescape(clientApiUrl + "?" 
+ $.cookie('loginUrl')), dataType: "json", async: false, success: function(json) { - g_loginResponse = json.loginresponse; + g_loginResponse = json.loginresponse; }, error: function() { onLogoutCallback(); @@ -73,6 +85,7 @@ $(document).ready(function() { } }); } + }); diff --git a/ui/scripts/cloudStack.js b/ui/scripts/cloudStack.js index f9b5a58545c..985627b824e 100644 --- a/ui/scripts/cloudStack.js +++ b/ui/scripts/cloudStack.js @@ -251,6 +251,9 @@ array1.push("&domain=" + encodeURIComponent("/")); } + g_regionUrlParam = '?loginUrl=' + escape("command=login" + array1.join("") + "&response=json"); + $.cookie('loginUrl', escape("command=login" + array1.join("") + "&response=json"), { expires: 1}); + $.ajax({ type: "POST", data: "command=login" + array1.join("") + "&response=json", @@ -382,8 +385,9 @@ g_domainid = null; g_timezoneoffset = null; g_timezone = null; - g_supportELB = null; - + g_supportELB = null; + g_regionUrlParam = null; + $.cookie('JSESSIONID', null); $.cookie('sessionKey', null); $.cookie('username', null); @@ -394,6 +398,7 @@ $.cookie('timezoneoffset', null); $.cookie('timezone', null); $.cookie('supportELB', null); + $.cookie('loginUrl', null); if(onLogoutCallback()) { //onLogoutCallback() will set g_loginResponse(single-sign-on variable) to null, then bypassLoginCheck() will show login screen. document.location.reload(); //when onLogoutCallback() returns true, reload the current document. 
@@ -451,10 +456,21 @@ context: cloudStack.context }); }); + + window._reloadUI = function() { + $('#container').html(''); + $('#container').cloudStack(window.cloudStack); + }; } }; document.title = 'CloudStack'; + + if ($.cookie('loginUrl') != null || $.urlParam('loginUrl') != 0) { + // SSO + loginArgs.hideLoginScreen = true; + } + cloudStack.uiCustom.login(loginArgs); // Localization diff --git a/ui/scripts/dashboard.js b/ui/scripts/dashboard.js index 88c3cd15835..845ae52259b 100644 --- a/ui/scripts/dashboard.js +++ b/ui/scripts/dashboard.js @@ -91,7 +91,7 @@ var netTotal = json.listnetworksresponse.count ? json.listnetworksresponse.count : 0; - $.ajax({ + $.ajax({ url: createURL('listPublicIpAddresses'), success: function(json) { var ipTotal = json.listpublicipaddressesresponse.count ? @@ -102,7 +102,7 @@ ipTotal: ipTotal })); } - }); + }); } }); } @@ -128,7 +128,7 @@ } } }, - + dataProvider: function(args) { var dataFns = { zones: function(data) { @@ -142,71 +142,24 @@ }); }, capacity: function(data) { - var latestData =null; - if(window.fetchLatestflag == 1) - { + var latestData =null; + if(window.fetchLatestflag == 1) + { latestData = { - fetchLatest:true - } + fetchLatest:true + } } - else + else { latestData = { fetchLatest:false - } + } } - window.fetchLatestflag = 0; - if (data.zones) { - $.ajax({ - url: createURL('listCapacity'), - data: latestData, - success: function(json) { - var capacities = json.listcapacityresponse.capacity; - var capacity = function(id, converter) { - var result = $.grep(capacities, function(capacity) { - return capacity.type == id; - }); - return result[0] ? 
result[0] : { - capacityused: 0, - capacitytotal: 0, - percentused: 0 - }; - }; + window.fetchLatestflag = 0; - dataFns.alerts($.extend(data, { - publicIPAllocated: capacity(8).capacityused, - publicIPTotal: capacity(8).capacitytotal, - publicIPPercentage: parseInt(capacity(8).percentused), - privateIPAllocated: capacity(5).capacityused, - privateIPTotal: capacity(5).capacitytotal, - privateIPPercentage: parseInt(capacity(8).percentused), - memoryAllocated: cloudStack.converters.convertBytes(capacity(0).capacityused), - memoryTotal: cloudStack.converters.convertBytes(capacity(0).capacitytotal), - memoryPercentage: parseInt(capacity(0).percentused), - cpuAllocated: cloudStack.converters.convertHz(capacity(1).capacityused), - cpuTotal: cloudStack.converters.convertHz(capacity(1).capacitytotal), - cpuPercentage: parseInt(capacity(1).percentused) - })); - } - }); - } else { - dataFns.alerts($.extend(data, { - publicIPAllocated: 0, - publicIPTotal: 0, - publicIPPercentage: 0, - privateIPAllocated: 0, - privateIPTotal: 0, - privateIPPercentage: 0, - memoryAllocated: 0, - memoryTotal: 0, - memoryPercentage: 0, - cpuAllocated: 0, - cpuTotal: 0, - cpuPercentage: 0 - })); - } + dataFns.alerts(data); }, alerts: function(data) { diff --git a/ui/scripts/instances.js b/ui/scripts/instances.js index 787239d9528..1e3ce45ce22 100644 --- a/ui/scripts/instances.js +++ b/ui/scripts/instances.js @@ -338,11 +338,12 @@ notification: function(args) { return 'label.action.start.instance'; }, - complete: function(args) { + complete: function(args) { if(args.password != null) { - alert('Password of the VM is ' + args.password); + return 'Password of the VM is ' + args.password; } - return 'label.action.start.instance'; + + return false; } }, notification: { diff --git a/ui/scripts/network.js b/ui/scripts/network.js index 15ead8cd5a0..65be302c0c9 100755 --- a/ui/scripts/network.js +++ b/ui/scripts/network.js @@ -2609,6 +2609,17 @@ action: cloudStack.lbStickyPolicy.dialog() } }, + + 
'health-check':{ + label:'Health Check', + custom:{ + requireValidation: true , + buttonLabel:'Configure', + action:cloudStack.uiCustom.healthCheck() + + } + }, + 'autoScale': { label: 'AutoScale', custom: { diff --git a/ui/scripts/regions.js b/ui/scripts/regions.js index 42a3e9de57c..8839dec77c3 100644 --- a/ui/scripts/regions.js +++ b/ui/scripts/regions.js @@ -27,10 +27,8 @@ args.response.success({ data: regions ? regions : [ - { id: -1, name: '(Default)' } - ], - activeRegionID: cloudStack.context.users.regionid ? - cloudStack.context.users.regionid : 1 + { id: -1, name: _l('label.no.data') } + ] }); } }); @@ -55,26 +53,34 @@ fields: { id: { label: 'label.id', validation: { required: true } }, name: { label: 'label.name', validation: { required: true } }, - endpoint: { label: 'label.endpoint', validation: { required: true } }, - userapikey: { label: 'label.api.key' }, - userapisecretkey: { label: 'label.s3.secret_key' } + endpoint: { label: 'label.endpoint', validation: { required: true } } } }, - action: function(args) { + action: function(args) { + var data = { + id: args.data.id, + name: args.data.name, + endpoint: args.data.endpoint + }; + $.ajax({ url: createURL('addRegion'), - data: args.data, - success: function(json) { - var jobID = json.addregionresponse.jobid; - - args.response.success({ _custom: { jobId: jobID }}); + data: data, + success: function(json) { + var item = json.addregionresponse.region; + args.response.success({data: item}); $(window).trigger('cloudStack.refreshRegions'); }, error: function(json) { args.response.error(parseXMLHttpResponse(json)); } }); - } + }, + notification: { + poll: function(args) { + args.complete(); + } + } } }, dataProvider: function(args) { @@ -98,9 +104,15 @@ edit: { label: 'label.edit.region', action: function(args) { + var data = { + id: args.context.regions[0].id, + name: args.data.name, + endpoint: args.data.endpoint + }; + $.ajax({ url: createURL('updateRegion'), - data: args.data, + data: data, success: 
function(json) { args.response.success(); $(window).trigger('cloudStack.refreshRegions'); @@ -116,7 +128,22 @@ messages: { notification: function() { return 'label.remove.region'; }, confirm: function() { return 'message.remove.region'; } - }, + }, + preAction: function(args) { + var region = args.context.regions[0]; + + /* e.g. + region.endpoint == "http://localhost:8080/client/" + document.location.href == "http://localhost:8080/client/#" + */ + /* + if(document.location.href.indexOf(region.endpoint) != -1) { + cloudStack.dialog.notice({ message: _l('You can not remove the region that you are currently in.') }); + return false; + } + */ + return true; + }, action: function(args) { var region = args.context.regions[0]; @@ -139,11 +166,11 @@ title: 'label.details', fields: [ { - name: { label: 'label.name', isEditable: true }, + id: { label: 'label.id' } }, { - endpoint: { label: 'label.endpoint', isEditable: true }, - id: { label: 'label.id', isEditable: true } + name: { label: 'label.name', isEditable: true }, + endpoint: { label: 'label.endpoint', isEditable: true } } ], dataProvider: function(args) { diff --git a/ui/scripts/sharedFunctions.js b/ui/scripts/sharedFunctions.js index ad26b34196e..dbcb781a6fa 100644 --- a/ui/scripts/sharedFunctions.js +++ b/ui/scripts/sharedFunctions.js @@ -20,6 +20,7 @@ var g_role = null; // roles - root, domain-admin, ro-admin, user var g_username = null; var g_account = null; var g_domainid = null; +var g_regionUrlParam = null; var g_enableLogging = false; var g_timezoneoffset = null; var g_timezone = null; @@ -400,20 +401,28 @@ cloudStack.converters = { case 4 : return _l('label.public.ips'); case 5 : return _l('label.management.ips'); case 6 : return _l('label.secondary.storage'); - case 7 : return _l('label.vlan'); - case 8 : return _l('label.direct.ips'); - case 9 : return _l('label.local.storage'); + case 7 : return _l('label.host'); + case 9 : return _l('label.domain.router'); + case 10 : return _l('label.console.proxy'); - 
// These are old values -- can be removed in the future - case 10 : return "Routing Host"; - case 11 : return "Storage"; - case 12 : return "Usage Server"; - case 13 : return "Management Server"; - case 14 : return "Domain Router"; - case 15 : return "Console Proxy"; - case 16 : return "User VM"; - case 17 : return "VLAN"; - case 18 : return "Secondary Storage VM"; + // These are old values -- can be removed in the future + case 8 : return "User VM"; + case 11 : return "Routing Host"; + case 12 : return "Storage"; + case 13 : return "Usage Server"; + case 14 : return "Management Server"; + case 15 : return "Domain Router"; + case 16 : return "Console Proxy"; + case 17 : return "User VM"; + case 18 : return "VLAN"; + case 19 : return "Secondary Storage VM"; + case 20 : return "Usage Server"; + case 21 : return "Storage"; + case 22 : return "Update Resource Count"; + case 23 : return "Usage Sanity Result"; + case 24 : return "Direct Attached Public IP"; + case 25 : return "Local Storage"; + case 26 : return "Resource Limit Exceeded"; } }, convertByType: function(alertCode, value) { diff --git a/ui/scripts/system.js b/ui/scripts/system.js index 4d529aeb04e..830d622caf4 100644 --- a/ui/scripts/system.js +++ b/ui/scripts/system.js @@ -1350,42 +1350,7 @@ var $form = $(this).closest("form"); var selectedNetworkOfferingId = $(this).val(); $(networkOfferingObjs).each(function(){ - if(this.id == selectedNetworkOfferingId) { - if(this.guestiptype == "Isolated") { //*** Isolated *** - if(this.specifyipranges == false) { - $form.find('.form-item[rel=startipv4]').hide(); - $form.find('.form-item[rel=endipv4]').hide(); - } - else { - $form.find('.form-item[rel=startipv4]').css('display', 'inline-block'); - $form.find('.form-item[rel=endipv4]').css('display', 'inline-block'); - } - - var includingSourceNat = false; - var serviceObjArray = this.service; - for(var k = 0; k < serviceObjArray.length; k++) { - if(serviceObjArray[k].name == "SourceNat") { - includingSourceNat = true; - 
break; - } - } - if(includingSourceNat == true) { //Isolated with SourceNat - cloudStack.dialog.createFormField.validation.required.remove($form.find('.form-item[rel=ip4gateway]')); //make ip4gateway optional - cloudStack.dialog.createFormField.validation.required.remove($form.find('.form-item[rel=ip4Netmask]')); //make ip4Netmask optional - } - else { //Isolated with no SourceNat - cloudStack.dialog.createFormField.validation.required.add($form.find('.form-item[rel=ip4gateway]')); //make ip4gateway required - cloudStack.dialog.createFormField.validation.required.add($form.find('.form-item[rel=ip4Netmask]')); //make ip4Netmask required - } - } - else { //*** Shared *** - $form.find('.form-item[rel=startipv4]').css('display', 'inline-block'); - $form.find('.form-item[rel=endipv4]').css('display', 'inline-block'); - - cloudStack.dialog.createFormField.validation.required.add($form.find('.form-item[rel=ip4gateway]')); //make ip4gateway required - cloudStack.dialog.createFormField.validation.required.add($form.find('.form-item[rel=ip4Netmask]')); //make ip4Netmask required - } - + if(this.id == selectedNetworkOfferingId) { if(this.specifyvlan == false) { $form.find('.form-item[rel=vlanId]').hide(); cloudStack.dialog.createFormField.validation.required.remove($form.find('.form-item[rel=vlanId]')); //make vlanId optional @@ -1411,13 +1376,11 @@ docID: 'helpGuestNetworkZoneNetmask' }, startipv4: { - label: 'IPv4 Start IP', - validation: { required: true }, + label: 'IPv4 Start IP', docID: 'helpGuestNetworkZoneStartIP' }, endipv4: { - label: 'IPv4 End IP', - validation: { required: true }, + label: 'IPv4 End IP', docID: 'helpGuestNetworkZoneEndIP' }, //IPv4 (end) @@ -1431,13 +1394,11 @@ label: 'IPv6 CIDR' }, startipv6: { - label: 'IPv6 Start IP', - validation: { required: true }, + label: 'IPv6 Start IP', docID: 'helpGuestNetworkZoneStartIP' }, endipv6: { - label: 'IPv6 End IP', - validation: { required: true }, + label: 'IPv6 End IP', docID: 'helpGuestNetworkZoneEndIP' }, 
//IPv6 (end) @@ -1449,7 +1410,17 @@ } }, - action: function(args) { //Add guest network in advanced zone + action: function(args) { //Add guest network in advanced zone + if ( + ((args.data.ip4gateway.length == 0) && (args.data.ip4Netmask.length == 0) && (args.data.startipv4.length == 0) && (args.data.endipv4.length == 0)) + && + ((args.data.ip6gateway.length == 0) && (args.data.ip6cidr.length == 0) && (args.data.startipv6.length == 0) && (args.data.endipv6.length == 0)) + ) + { + args.response.error("Either IPv4 fields or IPv6 fields need to be filled when adding a guest network"); + return; + } + var $form = args.$form; var array1 = []; @@ -7597,7 +7568,7 @@ if(vSwitchEnabled) { - items.push({ id:" nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); + items.push({ id:"nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); items.push({id: "vmwaresvs", description: "VMware vNetwork Standard Virtual Switch"}); items.push({id: "vmwaredvs", description: "VMware vNetwork Distributed Virtual Switch"}); @@ -7610,7 +7581,7 @@ else{ items.push({id: "vmwaredvs", description: "VMware vNetwork Distributed Virtual Switch"}); items.push({id: "vmwaresvs", description: "VMware vNetwork Standard Virtual Switch"}); - items.push({ id:" nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); + items.push({ id:"nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); } args.response.success({data: items}); diff --git a/ui/scripts/ui-custom/healthCheck.js b/ui/scripts/ui-custom/healthCheck.js new file mode 100644 index 00000000000..ebb7e5a8903 --- /dev/null +++ b/ui/scripts/ui-custom/healthCheck.js @@ -0,0 +1,342 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +(function($, cloudStack) { + + cloudStack.uiCustom.healthCheck = function(args) { + + // Place outer args here as local variables + // i.e, -- var dataProvider = args.dataProvider + + return function(args){ + if(args.context.multiRules == undefined) { //LB rule is not created yet + cloudStack.dialog.notice({ message: _l('Health Check can only be configured on a created LB rule') }); + return; + } + + var formData = args.formData; + var forms = $.extend(true, {}, args.forms); + var topFieldForm, bottomFieldForm , $topFieldForm , $bottomFieldForm; + var topfields = forms.topFields; + + var $healthCheckDesc = $('

Your load balancer will automatically perform health checks on your cloudstack instances and only route traffic to instances that pass the health check
').addClass('health-check-description'); + var $healthCheckConfigTitle = $('


Configuration Options :
').addClass('health-check-config-title'); + var $healthCheckAdvancedTitle = $('


Advanced Options :
').addClass('health-check-advanced-title'); + + var $healthCheckDialog = $('
').addClass('health-check'); + $healthCheckDialog.append($healthCheckDesc); + $healthCheckDialog.append($healthCheckConfigTitle); + var $loadingOnDialog = $('
').addClass('loading-overlay'); + + var policyObj = null; + var pingpath1 = '/'; + var responsetimeout1 = '2'; + var healthinterval1 = '5'; + var healthythreshold1 = '2'; + var unhealthythreshold1 = '1'; + + $.ajax({ + url: createURL('listLBHealthCheckPolicies'), + data: { + lbruleid: args.context.multiRules[0].id + }, + async: false, + success: function(json) { + if(json.listlbhealthcheckpoliciesresponse.healthcheckpolicies[0].healthcheckpolicy[0] != undefined) { + policyObj = json.listlbhealthcheckpoliciesresponse.healthcheckpolicies[0].healthcheckpolicy[0]; + pingpath1 = policyObj.pingpath; //API bug: API doesn't return it + responsetimeout1 = policyObj.responsetime; + healthinterval1 = policyObj.healthcheckinterval; + healthythreshold1 = policyObj.healthcheckthresshold; + unhealthythreshold1 = policyObj.unhealthcheckthresshold; + } + } + }); + + topFieldForm = cloudStack.dialog.createForm({ + context: args.context, + noDialog: true, // Don't render a dialog, just return $formContainer + form: { + title: '', + fields:{ + pingpath: {label: 'Ping Path', validation: {required: false}, defaultValue: pingpath1} + } + } + }); + + $topFieldForm = topFieldForm.$formContainer; + $topFieldForm.appendTo($healthCheckDialog); + + $healthCheckDialog.append($healthCheckAdvancedTitle); + + bottomFieldForm = cloudStack.dialog.createForm ({ + context:args.context, + noDialog:true, + form:{ + title:'', + fields:{ + responsetimeout: {label: 'Response Timeout (in sec)' , validation:{required:false}, defaultValue: responsetimeout1}, + healthinterval: {label: 'Health Check Interval (in sec)', validation:{required:false}, defaultValue: healthinterval1}, + healthythreshold: {label: 'Healthy Threshold', validation: {required:false}, defaultValue: healthythreshold1}, + unhealthythreshold: {label: 'Unhealthy Threshold' , validation: { required:false}, defaultValue: unhealthythreshold1} + } + } + }); + + $bottomFieldForm = bottomFieldForm.$formContainer; + 
$bottomFieldForm.appendTo($healthCheckDialog); + + + var buttons = [ + { + text: _l('label.cancel'), + 'class': 'cancel', + click: function() { + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + } + ]; + + if(policyObj == null) { //policy is not created yet + buttons.push( + { + text: _l('Create'), + 'class': 'ok', + click: function() { + $loadingOnDialog.appendTo($healthCheckDialog); + var formData = cloudStack.serializeForm($healthCheckDialog.find('form')); + var data = { + lbruleid: args.context.multiRules[0].id, + pingpath: formData.pingpath, + responsetimeout: formData.responsetimeout, + intervaltime: formData.healthinterval, + healthythreshold: formData.healthythreshold, + unhealthythreshold: formData.unhealthythreshold + }; + + $.ajax({ + url: createURL('createLBHealthCheckPolicy'), + data: data, + success: function(json) { + var jobId = json.createlbhealthcheckpolicyresponse.jobid; + var createLBHealthCheckPolicyIntervalId = setInterval(function(){ + $.ajax({ + url: createURL('queryAsyncJobResult'), + data: { + jobid: jobId + }, + success: function(json) { + var result = json.queryasyncjobresultresponse; + if (result.jobstatus == 0) { + return; //Job has not completed + } + else { + clearInterval(createLBHealthCheckPolicyIntervalId); + + if (result.jobstatus == 1) { + cloudStack.dialog.notice({ message: _l('Health Check Policy has been created') }); + $loadingOnDialog.remove(); + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + else if (result.jobstatus == 2) { + cloudStack.dialog.notice({ message: _s(result.jobresult.errortext) }); + $loadingOnDialog.remove(); + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + } + } + }); + }, g_queryAsyncJobResultInterval); + } + }); + } + } + ); + } + else { //policy exists already + buttons.push( + //Update Button (begin) - call delete API first, then create API + { + text: _l('Update'), + 'class': 'ok', + click: function() { + 
$loadingOnDialog.appendTo($healthCheckDialog); + + $.ajax({ + url: createURL('deleteLBHealthCheckPolicy'), + data: { + id : policyObj.id + }, + success: function(json) { + var jobId = json.deletelbhealthcheckpolicyresponse.jobid; + var deleteLBHealthCheckPolicyIntervalId = setInterval(function(){ + $.ajax({ + url: createURL('queryAsyncJobResult'), + data: { + jobid: jobId + }, + success: function(json) { + var result = json.queryasyncjobresultresponse; + if (result.jobstatus == 0) { + return; //Job has not completed + } + else { + clearInterval(deleteLBHealthCheckPolicyIntervalId); + + if (result.jobstatus == 1) { + var formData = cloudStack.serializeForm($healthCheckDialog.find('form')); + var data = { + lbruleid: args.context.multiRules[0].id, + pingpath: formData.pingpath, + responsetimeout: formData.responsetimeout, + intervaltime: formData.healthinterval, + healthythreshold: formData.healthythreshold, + unhealthythreshold: formData.unhealthythreshold + }; + + $.ajax({ + url: createURL('createLBHealthCheckPolicy'), + data: data, + success: function(json) { + var jobId = json.createlbhealthcheckpolicyresponse.jobid; + var createLBHealthCheckPolicyIntervalId = setInterval(function(){ + $.ajax({ + url: createURL('queryAsyncJobResult'), + data: { + jobid: jobId + }, + success: function(json) { + var result = json.queryasyncjobresultresponse; + if (result.jobstatus == 0) { + return; //Job has not completed + } + else { + clearInterval(createLBHealthCheckPolicyIntervalId); + + if (result.jobstatus == 1) { + cloudStack.dialog.notice({ message: _l('Health Check Policy has been updated') }); + $loadingOnDialog.remove(); + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + else if (result.jobstatus == 2) { + cloudStack.dialog.notice({ message: _s(result.jobresult.errortext) }); + $loadingOnDialog.remove(); + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + } + } + }); + }, g_queryAsyncJobResultInterval); + } + }); + } + else if 
(result.jobstatus == 2) { + cloudStack.dialog.notice({ message: _s(result.jobresult.errortext) }); + $loadingOnDialog.remove(); + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + } + } + }); + }, g_queryAsyncJobResultInterval); + } + }); + } + } + //Update Button (end) + , + //Delete Button (begin) - call delete API + { + text: _l('Delete'), + 'class': 'delete', + click: function() { + $loadingOnDialog.appendTo($healthCheckDialog); + + $.ajax({ + url: createURL('deleteLBHealthCheckPolicy'), + data: { + id : policyObj.id + }, + success: function(json) { + var jobId = json.deletelbhealthcheckpolicyresponse.jobid; + var deleteLBHealthCheckPolicyIntervalId = setInterval(function(){ + $.ajax({ + url: createURL('queryAsyncJobResult'), + data: { + jobid: jobId + }, + success: function(json) { + var result = json.queryasyncjobresultresponse; + if (result.jobstatus == 0) { + return; //Job has not completed + } + else { + clearInterval(deleteLBHealthCheckPolicyIntervalId); + + if (result.jobstatus == 1) { + cloudStack.dialog.notice({ message: _l('Health Check Policy has been deleted') }); + $loadingOnDialog.remove(); + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + else if (result.jobstatus == 2) { + cloudStack.dialog.notice({ message: _s(result.jobresult.errortext) }); + $loadingOnDialog.remove(); + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + } + } + }); + }, g_queryAsyncJobResultInterval); + } + }); + } + } + //Delete Button (end) + ); + } + + $healthCheckDialog.dialog({ + title: 'Health Check Wizard', + width: 600, + height: 600, + draggable: true, + closeonEscape: false, + overflow:'auto', + open:function() { + $("button").each(function(){ + $(this).attr("style", "left: 400px; position: relative; margin-right: 5px; "); + }); + + $('.ui-dialog .delete').css('left','140px'); + + }, + buttons: buttons + }).closest('.ui-dialog').overlay(); + + } + } + }(jQuery, cloudStack)); + + diff --git 
a/ui/scripts/ui-custom/instanceWizard.js b/ui/scripts/ui-custom/instanceWizard.js index b55df79e7be..2c6d9f1886e 100644 --- a/ui/scripts/ui-custom/instanceWizard.js +++ b/ui/scripts/ui-custom/instanceWizard.js @@ -118,8 +118,12 @@ if (isSingleSelect) { $select.siblings('.single-select:visible').find('input[type=checkbox]') .attr('checked', false); - - $(this).closest('.select').find('input[type=radio]').click(); + + if (!$('input[name=new-network]:visible').is(':checked')) { + $(this).closest('.select').find('input[type=radio]').click(); + } else { + $newNetwork.find('input[type=radio]').click(); + } } if ((!$otherSelects.size()) && diff --git a/ui/scripts/ui-custom/login.js b/ui/scripts/ui-custom/login.js index c092b82ec0e..7a3b887d783 100644 --- a/ui/scripts/ui-custom/login.js +++ b/ui/scripts/ui-custom/login.js @@ -128,5 +128,17 @@ }); $languageSelect.val($.cookie('lang')); + + // Hide login screen, mainly for SSO + if (args.hideLoginScreen) { + $login.children().hide(); + $login.append($('
').addClass('loading-overlay').append( + $('').html( + // _l is not set yet, so localize directly to dictionary + // [should fix in future] + dictionary['label.loading'] + '...' + ) + )); + } }; })(jQuery, cloudStack); diff --git a/ui/scripts/ui-custom/regions.js b/ui/scripts/ui-custom/regions.js index ac52776d49f..17bc86cb5ad 100644 --- a/ui/scripts/ui-custom/regions.js +++ b/ui/scripts/ui-custom/regions.js @@ -27,25 +27,33 @@ response: { success: function(args) { var data = args.data; - var activeRegionID = args.activeRegionID; - + + var currentRegion = null; $(data).each(function() { var region = this; var regionName = region.name; var $li = $('
  • ').append($('').html(_s(region.name))); $li.data('region-data', region); - - if (region.id == activeRegionID) { - $li.addClass('active'); - } - - $regionSwitcherButton.find('.title') - .html(regionName) - .attr('title', regionName); - - $regionList.append($li); + + /* e.g. + region.endpoint == "http://localhost:8080/client/" + document.location.href == "http://localhost:8080/client/#" + */ + if(document.location.href.indexOf(region.endpoint) != -1) { + currentRegion = region; + $li.addClass('active'); + } + + $regionList.append($li); }); + + if(currentRegion != null) { + $regionSwitcherButton.find('.title').html(_s(currentRegion.name)).attr('title', _s(currentRegion.name)); + } + else { + $regionSwitcherButton.find('.title').html('').attr('title', ''); + } } } }); @@ -81,8 +89,8 @@ closeRegionSelector({ complete: function() { $('#container').prepend($('
    ').addClass('loading-overlay')); - - document.location.href = url; + + document.location.href = url + g_regionUrlParam; } }); }; diff --git a/ui/scripts/ui/dialog.js b/ui/scripts/ui/dialog.js index 88dba3fa498..bb372fbf3d6 100644 --- a/ui/scripts/ui/dialog.js +++ b/ui/scripts/ui/dialog.js @@ -585,27 +585,31 @@ * Notice dialog */ notice: function(args) { - return $( - $('').addClass('message').html( - _l(args.message) - ) - ).dialog({ - title: _l('label.status'), - dialogClass: 'notice', - closeOnEscape: false, - zIndex: 5000, - buttons: [ - { - text: _l('Close'), - 'class': 'close', - click: function() { - $(this).dialog('destroy'); - if (args.clickAction) args.clickAction(); - $('.hovered-elem').hide(); - } - } - ] - }); + if (args.message) { + return $( + $('').addClass('message').html( + _l(args.message) + ) + ).dialog({ + title: _l('label.status'), + dialogClass: 'notice', + closeOnEscape: false, + zIndex: 5000, + buttons: [ + { + text: _l('Close'), + 'class': 'close', + click: function() { + $(this).dialog('destroy'); + if (args.clickAction) args.clickAction(); + $('.hovered-elem').hide(); + } + } + ] + }); + } + + return false; } }; })(window.jQuery, window.cloudStack); diff --git a/ui/scripts/ui/widgets/listView.js b/ui/scripts/ui/widgets/listView.js index 76ce526f4d6..4b88647e6f1 100644 --- a/ui/scripts/ui/widgets/listView.js +++ b/ui/scripts/ui/widgets/listView.js @@ -648,7 +648,21 @@ } // Actions column - if (actions && renderActionCol(actions)) { + var actionsArray = actions ? 
$.map(actions, function(v, k) { + if (k == 'add') { + v.isAdd = true; + } + + return v; + }) : []; + var headerActionsArray = $.grep( + actionsArray, + function(action) { + return action.isHeader || action.isAdd; + } + ); + + if (actions && renderActionCol(actions) && actionsArray.length != headerActionsArray.length) { $thead.find('tr').append( $('') .html(_l('label.actions')) @@ -1014,7 +1028,21 @@ $tr.data('jsonObj', dataItem); $tr.data('list-view-action-filter', options.actionFilter); - if (actions && renderActionCol(actions)) { + var actionsArray = actions ? $.map(actions, function(v, k) { + if (k == 'add') { + v.isAdd = true; + } + + return v; + }) : []; + var headerActionsArray = $.grep( + actionsArray, + function(action) { + return action.isHeader || action.isAdd; + } + ); + + if (actions && renderActionCol(actions) && actionsArray.length != headerActionsArray.length) { var allowedActions = $.map(actions, function(value, key) { return key; }); diff --git a/ui/scripts/ui/widgets/toolTip.js b/ui/scripts/ui/widgets/toolTip.js index af6c2aa6d3f..6967acc7da0 100644 --- a/ui/scripts/ui/widgets/toolTip.js +++ b/ui/scripts/ui/widgets/toolTip.js @@ -156,7 +156,7 @@ // Fix overlay setTimeout(function() { - $('.tooltip-box').zIndex($(':ui-dialog').zIndex() + 1); }); + $('.tooltip-box').zIndex($(':ui-dialog').zIndex() + 10); }); }; diff --git a/ui/scripts/zoneWizard.js b/ui/scripts/zoneWizard.js index 47932664927..c09da8a33a2 100755 --- a/ui/scripts/zoneWizard.js +++ b/ui/scripts/zoneWizard.js @@ -322,7 +322,9 @@ if (args.data['network-model'] == 'Basic') { args.$form.find('[rel=networkOfferingId]').show(); - args.$form.find('[rel=guestcidraddress]').hide(); + args.$form.find('[rel=guestcidraddress]').hide(); + args.$form.find('[rel=ip6dns1]').hide(); + args.$form.find('[rel=ip6dns2]').hide(); } else { //args.data['network-model'] == 'Advanced' args.$form.find('[rel=networkOfferingId]').hide(); @@ -331,7 +333,10 @@ args.$form.find('[rel=guestcidraddress]').show(); else 
//args.data["zone-advanced-sg-enabled"] == "on args.$form.find('[rel=guestcidraddress]').hide(); - } + + args.$form.find('[rel=ip6dns1]').show(); + args.$form.find('[rel=ip6dns2]').show(); + } setTimeout(function() { if ($form.find('input[name=ispublic]').is(':checked')) { @@ -897,7 +902,7 @@ if(vSwitchEnabled) { - items.push({ id:" nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); + items.push({ id:"nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); items.push({id: "vmwaresvs", description: "VMware vNetwork Standard Virtual Switch"}); items.push({id: "vmwaredvs", description: "VMware vNetwork Distributed Virtual Switch"}); @@ -909,9 +914,9 @@ // items.push({id: " ", description: " "}); else{ - items.push({id: "vmwaredvs", description: "VMware vNetwork Distributed Virtual Switch"}); - items.push({id: "vmwaresvs", description: "VMware vNetwork Standard Virtual Switch"}); - items.push({ id:" nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); + items.push({id:"vmwaredvs", description: "VMware vNetwork Distributed Virtual Switch"}); + items.push({ id: "vmwaresvs", description: "VMware vNetwork Standard Virtual Switch"}); + items.push({ id:"nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); } args.response.success({data: items}); @@ -1177,6 +1182,24 @@ validation: { required: true } }, + scope: { + label: 'label.scope', + select: function(args) { + + var scope = [ + { id: 'zone', description: _l('label.zone.wide') }, + { id: 'cluster', description: _l('label.cluster') }, + { id: 'host', description: _l('label.host') } + ]; + + args.response.success({ + data: scope + }); + + } + + }, + protocol: { label: 'label.protocol', validation: { required: true }, @@ -3244,6 +3267,7 @@ array1.push("&podId=" + args.data.returnedPod.id); array1.push("&clusterid=" + args.data.returnedCluster.id); array1.push("&name=" + todb(args.data.primaryStorage.name)); + array1.push("&scope=" + 
todb(args.data.primaryStorage.scope)); var server = args.data.primaryStorage.server; var url = null; diff --git a/usage/src/com/cloud/usage/UsageAlertManagerImpl.java b/usage/src/com/cloud/usage/UsageAlertManagerImpl.java index a0765b2b272..dc918b83b6d 100644 --- a/usage/src/com/cloud/usage/UsageAlertManagerImpl.java +++ b/usage/src/com/cloud/usage/UsageAlertManagerImpl.java @@ -50,11 +50,12 @@ import com.sun.mail.smtp.SMTPTransport; @Local(value={AlertManager.class}) public class UsageAlertManagerImpl extends ManagerBase implements AlertManager { private static final Logger s_logger = Logger.getLogger(UsageAlertManagerImpl.class.getName()); + private static final Logger s_alertsLogger = Logger.getLogger("org.apache.cloudstack.alerts"); private EmailAlert _emailAlert; @Inject private AlertDao _alertDao; @Inject private ConfigurationDao _configDao; - + @Override public boolean configure(String name, Map params) throws ConfigurationException { Map configs = _configDao.getConfiguration("management-server", params); @@ -101,6 +102,9 @@ public class UsageAlertManagerImpl extends ManagerBase implements AlertManager { try { if (_emailAlert != null) { _emailAlert.sendAlert(alertType, dataCenterId, podId, subject, body); + } else { + s_alertsLogger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + + podId + " // clusterId:: " + null + " // message:: " + subject ); } } catch (Exception ex) { s_logger.error("Problem sending email alert", ex); @@ -171,18 +175,19 @@ public class UsageAlertManagerImpl extends ManagerBase implements AlertManager { // TODO: make sure this handles SSL transport (useAuth is true) and regular public void sendAlert(short alertType, long dataCenterId, Long podId, String subject, String content) throws MessagingException, UnsupportedEncodingException { + s_alertsLogger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + + podId + " // clusterId:: " + null + " // message:: " 
+ subject); AlertVO alert = null; - if ((alertType != AlertManager.ALERT_TYPE_HOST) && (alertType != AlertManager.ALERT_TYPE_USERVM) && (alertType != AlertManager.ALERT_TYPE_DOMAIN_ROUTER) && (alertType != AlertManager.ALERT_TYPE_CONSOLE_PROXY) && - (alertType != AlertManager.ALERT_TYPE_SSVM) && + (alertType != AlertManager.ALERT_TYPE_SSVM) && (alertType != AlertManager.ALERT_TYPE_STORAGE_MISC) && (alertType != AlertManager.ALERT_TYPE_MANAGMENT_NODE)) { alert = _alertDao.getLastAlert(alertType, dataCenterId, podId); } - + if (alert == null) { // set up a new alert AlertVO newAlert = new AlertVO(); diff --git a/utils/src/com/cloud/utils/component/AdapterBase.java b/utils/src/com/cloud/utils/component/AdapterBase.java index a8f4f468090..ea5e9611ab6 100644 --- a/utils/src/com/cloud/utils/component/AdapterBase.java +++ b/utils/src/com/cloud/utils/component/AdapterBase.java @@ -22,8 +22,10 @@ import java.util.List; public class AdapterBase extends ComponentLifecycleBase implements Adapter { public AdapterBase() { + // set default run level for adapter components + setRunLevel(ComponentLifecycle.RUN_LEVEL_COMPONENT); } - + public static T getAdapterByName(List adapters, String name) { for(T adapter : adapters) { if(adapter.getName() != null && adapter.getName().equalsIgnoreCase(name)) diff --git a/utils/src/com/cloud/utils/component/ComponentMethodProxyCache.java b/utils/src/com/cloud/utils/component/ComponentMethodProxyCache.java new file mode 100644 index 00000000000..ea3b68573cf --- /dev/null +++ b/utils/src/com/cloud/utils/component/ComponentMethodProxyCache.java @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.utils.component; + +import java.lang.ref.WeakReference; +import java.lang.reflect.Method; +import java.util.WeakHashMap; + +public class ComponentMethodProxyCache { + + private static WeakHashMap> s_cache = new WeakHashMap>(); + + public ComponentMethodProxyCache() { + } + + public static Method getTargetMethod(Method method, Object target) { + synchronized(s_cache) { + WeakReference targetMethod = s_cache.get(new TargetKey(method, target)); + if(targetMethod != null && targetMethod.get() != null) + return targetMethod.get(); + + Class clazz = target.getClass(); + for(Method m : clazz.getMethods()) { + if(isMethodMatched(method, m)) { + s_cache.put(new TargetKey(method, target), new WeakReference(m)); + return m; + } + } + + return method; + } + } + + private static boolean isMethodMatched(Method m1, Method m2) { + if(!m1.getName().equals(m2.getName())) + return false; + + Class[] params1 = m1.getParameterTypes(); + Class[] params2 = m2.getParameterTypes(); + + if(params1.length != params2.length) + return false; + + for(int i = 0; i < params1.length; i++) { + if(!params1[i].isAssignableFrom(params2[i])) + return false; + } + + return true; + } + + public static class TargetKey { + Method _method; + Object _target; + + public TargetKey(Method method, Object target) { + _method = method; + _target = target; + } + + @Override + public boolean equals(Object obj) { + if(!(obj instanceof TargetKey)) + return false; + + // for target object, we just check the reference + return _method.equals(((TargetKey)obj)._method) && + _target == 
((TargetKey)obj)._target; + } + + public int hashCode() { + return _target.hashCode() ^ _target.hashCode(); + } + } +} diff --git a/utils/src/com/cloud/utils/component/SpringComponentScanUtils.java b/utils/src/com/cloud/utils/component/SpringComponentScanUtils.java index fda11b74609..9a85c79fa80 100644 --- a/utils/src/com/cloud/utils/component/SpringComponentScanUtils.java +++ b/utils/src/com/cloud/utils/component/SpringComponentScanUtils.java @@ -38,5 +38,4 @@ public class SpringComponentScanUtils { } return false; } - } diff --git a/utils/src/com/cloud/utils/crypt/EncryptionSecretKeyChanger.java b/utils/src/com/cloud/utils/crypt/EncryptionSecretKeyChanger.java index 2be274921cc..9b13eb8b155 100755 --- a/utils/src/com/cloud/utils/crypt/EncryptionSecretKeyChanger.java +++ b/utils/src/com/cloud/utils/crypt/EncryptionSecretKeyChanger.java @@ -54,7 +54,7 @@ public class EncryptionSecretKeyChanger { private StandardPBEStringEncryptor oldEncryptor = new StandardPBEStringEncryptor(); private StandardPBEStringEncryptor newEncryptor = new StandardPBEStringEncryptor(); - private static final String keyFile = "/etc/cloud/management/key"; + private static final String keyFile = "/etc/cloudstack/management/key"; public static void main(String[] args){ List argsList = Arrays.asList(args); diff --git a/utils/src/com/cloud/utils/crypt/EncryptionSecretKeyChecker.java b/utils/src/com/cloud/utils/crypt/EncryptionSecretKeyChecker.java index 5ffa14f3088..bf6c35109b3 100755 --- a/utils/src/com/cloud/utils/crypt/EncryptionSecretKeyChecker.java +++ b/utils/src/com/cloud/utils/crypt/EncryptionSecretKeyChecker.java @@ -46,7 +46,7 @@ public class EncryptionSecretKeyChecker extends AdapterBase implements SystemInt private static final Logger s_logger = Logger.getLogger(EncryptionSecretKeyChecker.class); // Two possible locations with the new packaging naming - private static final String s_altKeyFile = "/etc/cloud/management/key"; + private static final String s_altKeyFile = 
"/etc/cloudstack/management/key"; private static final String s_keyFile = "/etc/cloudstack/management/key"; private static final String s_envKey = "CLOUD_SECRET_KEY"; private static StandardPBEStringEncryptor s_encryptor = new StandardPBEStringEncryptor(); diff --git a/utils/src/com/cloud/utils/db/TransactionContextBuilder.java b/utils/src/com/cloud/utils/db/TransactionContextBuilder.java index e03b25f912d..7ca33ab5f5d 100644 --- a/utils/src/com/cloud/utils/db/TransactionContextBuilder.java +++ b/utils/src/com/cloud/utils/db/TransactionContextBuilder.java @@ -22,9 +22,10 @@ import org.aopalliance.intercept.MethodInterceptor; import org.aopalliance.intercept.MethodInvocation; import org.apache.log4j.Logger; import org.aspectj.lang.ProceedingJoinPoint; -import org.aspectj.lang.Signature; import org.aspectj.lang.reflect.MethodSignature; +import com.cloud.utils.component.ComponentMethodProxyCache; + public class TransactionContextBuilder implements MethodInterceptor { private static final Logger s_logger = Logger.getLogger(TransactionContextBuilder.class); public TransactionContextBuilder() { @@ -72,14 +73,9 @@ public class TransactionContextBuilder implements MethodInterceptor { Class clazz = method.getDeclaringClass(); if(clazz.isInterface()) { clazz = target.getClass(); - for(Method m : clazz.getMethods()) { - // it is supposed that we need to check against type arguments, - // this can be simplified by just checking method name - if(m.getName().equals(method.getName())) { - if(m.getAnnotation(DB.class) != null) - return true; - } - } + Method targetMethod = ComponentMethodProxyCache.getTargetMethod(method, target); + if(targetMethod != null && targetMethod.getAnnotation(DB.class) != null) + return true; } do { diff --git a/utils/src/com/cloud/utils/net/NetUtils.java b/utils/src/com/cloud/utils/net/NetUtils.java index dd40a33934d..5988dd5f337 100755 --- a/utils/src/com/cloud/utils/net/NetUtils.java +++ b/utils/src/com/cloud/utils/net/NetUtils.java @@ -632,7 +632,7 
@@ public class NetUtils { Set result = new TreeSet(); long ip = ip2Long(cidr); long startNetMask = ip2Long(getCidrNetmask(size)); - long start = (ip & startNetMask) + 2; + long start = (ip & startNetMask) + 1; long end = start; end = end >> (32 - size);