From 2094ee682e213a89a99dc91c17016059fcecefd9 Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Thu, 14 Mar 2013 17:13:27 +0530 Subject: [PATCH 001/123] Health-Check UI development --- ui/index.jsp | 1 + ui/scripts/network.js | 11 + ui/scripts/ui-custom/healthCheck.js | 318 ++++++++++++++++++++++++++++ 3 files changed, 330 insertions(+) create mode 100644 ui/scripts/ui-custom/healthCheck.js diff --git a/ui/index.jsp b/ui/index.jsp index d1e6bfa923b..3b8f37886ef 100644 --- a/ui/index.jsp +++ b/ui/index.jsp @@ -1645,6 +1645,7 @@ under the License. + diff --git a/ui/scripts/network.js b/ui/scripts/network.js index 2353a038f1f..697141a84ec 100755 --- a/ui/scripts/network.js +++ b/ui/scripts/network.js @@ -2786,6 +2786,17 @@ action: cloudStack.lbStickyPolicy.dialog() } }, + + 'health-check':{ + label:'Health Check', + custom:{ + requireValidation: true , + buttonLabel:'Configure', + action:cloudStack.uiCustom.healthCheck() + + } + }, + 'autoScale': { label: 'AutoScale', custom: { diff --git a/ui/scripts/ui-custom/healthCheck.js b/ui/scripts/ui-custom/healthCheck.js new file mode 100644 index 00000000000..0c6689a7cc8 --- /dev/null +++ b/ui/scripts/ui-custom/healthCheck.js @@ -0,0 +1,318 @@ +// Copyright 2012 Citrix Systems, Inc. Licensed under the +// Apache License, Version 2.0 (the "License"); you may not use this +// file except in compliance with the License. Citrix Systems, Inc. +// reserves all rights not expressly granted by the License. +// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +(function($, cloudStack) { + + cloudStack.uiCustom.healthCheck = function(args) { + + // Place outer args here as local variables + // i.e, -- var dataProvider = args.dataProvider + + return function(args){ + if(args.context.multiRules == undefined) { //LB rule is not created yet + cloudStack.dialog.notice({ message: _l('Health Check can only be configured on a created LB rule') }); + return; + } + + var formData = args.formData; + var forms = $.extend(true, {}, args.forms); + var topFieldForm, bottomFieldForm , $topFieldForm , $bottomFieldForm; + var topfields = forms.topFields; + + var $healthCheckDesc = $('
<div>Your load balancer will automatically perform health checks on your cloudstack instances and only route traffic to instances that pass the health check</div>').addClass('health-check-description');
      var $healthCheckConfigTitle = $('<div><br/><br/>Configuration Options :</div>').addClass('health-check-config-title');
      var $healthCheckAdvancedTitle = $('<div><br/><br/>Advanced Options :</div>').addClass('health-check-advanced-title');

      var $healthCheckDialog = $('<div>').addClass('health-check');
      $healthCheckDialog.append($healthCheckDesc);
      $healthCheckDialog.append($healthCheckConfigTitle);
      var $loadingOnDialog = $('<div>
').addClass('loading-overlay'); + + var policyObj = null; + var pingpath1 = '/'; + var responsetimeout1 = '2'; + var healthinterval1 = '5'; + var healthythreshold1 = '2'; + var unhealthythreshold1 = '1'; + + $.ajax({ + url: createURL('listLBHealthCheckPolicies'), + data: { + lbruleid: args.context.multiRules[0].id + }, + async: false, + success: function(json) { + if(json.listlbhealtcheckpoliciesresponse.healthcheckpolicies[0].healthcheckpolicy[0] != undefined) { + policyObj = json.listlbhealtcheckpoliciesresponse.healthcheckpolicies[0].healthcheckpolicy[0]; + pingpath1 = policyObj.pingpath; //API bug: API doesn't return it + responsetimeout1 = policyObj.responsetime; + healthinterval1 = policyObj.healthcheckinterval; + healthythreshold1 = policyObj.healthcheckthresshold; + unhealthythreshold1 = policyObj.unhealthcheckthresshold; + } + } + }); + + topFieldForm = cloudStack.dialog.createForm({ + context: args.context, + noDialog: true, // Don't render a dialog, just return $formContainer + form: { + title: '', + fields:{ + pingpath: {label: 'Ping Path', docID:'helpAccountUsername' , validation: {required: false}, defaultValue: pingpath1} + } + } + }); + + $topFieldForm = topFieldForm.$formContainer; + $topFieldForm.appendTo($healthCheckDialog); + + $healthCheckDialog.append($healthCheckAdvancedTitle); + + bottomFieldForm = cloudStack.dialog.createForm ({ + context:args.context, + noDialog:true, + form:{ + title:'', + fields:{ + responsetimeout: {label: 'Response Timeout (in sec)' , validation:{required:false}, defaultValue: responsetimeout1}, + healthinterval: {label: 'Health Check Interval (in sec)', validation:{required:false}, defaultValue: healthinterval1}, + healthythreshold: {label: 'Healthy Threshold', validation: {required:false}, defaultValue: healthythreshold1}, + unhealthythreshold: {label: 'Unhealthy Threshold' , validation: { required:false}, defaultValue: unhealthythreshold1} + } + } + }); + + $bottomFieldForm = bottomFieldForm.$formContainer; + $bottomFieldForm.appendTo($healthCheckDialog); + + + var buttons = [ + { + text: _l('label.cancel'), + 'class': 'cancel', + click: function() { + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + } + ]; + + if(policyObj == null) { //policy is not created yet + buttons.push( + { + text: _l('Create'), + 'class': 'ok', + click: function() { + $loadingOnDialog.appendTo($healthCheckDialog); + var formData = cloudStack.serializeForm($healthCheckDialog.find('form')); + var data = { + lbruleid: args.context.multiRules[0].id, + pingpath: formData.pingpath, + responsetimeout: formData.responsetimeout, + intervaltime: formData.healthinterval, + healthythreshold: formData.healthythreshold, + unhealthythreshold: formData.unhealthythreshold + }; + + var lbRuleData = { + + algorithm:args.context.multiRules[0].algorithm, + name:args.context.multiRules[0].name, + publicport:args.context.multiRules[0].publicport, + privateport:args.context.multiRules[0].privateport + + + } + + if(args.context.multiRules[0] != null) + $.extend(data , lbRuleData); + + $.ajax({ + url: createURL('createLBHealthCheckPolicy'), + data: data, + success: function(json) { + var jobId = json.createlbhealthcheckpolicyresponse.jobid; + var createLBHealthCheckPolicyIntervalId = setInterval(function(){ + $.ajax({ + url: createURL('queryAsyncJobResult'), + data: { + jobid: jobId + }, + success: function(json) { + var result = json.queryasyncjobresultresponse; + if (result.jobstatus == 0) { + return; //Job has not completed + } + else { + 
clearInterval(createLBHealthCheckPolicyIntervalId); + + if (result.jobstatus == 1) { + cloudStack.dialog.notice({ message: _l('Health Check Policy has been created') }); + $loadingOnDialog.remove(); + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + else if (result.jobstatus == 2) { + cloudStack.dialog.notice({ message: _s(result.jobresult.errortext) }); + $loadingOnDialog.remove(); + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + } + } + }); + }, g_queryAsyncJobResultInterval); + }, + error:function(XMLHttpResponse){ + args.response.error(parseXMLHttpResponse(XMLHttpResponse)); + + + } + }); + } + } + ); + } + else { //policy exists already + buttons.push( + //Update Button (begin) - call delete API first, then create API + { + text: _l('Update'), + 'class': 'ok', + click: function() { + $loadingOnDialog.appendTo($healthCheckDialog); + + $.ajax({ + url: createURL('deleteLBHealthCheckPolicy'), + data: { + id : policyObj.id + }, + success: function(json) { + var jobId = json.deletelbhealthcheckpolicyresponse.jobid; + var deleteLBHealthCheckPolicyIntervalId = setInterval(function(){ + $.ajax({ + url: createURL('queryAsyncJobResult'), + data: { + jobid: jobId + }, + success: function(json) { + var result = json.queryasyncjobresultresponse; + if (result.jobstatus == 0) { + return; //Job has not completed + } + else { + clearInterval(deleteLBHealthCheckPolicyIntervalId); + + if (result.jobstatus == 1) { + var formData = cloudStack.serializeForm($healthCheckDialog.find('form')); + var data = { + lbruleid: args.context.multiRules[0].id, + pingpath: formData.pingpath, + responsetimeout: formData.responsetimeout, + intervaltime: formData.healthinterval, + healthythreshold: formData.healthythreshold, + unhealthythreshold: formData.unhealthythreshold + }; + + var lbRuleData = { + + algorithm:args.context.multiRules[0].algorithm, + name:args.context.multiRules[0].name, + publicport:args.context.multiRules[0].publicport, + privateport:args.context.multiRules[0].privateport + + + } + + if(args.context.multiRules[0] != null) + $.extend(data , lbRuleData); + + $.ajax({ + url: createURL('createLBHealthCheckPolicy'), + data: data, + success: function(json) { + var jobId = json.createlbhealthcheckpolicyresponse.jobid; + var createLBHealthCheckPolicyIntervalId = setInterval(function(){ + $.ajax({ + url: createURL('queryAsyncJobResult'), + data: { + jobid: jobId + }, + success: function(json) { + var result = json.queryasyncjobresultresponse; + if (result.jobstatus == 0) { + return; //Job has not completed + } + else { + clearInterval(createLBHealthCheckPolicyIntervalId); + + if (result.jobstatus == 1) { + cloudStack.dialog.notice({ message: _l('Health Check Policy has been updated') }); + $loadingOnDialog.remove(); + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + else if (result.jobstatus == 2) { + cloudStack.dialog.notice({ message: _s(result.jobresult.errortext) }); + $loadingOnDialog.remove(); + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + } + } + }); + }, g_queryAsyncJobResultInterval); + }, + error:function(json){ + args.response.error(parseXMLHttpResponse(json)); + + } + }); + } + else if (result.jobstatus == 2) { + cloudStack.dialog.notice({ message: _s(result.jobresult.errortext) }); + $loadingOnDialog.remove(); + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + } + } + }); + }, g_queryAsyncJobResultInterval); + } + }); + } + } + //Update Button (end) + ); + } + + 
$healthCheckDialog.dialog({ + title: 'Health Check Wizard', + width: 600, + height: 600, + draggable: true, + closeonEscape: false, + overflow:'auto', + open:function() { + $("button").each(function(){ + $(this).attr("style", "left: 400px; position: relative; margin-right: 5px; "); + }); + }, + buttons: buttons + }).closest('.ui-dialog').overlay(); + + } + } + }(jQuery, cloudStack)); + + From a7e09c864864beb56e5b8a652150e72fe39ca3a4 Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Thu, 14 Mar 2013 17:21:03 +0530 Subject: [PATCH 002/123] Health-Check UI development --- ui/scripts/ui-custom/healthCheck.js | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ui/scripts/ui-custom/healthCheck.js b/ui/scripts/ui-custom/healthCheck.js index 0c6689a7cc8..d0f4defdc97 100644 --- a/ui/scripts/ui-custom/healthCheck.js +++ b/ui/scripts/ui-custom/healthCheck.js @@ -171,12 +171,10 @@ } }); }, g_queryAsyncJobResultInterval); - }, - error:function(XMLHttpResponse){ - args.response.error(parseXMLHttpResponse(XMLHttpResponse)); + } - } + }); } } From e6ac982d6c699dde4e47aa94407ce72c964ff180 Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Fri, 15 Mar 2013 12:26:09 +0530 Subject: [PATCH 003/123] Adding the license header to the new file --- ui/scripts/ui-custom/healthCheck.js | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/ui/scripts/ui-custom/healthCheck.js b/ui/scripts/ui-custom/healthCheck.js index d0f4defdc97..2f1730c5230 100644 --- a/ui/scripts/ui-custom/healthCheck.js +++ b/ui/scripts/ui-custom/healthCheck.js @@ -1,13 +1,19 @@ -// Copyright 2012 Citrix Systems, Inc. Licensed under the -// Apache License, Version 2.0 (the "License"); you may not use this -// file except in compliance with the License. Citrix Systems, Inc. -// reserves all rights not expressly granted by the License. -// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
(function($, cloudStack) { From 3752f1c3314aaf1bbc926a7da8d200ae6f878356 Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Fri, 15 Mar 2013 12:35:28 +0530 Subject: [PATCH 004/123] Health Check UI development --- ui/scripts/ui-custom/healthCheck.js | 91 +++++++++++++++++------------ 1 file changed, 54 insertions(+), 37 deletions(-) diff --git a/ui/scripts/ui-custom/healthCheck.js b/ui/scripts/ui-custom/healthCheck.js index 2f1730c5230..eb80e721b3b 100644 --- a/ui/scripts/ui-custom/healthCheck.js +++ b/ui/scripts/ui-custom/healthCheck.js @@ -56,8 +56,8 @@ }, async: false, success: function(json) { - if(json.listlbhealtcheckpoliciesresponse.healthcheckpolicies[0].healthcheckpolicy[0] != undefined) { - policyObj = json.listlbhealtcheckpoliciesresponse.healthcheckpolicies[0].healthcheckpolicy[0]; + if(json.listlbhealthcheckpoliciesresponse.healthcheckpolicies[0].healthcheckpolicy[0] != undefined) { + policyObj = json.listlbhealthcheckpoliciesresponse.healthcheckpolicies[0].healthcheckpolicy[0]; pingpath1 = policyObj.pingpath; //API bug: API doesn't return it responsetimeout1 = policyObj.responsetime; healthinterval1 = policyObj.healthcheckinterval; @@ -121,26 +121,13 @@ $loadingOnDialog.appendTo($healthCheckDialog); var formData = cloudStack.serializeForm($healthCheckDialog.find('form')); var data = { - lbruleid: args.context.multiRules[0].id, + lbruleid: args.context.multiRules[0].id, pingpath: formData.pingpath, responsetimeout: formData.responsetimeout, intervaltime: formData.healthinterval, healthythreshold: formData.healthythreshold, unhealthythreshold: formData.unhealthythreshold }; - - var lbRuleData = { - - algorithm:args.context.multiRules[0].algorithm, - name:args.context.multiRules[0].name, - publicport:args.context.multiRules[0].publicport, - privateport:args.context.multiRules[0].privateport - - - } - - if(args.context.multiRules[0] != null) - $.extend(data , lbRuleData); $.ajax({ url: createURL('createLBHealthCheckPolicy'), @@ -178,9 +165,6 @@ }); }, g_queryAsyncJobResultInterval); } - - - }); } } @@ -226,19 +210,6 @@ healthythreshold: formData.healthythreshold, unhealthythreshold: formData.unhealthythreshold }; - - var lbRuleData = { - - algorithm:args.context.multiRules[0].algorithm, - name:args.context.multiRules[0].name, - publicport:args.context.multiRules[0].publicport, - privateport:args.context.multiRules[0].privateport - - - } - - if(args.context.multiRules[0] != null) - $.extend(data , lbRuleData); $.ajax({ url: createURL('createLBHealthCheckPolicy'), @@ -275,11 +246,7 @@ } }); }, g_queryAsyncJobResultInterval); - }, - error:function(json){ - args.response.error(parseXMLHttpResponse(json)); - - } + } }); } else if (result.jobstatus == 2) { @@ -297,6 +264,56 @@ } } //Update Button (end) + , + //Delete Button (begin) - call delete API + { + text: _l('Delete'), + 'class': 'cancel', + click: function() { + $loadingOnDialog.appendTo($healthCheckDialog); + + $.ajax({ + url: createURL('deleteLBHealthCheckPolicy'), + data: { + id : policyObj.id + }, + success: function(json) { + var jobId = json.deletelbhealthcheckpolicyresponse.jobid; + var deleteLBHealthCheckPolicyIntervalId = setInterval(function(){ + $.ajax({ + url: createURL('queryAsyncJobResult'), + data: { + jobid: jobId + }, + success: function(json) { + var result = json.queryasyncjobresultresponse; + if (result.jobstatus == 0) { + return; //Job has not completed + } + else { + clearInterval(deleteLBHealthCheckPolicyIntervalId); + + if (result.jobstatus == 1) { + cloudStack.dialog.notice({ message: _l('Health Check 
Policy has been deleted') }); + $loadingOnDialog.remove(); + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + else if (result.jobstatus == 2) { + cloudStack.dialog.notice({ message: _s(result.jobresult.errortext) }); + $loadingOnDialog.remove(); + $healthCheckDialog.dialog('destroy'); + $('.overlay').remove(); + } + } + } + }); + }, g_queryAsyncJobResultInterval); + } + }); + } + } + //Delete Button (end) ); } From e74dd13f7fdcf55d51eb32179ce2072ed83aa3e6 Mon Sep 17 00:00:00 2001 From: Sebastien Goasguen Date: Fri, 15 Mar 2013 05:51:29 -0400 Subject: [PATCH 005/123] CLOUDSTACK-825: Fix CloudMonkey docs --- docs/en-US/cloudmonkey.xml | 84 ++++++++++++++++++++++++++++---------- 1 file changed, 62 insertions(+), 22 deletions(-) diff --git a/docs/en-US/cloudmonkey.xml b/docs/en-US/cloudmonkey.xml index 0057562cca2..5665ed4cd02 100644 --- a/docs/en-US/cloudmonkey.xml +++ b/docs/en-US/cloudmonkey.xml @@ -24,7 +24,7 @@
CloudMonkey - CloudMonkey is the &PRODUCT; Command Line Interface (CLI). It is written in Python and leverages Marvin. CloudMonkey can be used both as an interactive shell and as a command line tool which simplifies &PRODUCT; configuration and management. + CloudMonkey is the &PRODUCT; Command Line Interface (CLI). It is written in Python. CloudMonkey can be used both as an interactive shell and as a command line tool which simplifies &PRODUCT; configuration and management. It can be used with &PRODUCT; CloudStack 4.0-incubating and above CloudMonkey is still under development and should be considered a Work In Progress (WIP), the wiki is the most up to date documentation: https://cwiki.apache.org/CLOUDSTACK/cloudstack-cloudmonkey-cli.html @@ -32,13 +32,15 @@
Installing CloudMonkey - There are two ways to get CloudMonkey: - + CloudMonkey is dependent on readline, pygments, prettytable, when installing from source you will need to resolve those dependencies. Using the cheese shop, the dependencies will be automatically installed. + There are three ways to get CloudMonkey. Via the official &PRODUCT; source releases or via a community maintained distribution at the cheese shop. Developers can also get it directly from the git repository in tools/cli/. + + - Via the official Apache &PRODUCT; releases (starting with 4.1). + Via the official Apache &PRODUCT; releases as well as the git repository. Configuration - To configure CloudMonkey you can edit the .cloudmonkey_config file in the user's home directory as shown below. The values can also be set interactively at the cloudmonkey prompt + To configure CloudMonkey you can edit the ~/.cloudmonkey/config file in the user's home directory as shown below. The values can also be set interactively at the cloudmonkey prompt. Logs are kept in ~/.cloudmonkey/log, and history is stored in ~/.cloudmonkey/history. Discovered apis are listed in ~/.cloudmonkey/cache. Only the log and history files can be custom paths and can be configured by setting appropriate file paths in ~/.cloudmonkey/config -$ cat .cloudmonkey_config -[CLI] -protocol = http +$ cat ~/.cloudmonkey/config +[core] +log_file = /Users/sebastiengoasguen/.cloudmonkey/log asyncblock = true +paramcompletion = false +history_file = /Users/sebastiengoasguen/.cloudmonkey/history + +[ui] color = true -prompt = cloudmonkey> -history_file = /Users/sebastiengoasguen/.cloudmonkey_history -host = localhost +prompt = > +tabularize = false + +[user] +secretkey =VDaACYb0LV9eNjTetIOElcVQkvJck_J_QljX_FcHRj87ZKiy0z0ty0ZsYBkoXkY9b7eq1EhwJaw7FF3akA3KBQ +apikey = plgWJfZK4gyS3mOMTVmjUVg-X-jlWlnfaUJ9GAbBbf9EdMkAYMmAiLqzzq1ElZLYq_u38zCm0bewzGUdP66mg + +[server] path = /client/api +host = localhost +protocol = http port = 8080 -apikey = plgWJfZK4gyS3mOMTVmjUVg-X-jlWlnfaUJ9GAbBbf9EdM-kAYMmAiLqzzq1ElZLYq_u38zCm0bewzGUdP66mg -secretkey = VDaACYb0LV9eNjTetIOElcVQkvJck_J_QljX_FcHRj87ZKiy0z0ty0ZsYBkoXkY9b7eq1EhwJaw7FF3akA3KBQ -timeout = 600 -log_file = /Users/sebastiengoasguen/.cloudmonkey_log +timeout = 3600 - The values can also be set at the cloudmonkey prompt. The API and secret keys are obtained via the &PRODUCT; UI or via a raw api call. + The values can also be set at the CloudMonkey prompt. The API and secret keys are obtained via the &PRODUCT; UI or via a raw api call. set prompt myprompt> +☁ Apache CloudStack cloudmonkey 4.1.0-snapshot. Type help or ? to list commands. + +> set prompt myprompt> myprompt> set host localhost myprompt> set port 8080 myprompt> set apikey myprompt> set secretkey ]]> - You can use cloudmonkey to interact with a local cloud, and even with a remote public cloud. You just need to set the host value properly and obtain the keys from the cloud administrator. + You can use CloudMonkey to interact with a local cloud, and even with a remote public cloud. You just need to set the host value properly and obtain the keys from the cloud administrator. +
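+	For example, to point CloudMonkey at a remote cloud from the prompt (cloud.example.com, port 443 and https below are placeholder values; use the endpoint, path and keys supplied by your cloud administrator):
+
+myprompt> set protocol https
+myprompt> set host cloud.example.com
+myprompt> set port 443
+myprompt> set path /client/api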
+ +
+    API Discovery
+    
+	In &PRODUCT; 4.0.* releases the list of available API calls is pre-cached, while &PRODUCT; 4.1 and later enable an API discovery service, so CloudMonkey automatically discovers the API calls available on the management server. The sync command pulls the list of APIs accessible to your user role, along with their help docs, and stores them in ~/.cloudmonkey/cache. This lets CloudMonkey adapt to changes on the management server; for example, if the sysadmin enables a plugin such as Nicira NVP for that user role, the corresponding calls become available after a sync. New verbs and grammar (DSL) rules are created on the fly.
+    
+    To discover the available APIs, run:
+    
+        > sync
+324 APIs discovered and cached
+    
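+    Once synced, a discovered call can be used directly as a verb and noun pair at the prompt, for example (output omitted; the calls you see depend on your user role):
+    
+> list zones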
+ +
+    Tabular Output
+    The number of key/value pairs returned by an API call can be large, resulting in very long output. To make the output easier to read, tabular formatting can be enabled. You may turn on tabular listing and choose the set of columns with the filter parameter, which takes a comma-separated list of field names; if a field name contains a space, wrap it in double quotes. The resulting table keeps the columns in the same order as the field filters provided.
+    To enable it, use the set function and create filters like so:
+    
+> set tabularize true
+> list users filter=id,domain,account
+count = 1
+user:
++--------------------------------------+--------+---------+
+| id                                   | domain | account |
++--------------------------------------+--------+---------+
+| 7ed6d5da-93b2-4545-a502-23d20b48ef2a | ROOT   | admin   |
++--------------------------------------+--------+---------+
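+    The filter argument can also be combined with regular API parameters in the same call, for example (assuming at least one running instance exists in your cloud):
+    
+> list virtualmachines state=Running filter=id,name,state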
Interactive Shell Usage - To start learning cloudmonkey, the best is to use the interactive shell. Simply type cloudmonkey at the prompt and you should get the interactive shell. - At the cloudmonkey prompt press the tab key twice, you will see all potential verbs available. Pick on, enter a space and then press tab twice. You will see all actions available for that verb + To start learning CloudMonkey, the best is to use the interactive shell. Simply type CloudMonkey at the prompt and you should get the interactive shell. + At the CloudMonkey prompt press the tab key twice, you will see all potential verbs available. Pick on, enter a space and then press tab twice. You will see all actions available for that verb From 8291c9b3ba9c57388f654da2e8825c0644122523 Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Tue, 26 Feb 2013 11:31:34 -0800 Subject: [PATCH 006/123] Mutli-edit overflow CSS handling --- ui/css/cloudstack3.css | 96 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 92 insertions(+), 4 deletions(-) diff --git a/ui/css/cloudstack3.css b/ui/css/cloudstack3.css index cb1debc27cf..35dc3ca7d4e 100644 --- a/ui/css/cloudstack3.css +++ b/ui/css/cloudstack3.css @@ -1737,16 +1737,60 @@ div.list-view td.state.off span { text-indent: 0; } -.detail-group .main-groups table td.value span { +.detail-group .main-groups table td.value > span { display: block; height: 30px; overflow: auto; position: relative; top: 9px; + float:left; + width:245px; } -.detail-group .main-groups table td.value span { - width: 355px; +.detail-group .main-groups table td.value .view-all { + cursor: pointer; + /*[empty]height:;*/ + border-left: 1px solid #9FA2A5; + /*+border-radius:4px 0 0 4px;*/ + -moz-border-radius: 4px 0 0 4px; + -webkit-border-radius: 4px 0 0 4px; + -khtml-border-radius: 4px 0 0 4px; + border-radius: 4px 0 0 4px; + background: url(../images/sprites.png) no-repeat 100% -398px; + float: right; + margin: 1px 0 0; + padding: 8px 33px 6px 15px; + } + +.detail-group .main-groups table td.value .view-all:hover { + background-position: 100% -431px; + } + +/*List-view: subselect dropdown*/ +.list-view .subselect { + width: 116px; + display: block; + float: left; + background: url(../images/bg-gradients.png) 0px -42px; + padding: 0; + margin: 8px 0 1px 7px; + clear: both; + border: 1px solid #A8A7A7; + /*+border-radius:4px;*/ + -moz-border-radius: 4px; + -webkit-border-radius: 4px; + -khtml-border-radius: 4px; + border-radius: 4px; +} + +.list-view .subselect span { + margin: 4px 0 0 12px; +} + +.list-view .subselect select { + width: 85%; + margin: 5px 0 4px; + font-size: 10px; } .panel.always-maximized .detail-group .main-groups table td.value span { @@ -7684,9 +7728,27 @@ div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody t margin: 0 22px 0 0; } + /** Fix long table overflow*/ +.detail-view .multi-edit { + width: 100%; +} + +.detail-view .multi-edit table { + width: 97%; + max-width: inherit; +} + +.detail-view .multi-edit table tr th, +.detail-view .multi-edit table tr td { + width: 87px !important; + min-width: 87px !important; + max-width: 87px !important; +} + + /** Header fields*/ .multi-edit .header-fields { - position: relative; + position: relative; /*+placement:shift 14px 11px;*/ position: relative; left: 14px; @@ -10502,6 +10564,32 @@ div.ui-dialog div.acl div.multi-edit div.data div.data-body div.data-item table width: 65px; } +/*HEALTH CHECK */ + +div.ui-dialog div.health-check div.health-check-description { +color: #808080; +} + +div.ui-dialog 
div.health-check div.form-container form div.form-item { +width:58% margin-left:116px; margin-top:-16px; margin-bottom:30px; } + +div.ui-dialog div.health-check div.health-check-config-title { +float:left; +color: #808080; +font-size:17px; +margin-left:15px; +} + +div.ui-dialog div.health-check div.health-check-advanced-title { +float:left; +color: #808080; +font-size:17px; +margin-left:15px; +} + + + + /*Autoscaler*/ .ui-dialog div.autoscaler { overflow: auto; From e86ee12a78b1ac9361e92ea008f53fda0829fc59 Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Fri, 15 Mar 2013 17:23:04 +0530 Subject: [PATCH 007/123] Adjusting the CSS attr for the delete button --- ui/scripts/ui-custom/healthCheck.js | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ui/scripts/ui-custom/healthCheck.js b/ui/scripts/ui-custom/healthCheck.js index eb80e721b3b..c4c84e5236b 100644 --- a/ui/scripts/ui-custom/healthCheck.js +++ b/ui/scripts/ui-custom/healthCheck.js @@ -268,7 +268,7 @@ //Delete Button (begin) - call delete API { text: _l('Delete'), - 'class': 'cancel', + 'class': 'delete', click: function() { $loadingOnDialog.appendTo($healthCheckDialog); @@ -328,6 +328,9 @@ $("button").each(function(){ $(this).attr("style", "left: 400px; position: relative; margin-right: 5px; "); }); + + $('.ui-dialog .delete').css('left','140px'); + }, buttons: buttons }).closest('.ui-dialog').overlay(); From 4494b3fd3248856ebd640fa4ab2b4144fa58aadf Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Fri, 15 Mar 2013 17:48:50 +0530 Subject: [PATCH 008/123] Adjusting the zindex for the tooltip to be on top --- ui/scripts/ui/widgets/toolTip.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/scripts/ui/widgets/toolTip.js b/ui/scripts/ui/widgets/toolTip.js index af6c2aa6d3f..6967acc7da0 100644 --- a/ui/scripts/ui/widgets/toolTip.js +++ b/ui/scripts/ui/widgets/toolTip.js @@ -156,7 +156,7 @@ // Fix overlay setTimeout(function() { - $('.tooltip-box').zIndex($(':ui-dialog').zIndex() + 1); }); + $('.tooltip-box').zIndex($(':ui-dialog').zIndex() + 10); }); }; From 1d47458ed746b3357dd15192edeaf5c172ca9ec8 Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Fri, 15 Mar 2013 18:16:18 +0530 Subject: [PATCH 009/123] adjusting the css to remove blank space in the dialog box --- ui/css/cloudstack3.css | 1 + 1 file changed, 1 insertion(+) diff --git a/ui/css/cloudstack3.css b/ui/css/cloudstack3.css index 35dc3ca7d4e..5797428b1be 100644 --- a/ui/css/cloudstack3.css +++ b/ui/css/cloudstack3.css @@ -10585,6 +10585,7 @@ float:left; color: #808080; font-size:17px; margin-left:15px; +margin-top:-70px; } From 42d33fc3ffe3821eea2b23c0db605ca201e2b7ee Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Thu, 14 Mar 2013 16:20:53 -0700 Subject: [PATCH 010/123] CLOUDSTACK-1044: Fix duplicate listCapacity call on dashboard --- ui/scripts/dashboard.js | 71 +++++++---------------------------------- 1 file changed, 12 insertions(+), 59 deletions(-) diff --git a/ui/scripts/dashboard.js b/ui/scripts/dashboard.js index 88c3cd15835..845ae52259b 100644 --- a/ui/scripts/dashboard.js +++ b/ui/scripts/dashboard.js @@ -91,7 +91,7 @@ var netTotal = json.listnetworksresponse.count ? json.listnetworksresponse.count : 0; - $.ajax({ + $.ajax({ url: createURL('listPublicIpAddresses'), success: function(json) { var ipTotal = json.listpublicipaddressesresponse.count ? 
@@ -102,7 +102,7 @@ ipTotal: ipTotal })); } - }); + }); } }); } @@ -128,7 +128,7 @@ } } }, - + dataProvider: function(args) { var dataFns = { zones: function(data) { @@ -142,71 +142,24 @@ }); }, capacity: function(data) { - var latestData =null; - if(window.fetchLatestflag == 1) - { + var latestData =null; + if(window.fetchLatestflag == 1) + { latestData = { - fetchLatest:true - } + fetchLatest:true + } } - else + else { latestData = { fetchLatest:false - } + } } - window.fetchLatestflag = 0; - if (data.zones) { - $.ajax({ - url: createURL('listCapacity'), - data: latestData, - success: function(json) { - var capacities = json.listcapacityresponse.capacity; - var capacity = function(id, converter) { - var result = $.grep(capacities, function(capacity) { - return capacity.type == id; - }); - return result[0] ? result[0] : { - capacityused: 0, - capacitytotal: 0, - percentused: 0 - }; - }; + window.fetchLatestflag = 0; - dataFns.alerts($.extend(data, { - publicIPAllocated: capacity(8).capacityused, - publicIPTotal: capacity(8).capacitytotal, - publicIPPercentage: parseInt(capacity(8).percentused), - privateIPAllocated: capacity(5).capacityused, - privateIPTotal: capacity(5).capacitytotal, - privateIPPercentage: parseInt(capacity(8).percentused), - memoryAllocated: cloudStack.converters.convertBytes(capacity(0).capacityused), - memoryTotal: cloudStack.converters.convertBytes(capacity(0).capacitytotal), - memoryPercentage: parseInt(capacity(0).percentused), - cpuAllocated: cloudStack.converters.convertHz(capacity(1).capacityused), - cpuTotal: cloudStack.converters.convertHz(capacity(1).capacitytotal), - cpuPercentage: parseInt(capacity(1).percentused) - })); - } - }); - } else { - dataFns.alerts($.extend(data, { - publicIPAllocated: 0, - publicIPTotal: 0, - publicIPPercentage: 0, - privateIPAllocated: 0, - privateIPTotal: 0, - privateIPPercentage: 0, - memoryAllocated: 0, - memoryTotal: 0, - memoryPercentage: 0, - cpuAllocated: 0, - cpuTotal: 0, - cpuPercentage: 0 - })); - } + dataFns.alerts(data); }, alerts: function(data) { From 89dcc0734072859b3c97aa7a4bf56752f6d5f721 Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Fri, 15 Mar 2013 12:33:42 -0700 Subject: [PATCH 011/123] Health check: Fix height of dialog box --- ui/css/cloudstack3.css | 40 ++++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/ui/css/cloudstack3.css b/ui/css/cloudstack3.css index 74105687096..54a77780cda 100644 --- a/ui/css/cloudstack3.css +++ b/ui/css/cloudstack3.css @@ -10610,32 +10610,36 @@ div.ui-dialog div.acl div.multi-edit div.data div.data-body div.data-item table width: 65px; } -/*HEALTH CHECK */ +/*HEALTH CHECK*/ +.ui-dialog .health-check { + height: 295px !important; + padding-bottom: 93px; +} div.ui-dialog div.health-check div.health-check-description { -color: #808080; + color: #808080; } -div.ui-dialog div.health-check div.form-container form div.form-item { -width:58% margin-left:116px; margin-top:-16px; margin-bottom:30px; } - -div.ui-dialog div.health-check div.health-check-config-title { -float:left; -color: #808080; -font-size:17px; -margin-left:15px; +div.ui-dialog div.health-check div.form-container form div.form-item { + width: 58% margin-left:116px; + margin-top: -16px; + margin-bottom: 30px; } -div.ui-dialog div.health-check div.health-check-advanced-title { -float:left; -color: #808080; -font-size:17px; -margin-left:15px; -margin-top:-70px; +div.ui-dialog div.health-check div.health-check-config-title { + float: left; + color: #808080; + 
font-size: 17px; + margin-left: 15px; } - - +div.ui-dialog div.health-check div.health-check-advanced-title { + float: left; + color: #808080; + font-size: 17px; + margin-left: 15px; + margin-top: -70px; +} /*Autoscaler*/ .ui-dialog div.autoscaler { From 136e527c63c12eeb62a12bfae8aea7bfd2e12206 Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Fri, 15 Mar 2013 12:33:52 -0700 Subject: [PATCH 012/123] Cleanup CSS formatting --- ui/css/cloudstack3.css | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/ui/css/cloudstack3.css b/ui/css/cloudstack3.css index 54a77780cda..e1dd02d4126 100644 --- a/ui/css/cloudstack3.css +++ b/ui/css/cloudstack3.css @@ -1763,8 +1763,8 @@ div.list-view td.state.off span { } .detail-group .main-groups table td.value .view-all:hover { - background-position: 100% -431px; - } + background-position: 100% -431px; +} /*List-view: subselect dropdown*/ .list-view .subselect { @@ -1789,7 +1789,7 @@ div.list-view td.state.off span { .list-view .subselect select { width: 85%; - margin: 5px 0 4px; + margin: 5px 0 4px; font-size: 10px; } @@ -7774,7 +7774,7 @@ div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody t margin: 0 22px 0 0; } - /** Fix long table overflow*/ +/** Fix long table overflow*/ .detail-view .multi-edit { width: 100%; } @@ -7791,10 +7791,9 @@ div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody t max-width: 87px !important; } - /** Header fields*/ .multi-edit .header-fields { - position: relative; + position: relative; /*+placement:shift 14px 11px;*/ position: relative; left: 14px; From b130e8b3f09a417dd278c59a55bfaaa5836937b4 Mon Sep 17 00:00:00 2001 From: David Grizzanti Date: Fri, 15 Mar 2013 14:29:25 -0400 Subject: [PATCH 013/123] CLOUSTACK-997 improved documentation for assignVirtualMachine API; updated overall description and per parameter descriptions to per bug suggestions --- .../cloudstack/api/command/admin/vm/AssignVMCmd.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/api/src/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java b/api/src/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java index 8a75c66531c..152dd4e14c2 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java @@ -35,7 +35,7 @@ import org.apache.log4j.Logger; import com.cloud.user.Account; import com.cloud.uservm.UserVm; -@APICommand(name = "assignVirtualMachine", description="Move a user VM to another user under same domain.", responseObject=UserVmResponse.class, since="3.0.0") +@APICommand(name = "assignVirtualMachine", description="Assign a VM from one account to another under the same domain. This API is available for Basic zones with security groups and Advance zones with guest networks. 
The VM is restricted to move between accounts under same domain.", responseObject=UserVmResponse.class, since="3.0.0") public class AssignVMCmd extends BaseCmd { public static final Logger s_logger = Logger.getLogger(AssignVMCmd.class.getName()); @@ -46,7 +46,7 @@ public class AssignVMCmd extends BaseCmd { ///////////////////////////////////////////////////// @Parameter(name=ApiConstants.VIRTUAL_MACHINE_ID, type=CommandType.UUID, entityType=UserVmResponse.class, - required=true, description="the vm ID of the user VM to be moved") + required=true, description="id of the VM to be moved") private Long virtualMachineId; @Parameter(name=ApiConstants.ACCOUNT, type=CommandType.STRING, required=true, description="account name of the new VM owner.") @@ -58,11 +58,11 @@ public class AssignVMCmd extends BaseCmd { //Network information @Parameter(name=ApiConstants.NETWORK_IDS, type=CommandType.LIST, collectionType=CommandType.UUID, entityType=NetworkResponse.class, - description="list of network ids that will be part of VM network after move in advanced network setting.") + description="list of new network ids in which the moved VM will participate. In case no network ids are provided the VM will be part of the default network for that zone. In case there is no network yet created for the new account the default network will be created.") private List networkIds; @Parameter(name=ApiConstants.SECURITY_GROUP_IDS, type=CommandType.LIST, collectionType=CommandType.UUID, entityType=SecurityGroupResponse.class, - description="comma separated list of security groups id that going to be applied to the virtual machine. Should be passed only when vm is moved in a zone with Basic Network support.") + description="list of security group ids to be applied on the virtual machine. In case no security groups are provided the VM is part of the default security group.") private List securityGroupIdList; ///////////////////////////////////////////////////// From f0a77d67cc0776a33e3bd12e4f69a4f15c4b3c36 Mon Sep 17 00:00:00 2001 From: David Nalley Date: Sat, 16 Mar 2013 13:26:24 -0400 Subject: [PATCH 014/123] CLOUDSTACK-337 - first iteration of an agent SELinux policy --- packaging/centos63/cloudstack-agent.te | 33 ++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 packaging/centos63/cloudstack-agent.te diff --git a/packaging/centos63/cloudstack-agent.te b/packaging/centos63/cloudstack-agent.te new file mode 100644 index 00000000000..4259e173a46 --- /dev/null +++ b/packaging/centos63/cloudstack-agent.te @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +module cloudstack-agent 1.0; + +require { + type nfs_t; + type system_conf_t; + type mount_t; + type qemu_t; + class file unlink; + class filesystem getattr; +} + +#============= mount_t ============== +allow mount_t system_conf_t:file unlink; + +#============= qemu_t ============== +allow qemu_t nfs_t:filesystem getattr; From eee720ceaa5339186bc7492f01c45a40aade3e75 Mon Sep 17 00:00:00 2001 From: Chip Childers Date: Sat, 16 Mar 2013 15:04:19 -0400 Subject: [PATCH 015/123] CLOUDSTACK-1702: Changed getId to getUuid in an exception message string. This doesn't resolve the bug in question, but does fix the specific message that was reported as the origin of the bug. Signed-off-by: Chip Childers --- server/src/com/cloud/network/NetworkModelImpl.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/com/cloud/network/NetworkModelImpl.java b/server/src/com/cloud/network/NetworkModelImpl.java index 52089df4d95..779b9f23466 100644 --- a/server/src/com/cloud/network/NetworkModelImpl.java +++ b/server/src/com/cloud/network/NetworkModelImpl.java @@ -1457,11 +1457,11 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { if (network.getGuestType() != Network.GuestType.Shared) { List networkMap = _networksDao.listBy(owner.getId(), network.getId()); if (networkMap == null || networkMap.isEmpty()) { - throw new PermissionDeniedException("Unable to use network with id= " + network.getId() + ", permission denied"); + throw new PermissionDeniedException("Unable to use network with id= " + network.getUuid() + ", permission denied"); } } else { if (!isNetworkAvailableInDomain(network.getId(), owner.getDomainId())) { - throw new PermissionDeniedException("Shared network id=" + network.getId() + " is not available in domain id=" + owner.getDomainId()); + throw new PermissionDeniedException("Shared network id=" + network.getUuid() + " is not available in domain id=" + owner.getDomainId()); } } } From 99ed891804f9cd2c9f8d1df6676034c8af35c371 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Mon, 18 Mar 2013 11:00:39 +0530 Subject: [PATCH 016/123] appliance: Fix readme and zerodisk /home Signed-off-by: Rohit Yadav --- tools/appliance/README.md | 4 ++-- tools/appliance/definitions/systemvmtemplate/zerodisk.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/appliance/README.md b/tools/appliance/README.md index 559f79c6adb..bb28829a366 100644 --- a/tools/appliance/README.md +++ b/tools/appliance/README.md @@ -26,11 +26,11 @@ under the License. export PATH=~/.rvm/bin:$PATH - Install Ruby 1.9.3, if it installed some other version: rvm install 1.9.3 + - Install bundler: (if you get any openssl issue see https://rvm.io/packages/openssl) + gem install bundler All the dependencies will be fetched automatically. 
-Vagrant: https://github.com/chipchilders/vagrant.git - To save some time if you've downloaded iso of your distro, put the isos in: tools/appliance/iso/ diff --git a/tools/appliance/definitions/systemvmtemplate/zerodisk.sh b/tools/appliance/definitions/systemvmtemplate/zerodisk.sh index 25bd8c4af2d..b00f7ae7ccc 100644 --- a/tools/appliance/definitions/systemvmtemplate/zerodisk.sh +++ b/tools/appliance/definitions/systemvmtemplate/zerodisk.sh @@ -6,7 +6,7 @@ rm -fv .veewee_version .veewee_params .vbox_version echo "Cleaning up" # Zero out the free space to save space in the final image: -for path in / /boot /usr /var /opt /tmp +for path in / /boot /usr /var /opt /tmp /home do dd if=/dev/zero of=$path/zero bs=1M sync From 183b989d30d10943615f7498f42ed7213cadcbd8 Mon Sep 17 00:00:00 2001 From: Rajesh Battala Date: Mon, 18 Mar 2013 14:01:15 +0530 Subject: [PATCH 017/123] CLOUDSTACK-664:AWS-Health Check feature: Patch applied cleanly , RAT build is successful --- .../routing/HealthCheckLBConfigAnswer.java | 42 ++ .../routing/HealthCheckLBConfigCommand.java | 39 + .../cloud/agent/api/to/LoadBalancerTO.java | 114 ++- api/src/com/cloud/event/EventTypes.java | 2 + api/src/com/cloud/network/Network.java | 1 + .../element/LoadBalancingServiceProvider.java | 8 +- .../cloud/network/lb/LoadBalancingRule.java | 84 ++- .../network/lb/LoadBalancingRulesService.java | 32 +- .../network/rules/HealthCheckPolicy.java | 45 ++ .../apache/cloudstack/api/ApiConstants.java | 5 + .../cloudstack/api/ResponseGenerator.java | 7 + .../CreateLBHealthCheckPolicyCmd.java | 168 +++++ .../DeleteLBHealthCheckPolicyCmd.java | 116 +++ .../ListLBHealthCheckPoliciesCmd.java | 85 +++ .../response/LBHealthCheckPolicyResponse.java | 98 +++ .../api/response/LBHealthCheckResponse.java | 102 +++ client/tomcatconf/commands.properties.in | 3 + .../resources/components-example.xml | 2 + .../element/ElasticLoadBalancerElement.java | 7 + .../lb/ElasticLoadBalancerManagerImpl.java | 9 +- .../F5ExternalLoadBalancerElement.java | 8 + .../network/element/NetscalerElement.java | 25 +- .../network/resource/NetscalerResource.java | 291 +++++++- .../src/com/cloud/api/ApiResponseHelper.java | 61 ++ .../src/com/cloud/configuration/Config.java | 6 +- .../ExternalLoadBalancerDeviceManager.java | 7 +- ...ExternalLoadBalancerDeviceManagerImpl.java | 95 ++- .../cloud/network/LBHealthCheckPolicyVO.java | 157 ++++ .../com/cloud/network/NetworkManagerImpl.java | 71 +- .../network/dao/LBHealthCheckPolicyDao.java | 35 + .../dao/LBHealthCheckPolicyDaoImpl.java | 71 ++ .../network/dao/LoadBalancerVMMapVO.java | 16 +- .../network/element/VirtualRouterElement.java | 10 +- .../network/lb/LBHealthCheckManager.java | 24 + .../network/lb/LBHealthCheckManagerImpl.java | 110 +++ .../network/lb/LoadBalancingRulesManager.java | 2 + .../lb/LoadBalancingRulesManagerImpl.java | 680 ++++++++++++++---- .../VirtualNetworkApplianceManagerImpl.java | 9 +- .../cloud/server/ManagementServerImpl.java | 3 + setup/db/db/schema-410to420.sql | 16 + 40 files changed, 2449 insertions(+), 217 deletions(-) create mode 100644 api/src/com/cloud/agent/api/routing/HealthCheckLBConfigAnswer.java create mode 100644 api/src/com/cloud/agent/api/routing/HealthCheckLBConfigCommand.java create mode 100644 api/src/com/cloud/network/rules/HealthCheckPolicy.java create mode 100644 api/src/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBHealthCheckPolicyCmd.java create mode 100644 api/src/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLBHealthCheckPolicyCmd.java create mode 100644 
api/src/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java create mode 100644 api/src/org/apache/cloudstack/api/response/LBHealthCheckPolicyResponse.java create mode 100644 api/src/org/apache/cloudstack/api/response/LBHealthCheckResponse.java create mode 100644 server/src/com/cloud/network/LBHealthCheckPolicyVO.java create mode 100644 server/src/com/cloud/network/dao/LBHealthCheckPolicyDao.java create mode 100644 server/src/com/cloud/network/dao/LBHealthCheckPolicyDaoImpl.java create mode 100644 server/src/com/cloud/network/lb/LBHealthCheckManager.java create mode 100644 server/src/com/cloud/network/lb/LBHealthCheckManagerImpl.java diff --git a/api/src/com/cloud/agent/api/routing/HealthCheckLBConfigAnswer.java b/api/src/com/cloud/agent/api/routing/HealthCheckLBConfigAnswer.java new file mode 100644 index 00000000000..dfca4ab5908 --- /dev/null +++ b/api/src/com/cloud/agent/api/routing/HealthCheckLBConfigAnswer.java @@ -0,0 +1,42 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.api.routing; + +import java.util.List; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.LoadBalancerTO; +import com.cloud.agent.api.to.NicTO; + +/** + * LoadBalancerConfigCommand sends the load balancer configuration + */ +public class HealthCheckLBConfigAnswer extends Answer { + List loadBalancers; + + protected HealthCheckLBConfigAnswer() { + } + + public HealthCheckLBConfigAnswer(List loadBalancers) { + this.loadBalancers = loadBalancers; + } + + public List getLoadBalancers() { + return loadBalancers; + } + +} diff --git a/api/src/com/cloud/agent/api/routing/HealthCheckLBConfigCommand.java b/api/src/com/cloud/agent/api/routing/HealthCheckLBConfigCommand.java new file mode 100644 index 00000000000..f705f6c9707 --- /dev/null +++ b/api/src/com/cloud/agent/api/routing/HealthCheckLBConfigCommand.java @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api.routing; + +import com.cloud.agent.api.to.LoadBalancerTO; +import com.cloud.agent.api.to.NicTO; + +/** + * LoadBalancerConfigCommand sends the load balancer configuration + */ +public class HealthCheckLBConfigCommand extends NetworkElementCommand { + LoadBalancerTO[] loadBalancers; + + protected HealthCheckLBConfigCommand() { + } + + public HealthCheckLBConfigCommand(LoadBalancerTO[] loadBalancers) { + this.loadBalancers = loadBalancers; + } + + public LoadBalancerTO[] getLoadBalancers() { + return loadBalancers; + } + +} diff --git a/api/src/com/cloud/agent/api/to/LoadBalancerTO.java b/api/src/com/cloud/agent/api/to/LoadBalancerTO.java index 2d166ea1e1e..df2f8a87490 100644 --- a/api/src/com/cloud/agent/api/to/LoadBalancerTO.java +++ b/api/src/com/cloud/agent/api/to/LoadBalancerTO.java @@ -31,6 +31,7 @@ import com.cloud.network.lb.LoadBalancingRule.LbAutoScaleVmGroup; import com.cloud.network.lb.LoadBalancingRule.LbAutoScaleVmProfile; import com.cloud.network.lb.LoadBalancingRule.LbCondition; import com.cloud.network.lb.LoadBalancingRule.LbDestination; +import com.cloud.network.lb.LoadBalancingRule.LbHealthCheckPolicy; import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; import com.cloud.utils.Pair; @@ -46,8 +47,10 @@ public class LoadBalancerTO { boolean inline; DestinationTO[] destinations; private StickinessPolicyTO[] stickinessPolicies; + private HealthCheckPolicyTO[] healthCheckPolicies; private AutoScaleVmGroupTO autoScaleVmGroupTO; final static int MAX_STICKINESS_POLICIES = 1; + final static int MAX_HEALTHCHECK_POLICIES = 1; public LoadBalancerTO(String uuid, String srcIp, int srcPort, String protocol, String algorithm, boolean revoked, boolean alreadyAdded, boolean inline, List destinations) { if (destinations == null) { // for autoscaleconfig destinations will be null; @@ -69,23 +72,52 @@ public class LoadBalancerTO { } } - public LoadBalancerTO(String id, String srcIp, int srcPort, String protocol, String algorithm, boolean revoked, boolean alreadyAdded, boolean inline, List arg_destinations, List stickinessPolicies) { + public LoadBalancerTO(String id, String srcIp, int srcPort, String protocol, String algorithm, boolean revoked, + boolean alreadyAdded, boolean inline, List arg_destinations, + List stickinessPolicies) { + + this(id, srcIp, srcPort, protocol, algorithm, revoked, alreadyAdded, inline, arg_destinations, + stickinessPolicies, null); + } + + public LoadBalancerTO(String id, String srcIp, int srcPort, String protocol, String algorithm, boolean revoked, + boolean alreadyAdded, boolean inline, List arg_destinations, + List stickinessPolicies, List healthCheckPolicies) { this(id, srcIp, srcPort, protocol, algorithm, revoked, alreadyAdded, inline, arg_destinations); this.stickinessPolicies = null; + this.healthCheckPolicies = null; if (stickinessPolicies != null && stickinessPolicies.size() > 0) { this.stickinessPolicies = new StickinessPolicyTO[MAX_STICKINESS_POLICIES]; int index = 0; for (LbStickinessPolicy stickinesspolicy : stickinessPolicies) { if (!stickinesspolicy.isRevoked()) { - this.stickinessPolicies[index] = new StickinessPolicyTO(stickinesspolicy.getMethodName(), stickinesspolicy.getParams()); + this.stickinessPolicies[index] = new StickinessPolicyTO(stickinesspolicy.getMethodName(), + stickinesspolicy.getParams()); index++; - if (index == MAX_STICKINESS_POLICIES) break; - } + if (index == MAX_STICKINESS_POLICIES) + break; } - if (index == 0) this.stickinessPolicies = null; } + if (index == 0) + 
this.stickinessPolicies = null; + } + + if (healthCheckPolicies != null && healthCheckPolicies.size() > 0) { + this.healthCheckPolicies = new HealthCheckPolicyTO[MAX_HEALTHCHECK_POLICIES]; + int index = 0; + for (LbHealthCheckPolicy hcp : healthCheckPolicies) { + this.healthCheckPolicies[0] = new HealthCheckPolicyTO(hcp.getpingpath(), hcp.getDescription(), + hcp.getResponseTime(), hcp.getHealthcheckInterval(), hcp.getHealthcheckThresshold(), + hcp.getUnhealthThresshold(), hcp.isRevoked()); + index++; + if (index == MAX_HEALTHCHECK_POLICIES) + break; } + if (index == 0) + this.healthCheckPolicies = null; + } + } protected LoadBalancerTO() { } @@ -126,6 +158,10 @@ public class LoadBalancerTO { return stickinessPolicies; } + public HealthCheckPolicyTO[] getHealthCheckPolicies() { + return healthCheckPolicies; + } + public DestinationTO[] getDestinations() { return destinations; } @@ -158,6 +194,65 @@ public class LoadBalancerTO { this._methodName = methodName; this._paramsList = paramsList; } + } + + public static class HealthCheckPolicyTO { + private String pingPath; + private String description; + private int responseTime; + private int healthcheckInterval; + private int healthcheckThresshold; + private int unhealthThresshold; + private boolean revoke = false; + + public HealthCheckPolicyTO(String pingPath, String description, int responseTime, int healthcheckInterval, + int healthcheckThresshold, int unhealthThresshold, boolean revoke) { + + this.description = description; + this.pingPath = pingPath; + this.responseTime = responseTime; + this.healthcheckInterval = healthcheckInterval; + this.healthcheckThresshold = healthcheckThresshold; + this.unhealthThresshold = unhealthThresshold; + this.revoke = revoke; + } + + public HealthCheckPolicyTO() { + + } + + public String getpingPath() { + return pingPath; + } + + public String getDescription() { + return description; + } + + public int getResponseTime() { + return responseTime; + } + + public int getHealthcheckInterval() { + return healthcheckInterval; + } + + public int getHealthcheckThresshold() { + return healthcheckThresshold; + } + + public int getUnhealthThresshold() { + return unhealthThresshold; + } + + public void setRevoke(boolean revoke) { + this.revoke = revoke; + } + + public boolean isRevoked() { + return revoke; + } + } public static class DestinationTO { @@ -165,6 +260,7 @@ public class LoadBalancerTO { int destPort; boolean revoked; boolean alreadyAdded; + String monitorState; public DestinationTO(String destIp, int destPort, boolean revoked, boolean alreadyAdded) { this.destIp = destIp; this.destPort = destPort; @@ -190,6 +286,14 @@ public class LoadBalancerTO { public boolean isAlreadyAdded() { return alreadyAdded; } + + public void setMonitorState(String state) { + this.monitorState = state; + } + + public String getMonitorState() { + return monitorState; + } } public static class CounterTO implements Serializable { private final String name; diff --git a/api/src/com/cloud/event/EventTypes.java b/api/src/com/cloud/event/EventTypes.java index 0087edca743..f38865c9e34 100755 --- a/api/src/com/cloud/event/EventTypes.java +++ b/api/src/com/cloud/event/EventTypes.java @@ -110,6 +110,8 @@ public class EventTypes { public static final String EVENT_LOAD_BALANCER_DELETE = "LB.DELETE"; public static final String EVENT_LB_STICKINESSPOLICY_CREATE = "LB.STICKINESSPOLICY.CREATE"; public static final String EVENT_LB_STICKINESSPOLICY_DELETE = "LB.STICKINESSPOLICY.DELETE"; + public static final String EVENT_LB_HEALTHCHECKPOLICY_CREATE = 
"LB.HEALTHCHECKPOLICY.CREATE"; + public static final String EVENT_LB_HEALTHCHECKPOLICY_DELETE = "LB.HEALTHCHECKPOLICY.DELETE"; public static final String EVENT_LOAD_BALANCER_UPDATE = "LB.UPDATE"; // Account events diff --git a/api/src/com/cloud/network/Network.java b/api/src/com/cloud/network/Network.java index efed5cd4f8b..89dac686d0f 100644 --- a/api/src/com/cloud/network/Network.java +++ b/api/src/com/cloud/network/Network.java @@ -188,6 +188,7 @@ public interface Network extends ControlledEntity, StateObject, I public static final Capability InlineMode = new Capability("InlineMode"); public static final Capability SupportedTrafficDirection = new Capability("SupportedTrafficDirection"); public static final Capability SupportedEgressProtocols = new Capability("SupportedEgressProtocols"); + public static final Capability HealthCheckPolicy = new Capability("HealthCheckPolicy"); private String name; diff --git a/api/src/com/cloud/network/element/LoadBalancingServiceProvider.java b/api/src/com/cloud/network/element/LoadBalancingServiceProvider.java index 879ea0ed663..cb3155f9c05 100644 --- a/api/src/com/cloud/network/element/LoadBalancingServiceProvider.java +++ b/api/src/com/cloud/network/element/LoadBalancingServiceProvider.java @@ -18,6 +18,7 @@ package com.cloud.network.element; import java.util.List; +import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.Network; import com.cloud.network.lb.LoadBalancingRule; @@ -25,6 +26,7 @@ import com.cloud.network.lb.LoadBalancingRule; public interface LoadBalancingServiceProvider extends NetworkElement, IpDeployingRequester { /** * Apply rules + * * @param network * @param rules * @return @@ -34,10 +36,14 @@ public interface LoadBalancingServiceProvider extends NetworkElement, IpDeployin /** * Validate rules + * * @param network * @param rule - * @return true/false. true should be return if there are no validations. false should be return if any oneof the validation fails. + * @return true/false. true should be return if there are no validations. + *false should be return if any oneof the validation fails. 
* @throws */ boolean validateLBRule(Network network, LoadBalancingRule rule); + + List updateHealthChecks(Network network, List lbrules); } diff --git a/api/src/com/cloud/network/lb/LoadBalancingRule.java b/api/src/com/cloud/network/lb/LoadBalancingRule.java index fb1d988a4de..84526c5ea45 100644 --- a/api/src/com/cloud/network/lb/LoadBalancingRule.java +++ b/api/src/com/cloud/network/lb/LoadBalancingRule.java @@ -32,11 +32,14 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { private List destinations; private List stickinessPolicies; private LbAutoScaleVmGroup autoScaleVmGroup; + private List healthCheckPolicies; - public LoadBalancingRule(LoadBalancer lb, List destinations, List stickinessPolicies) { + public LoadBalancingRule(LoadBalancer lb, List destinations, + List stickinessPolicies, List healthCheckPolicies) { this.lb = lb; this.destinations = destinations; this.stickinessPolicies = stickinessPolicies; + this.healthCheckPolicies = healthCheckPolicies; } @Override @@ -136,11 +139,17 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { return stickinessPolicies; } + public List getHealthCheckPolicies() { + return healthCheckPolicies; + } public interface Destination { String getIpAddress(); + int getDestinationPortStart(); + int getDestinationPortEnd(); + boolean isRevoked(); } @@ -174,6 +183,64 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { } } + public static class LbHealthCheckPolicy { + private String pingpath; + private String description; + private int responseTime; + private int healthcheckInterval; + private int healthcheckThresshold; + private int unhealthThresshold; + private boolean _revoke; + + public LbHealthCheckPolicy(String pingpath, String description, int responseTime, int healthcheckInterval, + int healthcheckThresshold, int unhealthThresshold) { + this(pingpath, description, responseTime, healthcheckInterval, healthcheckThresshold, unhealthThresshold, false); + } + + public LbHealthCheckPolicy(String pingpath, String description, int responseTime, int healthcheckInterval, + int healthcheckThresshold, int unhealthThresshold, boolean revoke) { + this.pingpath = pingpath; + this.description = description; + this.responseTime = responseTime; + this.healthcheckInterval = healthcheckInterval; + this.healthcheckThresshold = healthcheckThresshold; + this.unhealthThresshold = unhealthThresshold; + this._revoke = revoke; + } + + public LbHealthCheckPolicy() { + } + + public String getpingpath() { + return pingpath; + } + + public String getDescription() { + return description; + } + + public int getResponseTime() { + return responseTime; + } + + public int getHealthcheckInterval() { + return healthcheckInterval; + } + + public int getHealthcheckThresshold() { + return healthcheckThresshold; + } + + public int getUnhealthThresshold() { + return unhealthThresshold; + } + + public boolean isRevoked() { + return _revoke; + } + + } + public static class LbDestination implements Destination { private int portStart; private int portEnd; @@ -191,10 +258,12 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { public String getIpAddress() { return ip; } + @Override public int getDestinationPortStart() { return portStart; } + @Override public int getDestinationPortEnd() { return portEnd; @@ -230,15 +299,16 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { return null; } - @Override public TrafficType getTrafficType() { return null; } + @Override public FirewallRuleType 
getType() { return FirewallRuleType.User; } + public LbAutoScaleVmGroup getAutoScaleVmGroup() { return autoScaleVmGroup; } @@ -274,8 +344,7 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { private final AutoScalePolicy policy; private boolean revoked; - public LbAutoScalePolicy(AutoScalePolicy policy, List conditions) - { + public LbAutoScalePolicy(AutoScalePolicy policy, List conditions) { this.policy = policy; this.conditions = conditions; } @@ -309,7 +378,9 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { private final String networkId; private final String vmName; - public LbAutoScaleVmProfile(AutoScaleVmProfile profile, String autoScaleUserApiKey, String autoScaleUserSecretKey, String csUrl, String zoneId, String domainId, String serviceOfferingId, String templateId, String vmName, String networkId) { + public LbAutoScaleVmProfile(AutoScaleVmProfile profile, String autoScaleUserApiKey, + String autoScaleUserSecretKey, String csUrl, String zoneId, String domainId, String serviceOfferingId, + String templateId, String vmName, String networkId) { this.profile = profile; this.autoScaleUserApiKey = autoScaleUserApiKey; this.autoScaleUserSecretKey = autoScaleUserSecretKey; @@ -369,7 +440,8 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { private final LbAutoScaleVmProfile profile; private final String currentState; - public LbAutoScaleVmGroup(AutoScaleVmGroup vmGroup, List policies, LbAutoScaleVmProfile profile, String currentState) { + public LbAutoScaleVmGroup(AutoScaleVmGroup vmGroup, List policies, + LbAutoScaleVmProfile profile, String currentState) { this.vmGroup = vmGroup; this.policies = policies; this.profile = profile; diff --git a/api/src/com/cloud/network/lb/LoadBalancingRulesService.java b/api/src/com/cloud/network/lb/LoadBalancingRulesService.java index 3743aae4bf8..ed39bedaa6f 100644 --- a/api/src/com/cloud/network/lb/LoadBalancingRulesService.java +++ b/api/src/com/cloud/network/lb/LoadBalancingRulesService.java @@ -18,8 +18,10 @@ package com.cloud.network.lb; import java.util.List; +import org.apache.cloudstack.api.command.user.loadbalancer.CreateLBHealthCheckPolicyCmd; import org.apache.cloudstack.api.command.user.loadbalancer.CreateLBStickinessPolicyCmd; import org.apache.cloudstack.api.command.user.loadbalancer.CreateLoadBalancerRuleCmd; +import org.apache.cloudstack.api.command.user.loadbalancer.ListLBHealthCheckPoliciesCmd; import org.apache.cloudstack.api.command.user.loadbalancer.ListLBStickinessPoliciesCmd; import org.apache.cloudstack.api.command.user.loadbalancer.ListLoadBalancerRuleInstancesCmd; import org.apache.cloudstack.api.command.user.loadbalancer.ListLoadBalancerRulesCmd; @@ -28,6 +30,8 @@ import org.apache.cloudstack.api.command.user.loadbalancer.UpdateLoadBalancerRul import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; +import com.cloud.network.rules.HealthCheckPolicy; import com.cloud.network.rules.LoadBalancer; import com.cloud.network.rules.StickinessPolicy; import com.cloud.uservm.UserVm; @@ -66,6 +70,22 @@ public interface LoadBalancingRulesService { public boolean applyLBStickinessPolicy(CreateLBStickinessPolicyCmd cmd) throws ResourceUnavailableException; boolean deleteLBStickinessPolicy(long stickinessPolicyId, boolean apply); + + /** + * Create a healthcheck policy to a load 
balancer from the given healthcheck + * parameters in (name,value) pairs. + * + * @param cmd + * the command specifying the healthcheck parameters (ping path, response timeout, + * interval, healthy/unhealthy thresholds), policy name and description. + * @return the newly created healthcheck policy if successful, null + * otherwise + * @throws NetworkRuleConflictException + */ + public HealthCheckPolicy createLBHealthCheckPolicy(CreateLBHealthCheckPolicyCmd cmd); + public boolean applyLBHealthCheckPolicy(CreateLBHealthCheckPolicyCmd cmd) throws ResourceUnavailableException; + boolean deleteLBHealthCheckPolicy(long healthCheckPolicyId, boolean apply); + /** * Assign a virtual machine, or list of virtual machines, to a load balancer. */ @@ -104,8 +124,18 @@ public interface LoadBalancingRulesService { */ List searchForLBStickinessPolicies(ListLBStickinessPoliciesCmd cmd); + /** + * List healthcheck policies based on the given criteria + * + * @param cmd + * the command that specifies the load balancing rule id. + * @return list of healthcheck policies that match the criteria. + */ + + List searchForLBHealthCheckPolicies(ListLBHealthCheckPoliciesCmd cmd); + List listByNetworkId(long networkId); LoadBalancer findById(long LoadBalancer); - + public void updateLBHealthChecks() throws ResourceUnavailableException; } diff --git a/api/src/com/cloud/network/rules/HealthCheckPolicy.java b/api/src/com/cloud/network/rules/HealthCheckPolicy.java new file mode 100644 index 00000000000..96bb28204a2 --- /dev/null +++ b/api/src/com/cloud/network/rules/HealthCheckPolicy.java @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
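
As a minimal sketch only (not part of the patch), one way a caller could combine the new LoadBalancingRulesService methods above, create the policy and then apply it to the device; the _lbService field and the cmd object are assumed to come from the surrounding command class, and the rollback on failure is purely illustrative:

    // Sketch, assuming an injected LoadBalancingRulesService (_lbService).
    private HealthCheckPolicy createAndApply(CreateLBHealthCheckPolicyCmd cmd) throws ResourceUnavailableException {
        HealthCheckPolicy policy = _lbService.createLBHealthCheckPolicy(cmd); // persist the policy
        if (!_lbService.applyLBHealthCheckPolicy(cmd)) {                      // push it to the LB device
            // avoid leaving an orphan policy behind if the device rejects it
            _lbService.deleteLBHealthCheckPolicy(policy.getId(), false);
            return null;
        }
        return policy;
    }
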
+package com.cloud.network.rules; + +import java.util.List; + +import com.cloud.utils.Pair; +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +/** + */ +public interface HealthCheckPolicy extends InternalIdentity, Identity { + + public long getLoadBalancerId(); + + public String getpingpath(); + + public String getDescription(); + + public int getResponseTime(); + + public int getHealthcheckInterval(); + + public int getHealthcheckThresshold(); + + public int getUnhealthThresshold(); + + public boolean isRevoke(); + +} diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java index b40b26ce57c..f4c6c527d1a 100755 --- a/api/src/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/org/apache/cloudstack/api/ApiConstants.java @@ -460,6 +460,11 @@ public class ApiConstants { public static final String UCS_BLADE_ID = "bladeid"; public static final String VM_GUEST_IP = "vmguestip"; public static final String OLDER_THAN = "olderthan"; + public static final String HEALTHCHECK_RESPONSE_TIMEOUT = "responsetimeout"; + public static final String HEALTHCHECK_INTERVAL_TIME = "intervaltime"; + public static final String HEALTHCHECK_HEALTHY_THRESHOLD = "healthythreshold"; + public static final String HEALTHCHECK_UNHEALTHY_THRESHOLD = "unhealthythreshold"; + public static final String HEALTHCHECK_PINGPATH = "pingpath"; public enum HostDetails { all, capacity, events, stats, min; diff --git a/api/src/org/apache/cloudstack/api/ResponseGenerator.java b/api/src/org/apache/cloudstack/api/ResponseGenerator.java index a6025149846..628a185e93d 100644 --- a/api/src/org/apache/cloudstack/api/ResponseGenerator.java +++ b/api/src/org/apache/cloudstack/api/ResponseGenerator.java @@ -47,6 +47,7 @@ import org.apache.cloudstack.api.response.HypervisorCapabilitiesResponse; import org.apache.cloudstack.api.response.IPAddressResponse; import org.apache.cloudstack.api.response.InstanceGroupResponse; import org.apache.cloudstack.api.response.IpForwardingRuleResponse; +import org.apache.cloudstack.api.response.LBHealthCheckResponse; import org.apache.cloudstack.api.response.LBStickinessResponse; import org.apache.cloudstack.api.response.LDAPConfigResponse; import org.apache.cloudstack.api.response.LoadBalancerResponse; @@ -133,6 +134,7 @@ import com.cloud.network.as.Condition; import com.cloud.network.as.Counter; import com.cloud.network.router.VirtualRouter; import com.cloud.network.rules.FirewallRule; +import com.cloud.network.rules.HealthCheckPolicy; import com.cloud.network.rules.LoadBalancer; import com.cloud.network.rules.PortForwardingRule; import com.cloud.network.rules.StaticNatRule; @@ -213,6 +215,11 @@ public interface ResponseGenerator { LBStickinessResponse createLBStickinessPolicyResponse(StickinessPolicy stickinessPolicy, LoadBalancer lb); + LBHealthCheckResponse createLBHealthCheckPolicyResponse(List healthcheckPolicies, + LoadBalancer lb); + + LBHealthCheckResponse createLBHealthCheckPolicyResponse(HealthCheckPolicy healthcheckPolicy, LoadBalancer lb); + PodResponse createPodResponse(Pod pod, Boolean showCapacities); ZoneResponse createZoneResponse(DataCenter dataCenter, Boolean showCapacities); diff --git a/api/src/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBHealthCheckPolicyCmd.java b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBHealthCheckPolicyCmd.java new file mode 100644 index 00000000000..ac0ec3a9dab --- /dev/null +++ 
b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBHealthCheckPolicyCmd.java @@ -0,0 +1,168 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.loadbalancer; + + +import org.apache.cloudstack.api.response.FirewallRuleResponse; +import org.apache.log4j.Logger; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCreateCmd; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.rules.HealthCheckPolicy; +import org.apache.cloudstack.api.response.LBHealthCheckResponse; +import com.cloud.network.rules.LoadBalancer; +import com.cloud.user.Account; +import com.cloud.user.UserContext; + + +@APICommand(name = "createLBHealthCheckPolicy", description = "Creates a Load Balancer healthcheck policy ", responseObject = LBHealthCheckResponse.class, since="4.2.0") +@SuppressWarnings("rawtypes") +public class CreateLBHealthCheckPolicyCmd extends BaseAsyncCreateCmd { + public static final Logger s_logger = Logger + .getLogger(CreateLBHealthCheckPolicyCmd.class.getName()); + + private static final String s_name = "createlbhealthcheckpolicyresponse"; + + // /////////////////////////////////////////////////// + // ////////////// API parameters ///////////////////// + // /////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.LBID, type = CommandType.UUID, entityType = FirewallRuleResponse.class, required = true, description = "the ID of the load balancer rule") + private Long lbRuleId; + + @Parameter(name = ApiConstants.DESCRIPTION, type = CommandType.STRING, description = "the description of the load balancer HealthCheck policy") + private String description; + + @Parameter(name = ApiConstants.HEALTHCHECK_PINGPATH, type = CommandType.STRING, required = false, description = "HTTP Ping Path") + private String pingPath; + + @Parameter(name = ApiConstants.HEALTHCHECK_RESPONSE_TIMEOUT, type = CommandType.INTEGER, required = false, description = "Time to wait when receiving a response from the health check (2sec - 60 sec)") + private int responsTimeOut; + + @Parameter(name = ApiConstants.HEALTHCHECK_INTERVAL_TIME, type = CommandType.INTEGER, required = false, description = "Amount of time between health checks (1 sec - 20940 sec)") + private int healthCheckInterval; + + @Parameter(name = ApiConstants.HEALTHCHECK_HEALTHY_THRESHOLD, type = CommandType.INTEGER, 
required = false, description = "Number of consecutive health check success before declaring an instance healthy") + private int healthyThreshold; + + @Parameter(name = ApiConstants.HEALTHCHECK_UNHEALTHY_THRESHOLD, type = CommandType.INTEGER, required = false, description = "Number of consecutive health check failures before declaring an instance unhealthy") + private int unhealthyThreshold; + + // /////////////////////////////////////////////////// + // ///////////////// Accessors /////////////////////// + // /////////////////////////////////////////////////// + + public Long getLbRuleId() { + return lbRuleId; + } + + public String getDescription() { + return description; + } + + public String getPingPath() { + return pingPath; + } + + // /////////////////////////////////////////////////// + // ///////////// API Implementation/////////////////// + // /////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + Account account = UserContext.current().getCaller(); + if (account != null) { + return account.getId(); + } + + return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked + } + + public int getResponsTimeOut() { + return responsTimeOut; + } + + public int getHealthCheckInterval() { + return healthCheckInterval; + } + + public int getHealthyThreshold() { + return healthyThreshold; + } + + public int getUnhealthyThreshold() { + return unhealthyThreshold; + } + + @Override + public void execute() throws ResourceAllocationException, ResourceUnavailableException { + HealthCheckPolicy policy = null; + boolean success = false; + + try { + UserContext.current().setEventDetails("Load balancer healthcheck policy Id : " + getEntityId()); + success = _lbService.applyLBHealthCheckPolicy(this); + if (success) { + // State might be different after the rule is applied, so get new object here + policy = _entityMgr.findById(HealthCheckPolicy.class, getEntityId()); + LoadBalancer lb = _lbService.findById(policy.getLoadBalancerId()); + LBHealthCheckResponse hcResponse = _responseGenerator.createLBHealthCheckPolicyResponse(policy, lb); + setResponseObject(hcResponse); + hcResponse.setResponseName(getCommandName()); + } + } finally { + if (!success || (policy == null)) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create healthcheck policy "); + } + } + } + + @Override + public void create() { + try { + HealthCheckPolicy result = _lbService.createLBHealthCheckPolicy(this); + this.setEntityId(result.getId()); + this.setEntityUuid(result.getUuid()); + } catch (InvalidParameterValueException e) { + s_logger.warn("Exception: ", e); + throw new ServerApiException(ApiErrorCode.MALFORMED_PARAMETER_ERROR , e.getMessage()); + } + } + + @Override + public String getEventType() { + return EventTypes.EVENT_LB_HEALTHCHECKPOLICY_CREATE; + } + + @Override + public String getEventDescription() { + return "Create Load Balancer HealthCheck policy"; + } +} diff --git a/api/src/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLBHealthCheckPolicyCmd.java b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLBHealthCheckPolicyCmd.java new file mode 100644 index 00000000000..bf91da51d68 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLBHealthCheckPolicyCmd.java @@ -0,0 +1,116 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license 
agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.loadbalancer; + +import org.apache.cloudstack.api.response.LBHealthCheckResponse; +import org.apache.log4j.Logger; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.SuccessResponse; +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.network.rules.HealthCheckPolicy; +import com.cloud.network.rules.LoadBalancer; +import com.cloud.user.Account; +import com.cloud.user.UserContext; + +@APICommand(name = "deleteLBHealthCheckPolicy", description = "Deletes a load balancer HealthCheck policy.", responseObject = SuccessResponse.class, since="4.2.0") +public class DeleteLBHealthCheckPolicyCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(DeleteLBHealthCheckPolicyCmd.class.getName()); + private static final String s_name = "deletelbhealthcheckpolicyresponse"; + // /////////////////////////////////////////////////// + // ////////////// API parameters ///////////////////// + // /////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = LBHealthCheckResponse.class, + required = true, description = "the ID of the load balancer HealthCheck policy") + private Long id; + + // /////////////////////////////////////////////////// + // ///////////////// Accessors /////////////////////// + // /////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + // /////////////////////////////////////////////////// + // ///////////// API Implementation/////////////////// + // /////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + Account account = UserContext.current().getCaller(); + if (account != null) { + return account.getId(); + } + + return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked + } + + @Override + public String getEventType() { + return EventTypes.EVENT_LB_HEALTHCHECKPOLICY_DELETE; + } + + @Override + public String getEventDescription() { + return "deleting load balancer HealthCheck policy: " + getId(); + } + + @Override + public void execute() { + UserContext.current().setEventDetails("Load balancer healthcheck policy Id: " + getId()); + boolean result = _lbService.deleteLBHealthCheckPolicy(getId() , true); + + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + 
this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete load balancer healthcheck policy"); + } + } + + @Override + public String getSyncObjType() { + return BaseAsyncCmd.networkSyncObject; + } + + @Override + public Long getSyncObjId() { + HealthCheckPolicy policy = _entityMgr.findById(HealthCheckPolicy.class, + getId()); + if (policy == null) { + throw new InvalidParameterValueException("Unable to find load balancer healthcheck rule: " + id); + } + LoadBalancer lb = _lbService.findById(policy.getLoadBalancerId()); + if (lb == null) { + throw new InvalidParameterValueException("Unable to find load balancer rule for healthcheck rule: " + id); + } + return lb.getNetworkId(); + } +} diff --git a/api/src/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java new file mode 100644 index 00000000000..cf5ea3238b8 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java @@ -0,0 +1,85 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
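
For reference, a hedged sketch of the request parameters the new create command accepts, using the ApiConstants names introduced above; the lbRuleUuid variable, the java.util.Map imports and the surrounding API client plumbing are assumed:

    // Parameter names match ApiConstants and the @Parameter annotations above.
    Map<String, String> params = new LinkedHashMap<String, String>();
    params.put("command", "createLBHealthCheckPolicy");
    params.put("lbruleid", lbRuleUuid);       // required: UUID of the load balancer rule
    params.put("pingpath", "/");              // optional HTTP ping path
    params.put("responsetimeout", "2");       // 2 - 60 seconds
    params.put("intervaltime", "5");          // 1 - 20940 seconds
    params.put("healthythreshold", "2");      // consecutive successes before an instance is healthy
    params.put("unhealthythreshold", "1");    // consecutive failures before an instance is unhealthy
    // deleteLBHealthCheckPolicy takes only "id", the UUID of the policy to remove.
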
+package org.apache.cloudstack.api.command.user.loadbalancer; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.response.FirewallRuleResponse; +import org.apache.log4j.Logger; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.LBHealthCheckResponse; +import org.apache.cloudstack.api.response.LBStickinessResponse; +import org.apache.cloudstack.api.response.ListResponse; + +import com.cloud.network.rules.HealthCheckPolicy; +import com.cloud.network.rules.LoadBalancer; +import com.cloud.user.Account; +import com.cloud.user.UserContext; + +@APICommand(name = "listLBHealthCheckPolicies", description = "Lists load balancer HealthCheck policies.", responseObject = LBHealthCheckResponse.class, since="4.2.0") +public class ListLBHealthCheckPoliciesCmd extends BaseListCmd { + public static final Logger s_logger = Logger + .getLogger(ListLBHealthCheckPoliciesCmd.class.getName()); + + private static final String s_name = "listlbhealthcheckpoliciesresponse"; + + // /////////////////////////////////////////////////// + // ////////////// API parameters ///////////////////// + // /////////////////////////////////////////////////// + @Parameter(name = ApiConstants.LBID, type = CommandType.UUID, entityType = FirewallRuleResponse.class, + required = true, description = "the ID of the load balancer rule") + private Long lbRuleId; + + // /////////////////////////////////////////////////// + // ///////////////// Accessors /////////////////////// + // /////////////////////////////////////////////////// + public Long getLbRuleId() { + return lbRuleId; + } + + // /////////////////////////////////////////////////// + // ///////////// API Implementation/////////////////// + // /////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public void execute() { + List hcpResponses = new ArrayList(); + LoadBalancer lb = _lbService.findById(getLbRuleId()); + ListResponse response = new ListResponse(); + + if (lb != null) { + List healthCheckPolicies = _lbService.searchForLBHealthCheckPolicies(this); + LBHealthCheckResponse spResponse = _responseGenerator.createLBHealthCheckPolicyResponse(healthCheckPolicies, lb); + hcpResponses.add(spResponse); + response.setResponses(hcpResponses); + } + + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } + +} diff --git a/api/src/org/apache/cloudstack/api/response/LBHealthCheckPolicyResponse.java b/api/src/org/apache/cloudstack/api/response/LBHealthCheckPolicyResponse.java new file mode 100644 index 00000000000..5dd123c03a3 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/response/LBHealthCheckPolicyResponse.java @@ -0,0 +1,98 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import com.cloud.network.rules.HealthCheckPolicy; +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.api.BaseResponse; + +public class LBHealthCheckPolicyResponse extends BaseResponse { + @SerializedName("id") + @Param(description = "the LB HealthCheck policy ID") + private String id; + + @SerializedName("pingpath") + @Param(description = "the pingpath of the healthcheck policy") + private String pingpath; + + @SerializedName("description") + @Param(description = "the description of the healthcheck policy") + private String description; + + @SerializedName("state") + @Param(description = "the state of the policy") + private String state; + + @SerializedName("responsetime") + @Param(description = "Time to wait when receiving a response from the health check") + private int responseTime; + + @SerializedName("healthcheckinterval") + @Param(description = "Amount of time between health checks") + private int healthcheckInterval; + + @SerializedName("healthcheckthresshold") + @Param(description = "Number of consecutive health check success before declaring an instance healthy") + private int healthcheckthresshold; + + @SerializedName("unhealthcheckthresshold") + @Param(description = "Number of consecutive health check failures before declaring an instance unhealthy.") + private int unhealthcheckthresshold; + + public void setId(String id) { + this.id = id; + } + + public String getpingpath() { + return pingpath; + } + + public void setpingpath(String pingpath) { + this.pingpath = pingpath; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public String getState() { + return state; + } + + public void setState(String state) { + this.state = state; + } + + public LBHealthCheckPolicyResponse(HealthCheckPolicy healthcheckpolicy) { + if (healthcheckpolicy.isRevoke()) { + this.setState("Revoked"); + } + if (healthcheckpolicy.getUuid() != null) + setId(healthcheckpolicy.getUuid()); + this.pingpath = healthcheckpolicy.getpingpath(); + this.healthcheckInterval = healthcheckpolicy.getHealthcheckInterval(); + this.responseTime = healthcheckpolicy.getResponseTime(); + this.healthcheckthresshold = healthcheckpolicy.getHealthcheckThresshold(); + this.unhealthcheckthresshold = healthcheckpolicy.getUnhealthThresshold(); + setObjectName("healthcheckpolicy"); + } +} diff --git a/api/src/org/apache/cloudstack/api/response/LBHealthCheckResponse.java b/api/src/org/apache/cloudstack/api/response/LBHealthCheckResponse.java new file mode 100644 index 00000000000..182013ffef0 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/response/LBHealthCheckResponse.java @@ -0,0 +1,102 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import com.cloud.network.rules.HealthCheckPolicy; +import org.apache.cloudstack.api.ApiConstants; +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; + +import java.util.List; +import java.util.UUID; + +@EntityReference(value=HealthCheckPolicy.class) +public class LBHealthCheckResponse extends BaseResponse { +@SerializedName("lbruleid") + @Param(description = "the LB rule ID") + private String lbRuleId; + + + @SerializedName("account") + @Param(description = "the account of the HealthCheck policy") + private String accountName; + + @SerializedName(ApiConstants.DOMAIN_ID) + @Param(description = "the domain ID of the HealthCheck policy") + private String domainId; + + @SerializedName("domain") + @Param(description = "the domain of the HealthCheck policy") + private String domainName; + + @SerializedName(ApiConstants.ZONE_ID) + @Param(description = "the id of the zone the HealthCheck policy belongs to") + private String zoneId; + + @SerializedName("healthcheckpolicy") + @Param(description = "the list of healthcheckpolicies", responseObject = LBHealthCheckPolicyResponse.class) + private List healthCheckPolicies; + + public void setlbRuleId(String lbRuleId) { + this.lbRuleId = lbRuleId; + } + + public void setRules(List policies) { + this.healthCheckPolicies = policies; + } + + public List getHealthCheckPolicies() { + return healthCheckPolicies; + } + + public void setHealthCheckPolicies(List healthCheckPolicies) { + this.healthCheckPolicies = healthCheckPolicies; + } + + public String getAccountName() { + return accountName; + } + + public void setAccountName(String accountName) { + this.accountName = accountName; + } + + public void setDomainId(String domainId) { + this.domainId = domainId; + } + + public void setZoneId(String zoneId) { + this.zoneId = zoneId; + } + + public String getDomainName() { + return domainName; + } + + public void setDomainName(String domainName) { + this.domainName = domainName; + } + + public LBHealthCheckResponse() { + } + + public LBHealthCheckResponse(HealthCheckPolicy healthcheckpolicy) { + setObjectName("healthcheckpolicy"); + } +} diff --git a/client/tomcatconf/commands.properties.in b/client/tomcatconf/commands.properties.in index 5018236e5e2..382573b864c 100644 --- a/client/tomcatconf/commands.properties.in +++ b/client/tomcatconf/commands.properties.in @@ -151,6 +151,9 @@ createLBStickinessPolicy=15 deleteLBStickinessPolicy=15 listLoadBalancerRules=15 listLBStickinessPolicies=15 +listLBHealthCheckPolicies=15 +createLBHealthCheckPolicy=15 +deleteLBHealthCheckPolicy=15 listLoadBalancerRuleInstances=15 updateLoadBalancerRule=15 diff --git a/plugins/network-elements/dns-notifier/resources/components-example.xml b/plugins/network-elements/dns-notifier/resources/components-example.xml index 
36441bd667b..3a92a258e9f 100755 --- a/plugins/network-elements/dns-notifier/resources/components-example.xml +++ b/plugins/network-elements/dns-notifier/resources/components-example.xml @@ -155,6 +155,8 @@ under the License. + + diff --git a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java index abb36c36963..bebba3cb09d 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java +++ b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java @@ -28,6 +28,7 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.configuration.Config; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.deploy.DeployDestination; @@ -197,4 +198,10 @@ public class ElasticLoadBalancerElement extends AdapterBase implements LoadBalan public IpDeployer getIpDeployer(Network network) { return this; } + + @Override + public List updateHealthChecks(Network network, List lbrules) { + return null; + } + } diff --git a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java index 81039d1f3c7..283b517dce9 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java +++ b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java @@ -94,6 +94,7 @@ import com.cloud.network.dao.NetworkVO; import com.cloud.network.dao.PhysicalNetworkServiceProviderDao; import com.cloud.network.dao.VirtualRouterProviderDao; import com.cloud.network.lb.LoadBalancingRule.LbDestination; +import com.cloud.network.lb.LoadBalancingRule.LbHealthCheckPolicy; import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; import com.cloud.network.lb.dao.ElasticLbVmMapDao; import com.cloud.network.router.VirtualRouter; @@ -367,9 +368,10 @@ ElasticLoadBalancerManager, VirtualMachineGuru { for (LoadBalancerVO lb : lbs) { List dstList = _lbMgr.getExistingDestinations(lb.getId()); List policyList = _lbMgr.getStickinessPolicies(lb.getId()); + List hcPolicyList = _lbMgr.getHealthCheckPolicies(lb.getId()); LoadBalancingRule loadBalancing = new LoadBalancingRule( - lb, dstList, policyList); - lbRules.add(loadBalancing); + lb, dstList, policyList, hcPolicyList); + lbRules.add(loadBalancing); } return applyLBRules(elbVm, lbRules, network.getId()); } else if (elbVm.getState() == State.Stopped @@ -940,7 +942,8 @@ ElasticLoadBalancerManager, VirtualMachineGuru { for (LoadBalancerVO lb : lbs) { List dstList = _lbMgr.getExistingDestinations(lb.getId()); List policyList = _lbMgr.getStickinessPolicies(lb.getId()); - LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList); + List hcPolicyList = _lbMgr.getHealthCheckPolicies(lb.getId()); + LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList, hcPolicyList); lbRules.add(loadBalancing); } diff --git a/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java 
b/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java index 94c098ed4bb..3e75c3f1afe 100644 --- a/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java +++ b/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java @@ -29,6 +29,7 @@ import org.apache.cloudstack.api.response.ExternalLoadBalancerResponse; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; import org.apache.log4j.Logger; +import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.api.ApiDBUtils; import com.cloud.api.commands.AddExternalLoadBalancerCmd; import com.cloud.api.commands.AddF5LoadBalancerCmd; @@ -496,4 +497,11 @@ public class F5ExternalLoadBalancerElement extends ExternalLoadBalancerDeviceMan } return this; } + + @Override + public List updateHealthChecks(Network network, + List lbrules) { + // TODO Auto-generated method stub + return null; + } } diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java index c1c735aa270..a90440cc2f3 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java +++ b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java @@ -292,7 +292,8 @@ StaticNatServiceProvider { lbCapabilities.put(Capability.SupportedStickinessMethods, stickyMethodList); lbCapabilities.put(Capability.ElasticLb, "true"); - + //Setting HealthCheck Capability to True for Netscaler element + lbCapabilities.put(Capability.HealthCheckPolicy, "true"); capabilities.put(Service.Lb, lbCapabilities); Map staticNatCapabilities = new HashMap(); @@ -814,4 +815,26 @@ StaticNatServiceProvider { } return null; } + + @Override + public List updateHealthChecks(Network network, List lbrules) { + + if (canHandle(network, Service.Lb)) { + try { + return getLBHealthChecks(network, lbrules); + } catch (ResourceUnavailableException e) { + s_logger.error("Error in getting the LB Rules from NetScaler " + e); + } + } else { + s_logger.error("Network cannot handle to LB service "); + } + return null; + } + + @Override + public List getLBHealthChecks(Network network, List rules) + throws ResourceUnavailableException { + return super.getLBHealthChecks(network, rules); + + } } diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java b/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java index abea4649dbe..4eb0ce2065d 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java +++ b/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java @@ -11,11 +11,12 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
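
A minimal sketch (not part of the patch, and assuming the element returns a list of LoadBalancerTO as the NetScaler implementation above does) of how a caller of the new updateHealthChecks() hook could read back the per-destination monitor state filled in by the device resource; lbProvider, network, lbRules and s_logger are assumed to come from the surrounding manager code:

    List<LoadBalancerTO> checked = lbProvider.updateHealthChecks(network, lbRules);
    if (checked != null) {
        for (LoadBalancerTO lb : checked) {
            for (LoadBalancerTO.DestinationTO dest : lb.getDestinations()) {
                // monitorState is taken from the device's service/monitor binding (e.g. UP or DOWN)
                s_logger.debug(lb.getSrcIp() + ":" + lb.getSrcPort() + " -> " + dest.getDestIp() + ":"
                        + dest.getDestPort() + " monitor state: " + dest.getMonitorState());
            }
        }
    }
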
package com.cloud.network.resource; +import java.util.ArrayList; import java.util.Formatter; import java.util.HashMap; import java.util.List; @@ -31,6 +32,7 @@ import com.citrix.netscaler.nitro.resource.base.base_response; import com.citrix.netscaler.nitro.resource.config.autoscale.autoscalepolicy; import com.citrix.netscaler.nitro.resource.config.autoscale.autoscaleprofile; import com.citrix.netscaler.nitro.resource.config.basic.server_service_binding; +import com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding; import com.citrix.netscaler.nitro.resource.config.basic.servicegroup; import com.citrix.netscaler.nitro.resource.config.basic.servicegroup_lbmonitor_binding; import com.citrix.netscaler.nitro.resource.config.lb.lbmetrictable; @@ -71,6 +73,8 @@ import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupExternalLoadBalancerCommand; import com.cloud.agent.api.routing.CreateLoadBalancerApplianceCommand; import com.cloud.agent.api.routing.DestroyLoadBalancerApplianceCommand; +import com.cloud.agent.api.routing.HealthCheckLBConfigAnswer; +import com.cloud.agent.api.routing.HealthCheckLBConfigCommand; import com.cloud.agent.api.routing.IpAssocAnswer; import com.cloud.agent.api.routing.IpAssocCommand; import com.cloud.agent.api.routing.LoadBalancerConfigCommand; @@ -84,6 +88,7 @@ import com.cloud.agent.api.to.LoadBalancerTO.AutoScaleVmProfileTO; import com.cloud.agent.api.to.LoadBalancerTO.ConditionTO; import com.cloud.agent.api.to.LoadBalancerTO.CounterTO; import com.cloud.agent.api.to.LoadBalancerTO.DestinationTO; +import com.cloud.agent.api.to.LoadBalancerTO.HealthCheckPolicyTO; import com.cloud.agent.api.to.LoadBalancerTO.StickinessPolicyTO; import com.cloud.agent.api.to.StaticNatRuleTO; import org.apache.cloudstack.api.ApiConstants; @@ -396,12 +401,14 @@ public class NetscalerResource implements ServerResource { return execute((DestroyLoadBalancerApplianceCommand) cmd, numRetries); } else if (cmd instanceof SetStaticNatRulesCommand) { return execute((SetStaticNatRulesCommand) cmd, numRetries); - } else { + } else if (cmd instanceof HealthCheckLBConfigCommand) { + return execute((HealthCheckLBConfigCommand) cmd, numRetries); + }else { return Answer.createUnsupportedCommandAnswer(cmd); } } - private Answer execute(ReadyCommand cmd) { + private Answer execute(ReadyCommand cmd) { return new ReadyAnswer(cmd); } @@ -450,6 +457,65 @@ public class NetscalerResource implements ServerResource { return new IpAssocAnswer(cmd, results); } + private Answer execute(HealthCheckLBConfigCommand cmd, int numRetries) { + + List hcLB = new ArrayList(); + try { + + if (_isSdx) { + return Answer.createUnsupportedCommandAnswer(cmd); + } + + LoadBalancerTO[] loadBalancers = cmd.getLoadBalancers(); + + if (loadBalancers == null) { + return new HealthCheckLBConfigAnswer(hcLB); + } + + for (LoadBalancerTO loadBalancer : loadBalancers) { + HealthCheckPolicyTO[] healthCheckPolicies = loadBalancer.getHealthCheckPolicies(); + if ((healthCheckPolicies != null) && (healthCheckPolicies.length > 0) + && (healthCheckPolicies[0] != null)) { + String nsVirtualServerName = generateNSVirtualServerName(loadBalancer.getSrcIp(), + loadBalancer.getSrcPort()); + + com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding[] serviceBindings = com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding + .get(_netscalerService, nsVirtualServerName); + + if (serviceBindings != null) { + for (DestinationTO destination : loadBalancer.getDestinations()) { + String 
nsServiceName = generateNSServiceName(destination.getDestIp(), + destination.getDestPort()); + for (com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding binding : serviceBindings) { + if (nsServiceName.equalsIgnoreCase(binding.get_servicename())) { + destination.setMonitorState(binding.get_curstate()); + break; + } + } + } + hcLB.add(loadBalancer); + } + } + } + + } catch (ExecutionException e) { + s_logger.error("Failed to execute HealthCheckLBConfigCommand due to ", e); + if (shouldRetry(numRetries)) { + return retry(cmd, numRetries); + } else { + return new HealthCheckLBConfigAnswer(hcLB); + } + } catch (Exception e) { + s_logger.error("Failed to execute HealthCheckLBConfigCommand due to ", e); + if (shouldRetry(numRetries)) { + return retry(cmd, numRetries); + } else { + return new HealthCheckLBConfigAnswer(hcLB); + } + } + return new HealthCheckLBConfigAnswer(hcLB); + } + private synchronized Answer execute(LoadBalancerConfigCommand cmd, int numRetries) { try { if (_isSdx) { @@ -467,12 +533,13 @@ public class NetscalerResource implements ServerResource { String lbProtocol = getNetScalerProtocol(loadBalancer); String lbAlgorithm = loadBalancer.getAlgorithm(); String nsVirtualServerName = generateNSVirtualServerName(srcIp, srcPort); - + String nsMonitorName = generateNSMonitorName(srcIp, srcPort); if(loadBalancer.isAutoScaleVmGroupTO()) { applyAutoScaleConfig(loadBalancer); return new Answer(cmd); } - + boolean hasMonitor = false; + boolean deleteMonitor = false; boolean destinationsToAdd = false; for (DestinationTO destination : loadBalancer.getDestinations()) { if (!destination.isRevoked()) { @@ -489,11 +556,28 @@ public class NetscalerResource implements ServerResource { s_logger.debug("Created load balancing virtual server " + nsVirtualServerName + " on the Netscaler device"); } + // create a new monitor + HealthCheckPolicyTO[] healthCheckPolicies = loadBalancer.getHealthCheckPolicies(); + if ((healthCheckPolicies != null) && (healthCheckPolicies.length > 0) + && (healthCheckPolicies[0] != null)) { + + for (HealthCheckPolicyTO healthCheckPolicyTO : healthCheckPolicies) { + if ( !healthCheckPolicyTO.isRevoked() ) { + addLBMonitor(nsMonitorName, lbProtocol, healthCheckPolicyTO); + hasMonitor = true; + } + else { + deleteMonitor = true; + hasMonitor = false; + } + } + + } + for (DestinationTO destination : loadBalancer.getDestinations()) { String nsServerName = generateNSServerName(destination.getDestIp()); String nsServiceName = generateNSServiceName(destination.getDestIp(), destination.getDestPort()); - if (!destination.isRevoked()) { // add a new destination to deployed load balancing rule @@ -534,6 +618,26 @@ public class NetscalerResource implements ServerResource { throw new ExecutionException("Failed to bind service: " + nsServiceName + " to the lb virtual server: " + nsVirtualServerName + " on Netscaler device"); } } + + // After binding the service to the LB Vserver + // successfully, bind the created monitor to the + // service. + if (hasMonitor) { + if (!isServiceBoundToMonitor(nsServiceName, nsMonitorName)) { + bindServiceToMonitor(nsServiceName, nsMonitorName); + } + } else { + // check if any monitor created by CS is already + // existing, if yes, unbind it from services and + // delete it. 
+ if (nsMonitorExist(nsMonitorName)) { + // unbind the service from the monitor and + // delete the monitor + unBindServiceToMonitor(nsServiceName, nsMonitorName); + deleteMonitor = true; + } + + } if (s_logger.isDebugEnabled()) { s_logger.debug("Successfully added LB destination: " + destination.getDestIp() + ":" + destination.getDestPort() + " to load balancer " + srcIp + ":" + srcPort); } @@ -609,8 +713,13 @@ public class NetscalerResource implements ServerResource { } } removeLBVirtualServer(nsVirtualServerName); + deleteMonitor = true; } } + if(deleteMonitor) { + removeLBMonitor(nsMonitorName); + } + } if (s_logger.isInfoEnabled()) { @@ -1223,23 +1332,64 @@ public class NetscalerResource implements ServerResource { } } + private lbmonitor getMonitorIfExisits(String lbMonitorName) throws ExecutionException { + try { + return lbmonitor.get(_netscalerService, lbMonitorName); + } catch (nitro_exception e) { + if (e.getErrorCode() == NitroError.NS_RESOURCE_NOT_EXISTS) { + return null; + } else { + throw new ExecutionException(e.getMessage()); + } + } catch (Exception e) { + throw new ExecutionException(e.getMessage()); + } + } + private boolean isServiceBoundToVirtualServer(String serviceName) throws ExecutionException { try { lbvserver[] lbservers = lbvserver.get(_netscalerService); for (lbvserver vserver : lbservers) { filtervalue[] filter = new filtervalue[1]; filter[0] = new filtervalue("servicename", serviceName); - lbvserver_service_binding[] result = lbvserver_service_binding.get_filtered(_netscalerService, vserver.get_name(), filter); + lbvserver_service_binding[] result = lbvserver_service_binding.get_filtered(_netscalerService, + vserver.get_name(), filter); if (result != null && result.length > 0) { return true; } } return false; } catch (Exception e) { - throw new ExecutionException("Failed to verify service " + serviceName + " is bound to any virtual server due to " + e.getMessage()); + throw new ExecutionException("Failed to verify service " + serviceName + + " is bound to any virtual server due to " + e.getMessage()); } } + private boolean isServiceBoundToMonitor(String nsServiceName, String nsMonitorName) throws ExecutionException { + + filtervalue[] filter = new filtervalue[1]; + filter[0] = new filtervalue("monitor_name", nsMonitorName); + service_lbmonitor_binding[] result; + try { + result = service_lbmonitor_binding.get_filtered(_netscalerService, nsServiceName, filter); + if (result != null && result.length > 0) { + return true; + } + + } catch (Exception e) { + throw new ExecutionException("Failed to verify service " + nsServiceName + + " is bound to any monitor due to " + e.getMessage()); + } + return false; + } + + private boolean nsMonitorExist(String nsMonitorname) throws ExecutionException { + if (getMonitorIfExisits(nsMonitorname) != null) + return true; + else + return false; + } + private boolean nsServiceExists(String serviceName) throws ExecutionException { try { if (com.citrix.netscaler.nitro.resource.config.basic.service.get(_netscalerService, serviceName) != null) { @@ -1480,6 +1630,126 @@ public class NetscalerResource implements ServerResource { } } + // Monitor related methods + private void addLBMonitor(String nsMonitorName, String lbProtocol, HealthCheckPolicyTO hcp) + throws ExecutionException { + try { + // check if the monitor exists + boolean csMonitorExisis = false; + lbmonitor csMonitor = getMonitorIfExisits(nsMonitorName); + if (csMonitor != null) { + if (!csMonitor.get_type().equalsIgnoreCase(lbProtocol)) { + throw new 
ExecutionException("Can not update monitor :" + nsMonitorName + " as current protocol:" + + csMonitor.get_type() + " of monitor is different from the " + " intended protocol:" + + lbProtocol); + } + csMonitorExisis = true; + } + if (!csMonitorExisis) { + lbmonitor csMon = new lbmonitor(); + csMon.set_monitorname(nsMonitorName); + csMon.set_type(lbProtocol); + if (lbProtocol.equalsIgnoreCase("HTTP")) { + csMon.set_httprequest(hcp.getpingPath()); + s_logger.trace("LB Protocol is HTTP, Applying ping path on HealthCheck Policy"); + } else { + s_logger.debug("LB Protocol is not HTTP, Skipping to apply ping path on HealthCheck Policy"); + } + + csMon.set_interval(hcp.getHealthcheckInterval()); + csMon.set_resptimeout(hcp.getResponseTime()); + csMon.set_failureretries(hcp.getUnhealthThresshold()); + csMon.set_successretries(hcp.getHealthcheckThresshold()); + s_logger.debug("Monitor properites going to get created :interval :: " + csMon.get_interval() + "respTimeOUt:: " + csMon.get_resptimeout() + +"failure retires(unhealththresshold) :: " + csMon.get_failureretries() + "successtries(healththresshold) ::" + csMon.get_successretries()); + lbmonitor.add(_netscalerService, csMon); + } else { + s_logger.debug("Monitor :" + nsMonitorName + " is already existing. Skipping to delete and create it"); + } + } catch (nitro_exception e) { + throw new ExecutionException("Failed to create new monitor :" + nsMonitorName + " due to " + e.getMessage()); + } catch (Exception e) { + throw new ExecutionException("Failed to create new monitor :" + nsMonitorName + " due to " + e.getMessage()); + } + } + + private void bindServiceToMonitor(String nsServiceName, String nsMonitorName) throws ExecutionException { + + try { + com.citrix.netscaler.nitro.resource.config.basic.service serviceObject = new com.citrix.netscaler.nitro.resource.config.basic.service(); + serviceObject = com.citrix.netscaler.nitro.resource.config.basic.service.get(_netscalerService, + nsServiceName); + if (serviceObject != null) { + com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding serviceMonitor = new com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding(); + serviceMonitor.set_monitor_name(nsMonitorName); + serviceMonitor.set_name(nsServiceName); + serviceMonitor.set_monstate("ENABLED"); + s_logger.debug("Trying to bind the monitor :" + nsMonitorName + " to the service :" + nsServiceName); + com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding.add(_netscalerService, + serviceMonitor); + s_logger.debug("Successfully binded the monitor :" + nsMonitorName + " to the service :" + + nsServiceName); + } + } catch (nitro_exception e) { + throw new ExecutionException("Failed to create new monitor :" + nsMonitorName + " due to " + e.getMessage()); + } catch (Exception e) { + throw new ExecutionException("Failed to create new monitor :" + nsMonitorName + " due to " + e.getMessage()); + } + } + + private void unBindServiceToMonitor(String nsServiceName, String nsMonitorName) throws ExecutionException { + + try { + com.citrix.netscaler.nitro.resource.config.basic.service serviceObject = new com.citrix.netscaler.nitro.resource.config.basic.service(); + serviceObject = com.citrix.netscaler.nitro.resource.config.basic.service.get(_netscalerService, + nsServiceName); + + if (serviceObject != null) { + com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding serviceMonitor = new com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding(); + 
serviceMonitor.set_monitor_name(nsMonitorName); + serviceMonitor.set_name(nsServiceName); + s_logger.debug("Trying to unbind the monitor :" + nsMonitorName + " from the service :" + + nsServiceName); + service_lbmonitor_binding.delete(_netscalerService, serviceMonitor); + s_logger.debug("Successfully unbinded the monitor :" + nsMonitorName + " from the service :" + + nsServiceName); + } + + } catch (nitro_exception e) { + if (e.getErrorCode() == NitroError.NS_RESOURCE_NOT_EXISTS) { + return; + } else { + throw new ExecutionException("Failed to unbind monitor :" + nsMonitorName + "from the service :" + + nsServiceName + "due to " + e.getMessage()); + } + } catch (Exception e) { + throw new ExecutionException("Failed to unbind monitor :" + nsMonitorName + "from the service :" + + nsServiceName + "due to " + e.getMessage()); + } + + } + + private void removeLBMonitor(String nsMonitorName) throws ExecutionException { + + try { + if (nsMonitorExist(nsMonitorName)) { + lbmonitor monitorObj = lbmonitor.get(_netscalerService, nsMonitorName); + monitorObj.set_respcode(null); + lbmonitor.delete(_netscalerService, monitorObj); + s_logger.info("Successfully deleted monitor : " + nsMonitorName); + } + } catch (nitro_exception e) { + if (e.getErrorCode() == NitroError.NS_RESOURCE_NOT_EXISTS) { + return; + } else { + throw new ExecutionException("Failed to delete monitor :" + nsMonitorName + " due to " + e.getMessage()); + } + } catch (Exception e) { + throw new ExecutionException("Failed to delete monitor :" + nsMonitorName + " due to " + e.getMessage()); + } + + } + public synchronized void applyAutoScaleConfig(LoadBalancerTO loadBalancer) throws Exception, ExecutionException { AutoScaleVmGroupTO vmGroupTO = loadBalancer.getAutoScaleVmGroupTO(); @@ -2229,6 +2499,11 @@ public class NetscalerResource implements ServerResource { return genObjectName("Cloud-VirtualServer", srcIp, srcPort); } + private String generateNSMonitorName(String srcIp, long srcPort) { + // maximum length supported by NS is 31 + return genObjectName("Cloud-Hc", srcIp, srcPort); + } + private String generateNSServerName(String serverIP) { return genObjectName("Cloud-Server-", serverIP); } diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java index 2546f292883..ebf0fcf73d3 100755 --- a/server/src/com/cloud/api/ApiResponseHelper.java +++ b/server/src/com/cloud/api/ApiResponseHelper.java @@ -46,6 +46,8 @@ import org.apache.cloudstack.api.response.ControlledViewEntityResponse; import org.apache.cloudstack.api.response.IPAddressResponse; import org.apache.cloudstack.api.response.InstanceGroupResponse; import org.apache.cloudstack.api.response.IpForwardingRuleResponse; +import org.apache.cloudstack.api.response.LBHealthCheckPolicyResponse; +import org.apache.cloudstack.api.response.LBHealthCheckResponse; import org.apache.cloudstack.api.response.LBStickinessPolicyResponse; import org.apache.cloudstack.api.response.LBStickinessResponse; import org.apache.cloudstack.api.response.LDAPConfigResponse; @@ -143,6 +145,13 @@ import com.cloud.network.dao.NetworkVO; import com.cloud.network.dao.PhysicalNetworkVO; import com.cloud.network.router.VirtualRouter; import com.cloud.network.rules.*; +import com.cloud.network.rules.FirewallRule; +import com.cloud.network.rules.FirewallRuleVO; +import com.cloud.network.rules.HealthCheckPolicy; +import com.cloud.network.rules.LoadBalancer; +import com.cloud.network.rules.PortForwardingRule; +import com.cloud.network.rules.StaticNatRule; +import 
com.cloud.network.rules.StickinessPolicy; import com.cloud.network.security.SecurityGroup; import com.cloud.network.security.SecurityGroupVO; import com.cloud.network.security.SecurityRule; @@ -2750,6 +2759,58 @@ public class ApiResponseHelper implements ResponseGenerator { return spResponse; } + @Override + public LBHealthCheckResponse createLBHealthCheckPolicyResponse( + List healthcheckPolicies, LoadBalancer lb) { + LBHealthCheckResponse hcResponse = new LBHealthCheckResponse(); + + if (lb == null) + return hcResponse; + hcResponse.setlbRuleId(lb.getUuid()); + Account account = ApiDBUtils.findAccountById(lb.getAccountId()); + if (account != null) { + hcResponse.setAccountName(account.getAccountName()); + Domain domain = ApiDBUtils.findDomainById(account.getDomainId()); + if (domain != null) { + hcResponse.setDomainId(domain.getUuid()); + hcResponse.setDomainName(domain.getName()); + } + } + + List responses = new ArrayList(); + for (HealthCheckPolicy healthcheckPolicy : healthcheckPolicies) { + LBHealthCheckPolicyResponse ruleResponse = new LBHealthCheckPolicyResponse(healthcheckPolicy); + responses.add(ruleResponse); + } + hcResponse.setRules(responses); + + hcResponse.setObjectName("healthcheckpolicies"); + return hcResponse; + } + + @Override + public LBHealthCheckResponse createLBHealthCheckPolicyResponse(HealthCheckPolicy healthcheckPolicy, LoadBalancer lb) { + LBHealthCheckResponse hcResponse = new LBHealthCheckResponse(); + + hcResponse.setlbRuleId(lb.getUuid()); + Account accountTemp = ApiDBUtils.findAccountById(lb.getAccountId()); + if (accountTemp != null) { + hcResponse.setAccountName(accountTemp.getAccountName()); + Domain domain = ApiDBUtils.findDomainById(accountTemp.getDomainId()); + if (domain != null) { + hcResponse.setDomainId(domain.getUuid()); + hcResponse.setDomainName(domain.getName()); + } + } + + List responses = new ArrayList(); + LBHealthCheckPolicyResponse ruleResponse = new LBHealthCheckPolicyResponse(healthcheckPolicy); + responses.add(ruleResponse); + hcResponse.setRules(responses); + hcResponse.setObjectName("healthcheckpolicies"); + return hcResponse; + } + @Override public LDAPConfigResponse createLDAPConfigResponse(String hostname, Integer port, Boolean useSSL, String queryFilter, diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index d01ea5bbaf1..9db7dbdd89b 100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -207,7 +207,11 @@ public enum Config { AlertPurgeInterval("Advanced", ManagementServer.class, Integer.class, "alert.purge.interval", "86400", "The interval (in seconds) to wait before running the alert purge thread", null), AlertPurgeDelay("Advanced", ManagementServer.class, Integer.class, "alert.purge.delay", "0", "Alerts older than specified number days will be purged. Set this value to 0 to never delete alerts", null), - DirectAttachNetworkEnabled("Advanced", ManagementServer.class, Boolean.class, "direct.attach.network.externalIpAllocator.enabled", "false", "Direct-attach VMs using external DHCP server", "true,false"), + // LB HealthCheck Interval. 
+ LBHealthCheck("Advanced", ManagementServer.class, String.class, "healthcheck.update.interval", "600", + "Time Interval to fetch the LB health check states (in sec)", null), + + DirectAttachNetworkEnabled("Advanced", ManagementServer.class, Boolean.class, "direct.attach.network.externalIpAllocator.enabled", "false", "Direct-attach VMs using external DHCP server", "true,false"), DirectAttachNetworkExternalAPIURL("Advanced", ManagementServer.class, String.class, "direct.attach.network.externalIpAllocator.url", null, "Direct-attach VMs using external DHCP server (API url)", null), CheckPodCIDRs("Advanced", ManagementServer.class, String.class, "check.pod.cidrs", "true", "If true, different pods must belong to different CIDR subnets.", "true,false"), NetworkGcWait("Advanced", ManagementServer.class, Integer.class, "network.gc.wait", "600", "Time (in seconds) to wait before shutting down a network that's not in used", null), diff --git a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManager.java b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManager.java index d979f079691..dee3ca966e9 100644 --- a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManager.java +++ b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManager.java @@ -18,6 +18,7 @@ package com.cloud.network; import java.util.List; +import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.Host; @@ -96,7 +97,9 @@ public interface ExternalLoadBalancerDeviceManager extends Manager{ * @throws ResourceUnavailableException * @throws InsufficientCapacityException */ - public boolean manageGuestNetworkWithExternalLoadBalancer(boolean add, Network guestConfig) throws ResourceUnavailableException, + public boolean manageGuestNetworkWithExternalLoadBalancer(boolean add, Network guestConfig) throws ResourceUnavailableException, InsufficientCapacityException; - + + public List getLBHealthChecks(Network network, List rules) + throws ResourceUnavailableException; } diff --git a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java index d7b6d78c9bb..049099d6b1f 100644 --- a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java +++ b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java @@ -34,6 +34,8 @@ import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupExternalLoadBalancerCommand; import com.cloud.agent.api.routing.CreateLoadBalancerApplianceCommand; import com.cloud.agent.api.routing.DestroyLoadBalancerApplianceCommand; +import com.cloud.agent.api.routing.HealthCheckLBConfigAnswer; +import com.cloud.agent.api.routing.HealthCheckLBConfigCommand; import com.cloud.agent.api.routing.IpAssocCommand; import com.cloud.agent.api.routing.LoadBalancerConfigCommand; import com.cloud.agent.api.routing.NetworkElementCommand; @@ -888,7 +890,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase if ((destinations != null && !destinations.isEmpty()) || rule.isAutoScaleConfig()) { boolean inline = _networkMgr.isNetworkInlineMode(network); - LoadBalancerTO loadBalancer = new LoadBalancerTO(uuid, srcIp, srcPort, protocol, algorithm, revoked, false, inline, destinations, rule.getStickinessPolicies()); + LoadBalancerTO loadBalancer = new LoadBalancerTO(uuid, srcIp, srcPort, protocol, algorithm, revoked, false, inline, destinations, 
rule.getStickinessPolicies(), rule.getHealthCheckPolicies()); if (rule.isAutoScaleConfig()) { loadBalancer.setAutoScaleVmGroup(rule.getAutoScaleVmGroup()); } @@ -1111,4 +1113,95 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase s_logger.info("Let " + element.getName() + " handle ip association for " + getName() + " in network " + network.getId()); return (IpDeployer)element; } + + @Override + public List getLBHealthChecks(Network network, List rules) + throws ResourceUnavailableException { + + // Find the external load balancer in this zone + long zoneId = network.getDataCenterId(); + DataCenterVO zone = _dcDao.findById(zoneId); + HealthCheckLBConfigAnswer answer = null; + + List loadBalancingRules = new ArrayList(); + + for (FirewallRule rule : rules) { + if (rule.getPurpose().equals(Purpose.LoadBalancing)) { + loadBalancingRules.add((LoadBalancingRule) rule); + } + } + + if (loadBalancingRules == null || loadBalancingRules.isEmpty()) { + return null; + } + + ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(network); + if (lbDeviceVO == null) { + s_logger.warn("There is no external load balancer device assigned to this network either network is not implement are already shutdown so just returning"); + return null; + } + + HostVO externalLoadBalancer = _hostDao.findById(lbDeviceVO.getHostId()); + + boolean externalLoadBalancerIsInline = _networkMgr.isNetworkInlineMode(network); + + if (network.getState() == Network.State.Allocated) { + s_logger.debug("External load balancer was asked to apply LB rules for network with ID " + network.getId() + + "; this network is not implemented. Skipping backend commands."); + return null; + } + + List loadBalancersToApply = new ArrayList(); + List mappingStates = new ArrayList(); + for (int i = 0; i < loadBalancingRules.size(); i++) { + LoadBalancingRule rule = loadBalancingRules.get(i); + + boolean revoked = (rule.getState().equals(FirewallRule.State.Revoke)); + String protocol = rule.getProtocol(); + String algorithm = rule.getAlgorithm(); + String uuid = rule.getUuid(); + String srcIp = _networkModel.getIp(rule.getSourceIpAddressId()).getAddress().addr(); + int srcPort = rule.getSourcePortStart(); + List destinations = rule.getDestinations(); + + if (externalLoadBalancerIsInline) { + MappingNic nic = getLoadBalancingIpNic(zone, network, rule.getSourceIpAddressId(), revoked, null); + mappingStates.add(nic.getState()); + NicVO loadBalancingIpNic = nic.getNic(); + if (loadBalancingIpNic == null) { + continue; + } + + // Change the source IP address for the load balancing rule to + // be the load balancing IP address + srcIp = loadBalancingIpNic.getIp4Address(); + } + + if ((destinations != null && !destinations.isEmpty()) || !rule.isAutoScaleConfig()) { + boolean inline = _networkMgr.isNetworkInlineMode(network); + LoadBalancerTO loadBalancer = new LoadBalancerTO(uuid, srcIp, srcPort, protocol, algorithm, revoked, + false, inline, destinations, rule.getStickinessPolicies(), rule.getHealthCheckPolicies()); + loadBalancersToApply.add(loadBalancer); + } + } + + try { + if (loadBalancersToApply.size() > 0) { + int numLoadBalancersForCommand = loadBalancersToApply.size(); + LoadBalancerTO[] loadBalancersForCommand = loadBalancersToApply + .toArray(new LoadBalancerTO[numLoadBalancersForCommand]); + // LoadBalancerConfigCommand cmd = new + // LoadBalancerConfigCommand(loadBalancersForCommand, null); + HealthCheckLBConfigCommand cmd = new HealthCheckLBConfigCommand(loadBalancersForCommand); + long 
guestVlanTag = Integer.parseInt(network.getBroadcastUri().getHost()); + cmd.setAccessDetail(NetworkElementCommand.GUEST_VLAN_TAG, String.valueOf(guestVlanTag)); + + answer = (HealthCheckLBConfigAnswer) _agentMgr.easySend(externalLoadBalancer.getId(), cmd); + } + } catch (Exception ex) { + s_logger.error("Exception Occured ", ex); + } + return answer.getLoadBalancers(); + } + } diff --git a/server/src/com/cloud/network/LBHealthCheckPolicyVO.java b/server/src/com/cloud/network/LBHealthCheckPolicyVO.java new file mode 100644 index 00000000000..ed03a2bbc89 --- /dev/null +++ b/server/src/com/cloud/network/LBHealthCheckPolicyVO.java @@ -0,0 +1,157 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.network; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.PrimaryKeyJoinColumn; +import javax.persistence.Table; + +import com.cloud.network.rules.HealthCheckPolicy; +import org.apache.cloudstack.api.InternalIdentity; + +@Entity +@Table(name = ("load_balancer_healthcheck_policies")) +@PrimaryKeyJoinColumn(name = "load_balancer_id", referencedColumnName = "id") +public class LBHealthCheckPolicyVO implements HealthCheckPolicy { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "load_balancer_id") + private long loadBalancerId; + + @Column(name = "pingpath") + private String pingPath; + + @Column(name = "description") + private String description; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "response_time") + private int responseTime; + + @Column(name = "healthcheck_interval") + private int healthcheckInterval; + + @Column(name = "healthcheck_thresshold") + private int healthcheckThresshold; + + @Column(name = "unhealth_thresshold") + private int unhealthThresshold; + + @Column(name = "revoke") + private boolean revoke = false; + + protected LBHealthCheckPolicyVO() { + this.uuid = UUID.randomUUID().toString(); + } + + public LBHealthCheckPolicyVO(long loadBalancerId, String pingPath, String description, int responseTime, + int healthcheckInterval, int healthcheckThresshold, int unhealthThresshold) { + this.loadBalancerId = loadBalancerId; + + if (pingPath == null || pingPath.isEmpty()) + this.pingPath = "/"; + else + this.pingPath = pingPath; + + if (responseTime == 0) + this.responseTime = 2; + else + this.responseTime = responseTime; + + if (healthcheckInterval == 0) + this.healthcheckInterval = 5; + else + 
this.healthcheckInterval = healthcheckInterval; + + if (healthcheckThresshold == 0) + this.healthcheckThresshold = 2; + else + this.healthcheckThresshold = healthcheckThresshold; + + if (unhealthThresshold == 0) + this.unhealthThresshold = 1; + else + this.unhealthThresshold = unhealthThresshold; + this.uuid = UUID.randomUUID().toString(); + + } + + public int getResponseTime() { + return responseTime; + } + + public int getHealthcheckInterval() { + return healthcheckInterval; + } + + public int getHealthcheckThresshold() { + return healthcheckThresshold; + } + + public int getUnhealthThresshold() { + return unhealthThresshold; + } + + public long getId() { + return id; + } + + public long getLoadBalancerId() { + return loadBalancerId; + } + + public String getpingpath() { + return pingPath; + } + + public String getDescription() { + return description; + } + + public boolean isRevoke() { + return revoke; + } + + public void setRevoke(boolean revoke) { + this.revoke = revoke; + } + + @Override + public String getUuid() { + return this.uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } +} diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java index 3220c9174eb..591910b13c6 100755 --- a/server/src/com/cloud/network/NetworkManagerImpl.java +++ b/server/src/com/cloud/network/NetworkManagerImpl.java @@ -106,6 +106,7 @@ import com.cloud.network.element.UserDataServiceProvider; import com.cloud.network.guru.NetworkGuru; import com.cloud.network.lb.LoadBalancingRule; import com.cloud.network.lb.LoadBalancingRule.LbDestination; +import com.cloud.network.lb.LoadBalancingRule.LbHealthCheckPolicy; import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; import com.cloud.network.lb.LoadBalancingRulesManager; import com.cloud.network.rules.*; @@ -2310,52 +2311,51 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L @Override public boolean applyRules(List rules, FirewallRule.Purpose purpose, NetworkRuleApplier applier, boolean continueOnError) throws ResourceUnavailableException { - if (rules == null || rules.size() == 0) { - s_logger.debug("There are no rules to forward to the network elements"); - return true; - } + if (rules == null || rules.size() == 0) { + s_logger.debug("There are no rules to forward to the network elements"); + return true; + } - boolean success = true; - Network network = _networksDao.findById(rules.get(0).getNetworkId()); + boolean success = true; + Network network = _networksDao.findById(rules.get(0).getNetworkId()); FirewallRuleVO.TrafficType trafficType = rules.get(0).getTrafficType(); - List publicIps = new ArrayList(); + List publicIps = new ArrayList(); - if (! 
(rules.get(0).getPurpose() == FirewallRule.Purpose.Firewall && trafficType == FirewallRule.TrafficType.Egress)) { + if (!(rules.get(0).getPurpose() == FirewallRule.Purpose.Firewall && trafficType == FirewallRule.TrafficType.Egress)) { // get the list of public ip's owned by the network List userIps = _ipAddressDao.listByAssociatedNetwork(network.getId(), null); if (userIps != null && !userIps.isEmpty()) { for (IPAddressVO userIp : userIps) { - PublicIp publicIp = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); - publicIps.add(publicIp); - } - } + PublicIp publicIp = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); + publicIps.add(publicIp); + } + } - // rules can not programmed unless IP is associated with network service provider, so run IP assoication for - // the network so as to ensure IP is associated before applying rules (in add state) - applyIpAssociations(network, false, continueOnError, publicIps); - } - - try { - applier.applyRules(network, purpose, rules); - } catch (ResourceUnavailableException e) { - if (!continueOnError) { - throw e; - } - s_logger.warn("Problems with applying " + purpose + " rules but pushing on", e); - success = false; - } - - if (! (rules.get(0).getPurpose() == FirewallRule.Purpose.Firewall && trafficType == FirewallRule.TrafficType.Egress) ) { - // if all the rules configured on public IP are revoked then dis-associate IP with network service provider + // rules can not programmed unless IP is associated with network + // service provider, so run IP assoication for + // the network so as to ensure IP is associated before applying + // rules (in add state) + applyIpAssociations(network, false, continueOnError, publicIps); + } + + try { + applier.applyRules(network, purpose, rules); + } catch (ResourceUnavailableException e) { + if (!continueOnError) { + throw e; + } + s_logger.warn("Problems with applying " + purpose + " rules but pushing on", e); + success = false; + } + + if (!(rules.get(0).getPurpose() == FirewallRule.Purpose.Firewall && trafficType == FirewallRule.TrafficType.Egress)) { + // if all the rules configured on public IP are revoked then + // dis-associate IP with network service provider applyIpAssociations(network, true, continueOnError, publicIps); } - return success; + return success; } - - - - public class NetworkGarbageCollector implements Runnable { @@ -3099,13 +3099,14 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L lb.setState(FirewallRule.State.Revoke); List dstList = _lbMgr.getExistingDestinations(lb.getId()); List policyList = _lbMgr.getStickinessPolicies(lb.getId()); + List hcPolicyList = _lbMgr.getHealthCheckPolicies (lb.getId()); // mark all destination with revoke state for (LbDestination dst : dstList) { s_logger.trace("Marking lb destination " + dst + " with Revoke state"); dst.setRevoked(true); } - LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList); + LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList, hcPolicyList); lbRules.add(loadBalancing); } diff --git a/server/src/com/cloud/network/dao/LBHealthCheckPolicyDao.java b/server/src/com/cloud/network/dao/LBHealthCheckPolicyDao.java new file mode 100644 index 00000000000..42a9e421485 --- /dev/null +++ b/server/src/com/cloud/network/dao/LBHealthCheckPolicyDao.java @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.network.dao; + +import java.util.List; + +import com.cloud.network.LBHealthCheckPolicyVO; +import com.cloud.utils.db.GenericDao; + + +public interface LBHealthCheckPolicyDao extends + GenericDao { + void remove(long loadBalancerId); + + void remove(long loadBalancerId, Boolean pending); + + List listByLoadBalancerId(long loadBalancerId); + + List listByLoadBalancerId(long loadBalancerId, + boolean revoke); +} diff --git a/server/src/com/cloud/network/dao/LBHealthCheckPolicyDaoImpl.java b/server/src/com/cloud/network/dao/LBHealthCheckPolicyDaoImpl.java new file mode 100644 index 00000000000..65e0689e79a --- /dev/null +++ b/server/src/com/cloud/network/dao/LBHealthCheckPolicyDaoImpl.java @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.network.dao; + +import java.util.List; + +import javax.ejb.Local; + +import org.springframework.stereotype.Component; + +import com.cloud.network.LBHealthCheckPolicyVO; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchCriteria; + + +@Component +@Local(value = { LBHealthCheckPolicyDao.class }) +public class LBHealthCheckPolicyDaoImpl extends + GenericDaoBase implements + LBHealthCheckPolicyDao { + + @Override + public void remove(long loadBalancerId) { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("loadBalancerId", SearchCriteria.Op.EQ, loadBalancerId); + + expunge(sc); + } + + @Override + public void remove(long loadBalancerId, Boolean revoke) { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("loadBalancerId", SearchCriteria.Op.EQ, loadBalancerId); + sc.addAnd("revoke", SearchCriteria.Op.EQ, revoke); + + expunge(sc); + } + + @Override + public List listByLoadBalancerId(long loadBalancerId) { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("loadBalancerId", SearchCriteria.Op.EQ, loadBalancerId); + + return listBy(sc); + } + + @Override + public List listByLoadBalancerId(long loadBalancerId, + boolean pending) { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("loadBalancerId", SearchCriteria.Op.EQ, loadBalancerId); + sc.addAnd("revoke", SearchCriteria.Op.EQ, pending); + + return listBy(sc); + } + +} diff --git a/server/src/com/cloud/network/dao/LoadBalancerVMMapVO.java b/server/src/com/cloud/network/dao/LoadBalancerVMMapVO.java index 8856993a982..852302e0949 100644 --- a/server/src/com/cloud/network/dao/LoadBalancerVMMapVO.java +++ b/server/src/com/cloud/network/dao/LoadBalancerVMMapVO.java @@ -39,10 +39,14 @@ public class LoadBalancerVMMapVO implements InternalIdentity { @Column(name="instance_id") private long instanceId; - @Column(name="revoke") + @Column(name = "revoke") private boolean revoke = false; - public LoadBalancerVMMapVO() { } + @Column(name = "state") + private String state; + + public LoadBalancerVMMapVO() { + } public LoadBalancerVMMapVO(long loadBalancerId, long instanceId) { this.loadBalancerId = loadBalancerId; @@ -74,4 +78,12 @@ public class LoadBalancerVMMapVO implements InternalIdentity { public void setRevoke(boolean revoke) { this.revoke = revoke; } + + public String getState() { + return state; + } + + public void setState(String state) { + this.state = state; + } } diff --git a/server/src/com/cloud/network/element/VirtualRouterElement.java b/server/src/com/cloud/network/element/VirtualRouterElement.java index 500d0b68ece..169db3283e3 100755 --- a/server/src/com/cloud/network/element/VirtualRouterElement.java +++ b/server/src/com/cloud/network/element/VirtualRouterElement.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.api.command.admin.router.CreateVirtualRouterElement import org.apache.cloudstack.api.command.admin.router.ListVirtualRouterElementsCmd; import org.apache.log4j.Logger; +import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.DataCenter; @@ -363,7 +364,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl return true; } } else { - return true; + return false; } } @@ -938,4 +939,11 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl protected VirtualRouterProviderType getVirtualRouterProvider() { return VirtualRouterProviderType.VirtualRouter; } + + @Override + public List 
updateHealthChecks(Network network, + List lbrules) { + // TODO Auto-generated method stub + return null; + } } diff --git a/server/src/com/cloud/network/lb/LBHealthCheckManager.java b/server/src/com/cloud/network/lb/LBHealthCheckManager.java new file mode 100644 index 00000000000..2e24965aa35 --- /dev/null +++ b/server/src/com/cloud/network/lb/LBHealthCheckManager.java @@ -0,0 +1,24 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.network.lb; + + +public interface LBHealthCheckManager { + + void updateLBHealthCheck(); + +} diff --git a/server/src/com/cloud/network/lb/LBHealthCheckManagerImpl.java b/server/src/com/cloud/network/lb/LBHealthCheckManagerImpl.java new file mode 100644 index 00000000000..90547328714 --- /dev/null +++ b/server/src/com/cloud/network/lb/LBHealthCheckManagerImpl.java @@ -0,0 +1,110 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.network.lb; + +import static java.lang.String.format; + +import java.util.Map; + +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.configuration.Config; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.component.Manager; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.concurrency.NamedThreadFactory; + +@Component +@Local(value = {LBHealthCheckManager.class}) +public class LBHealthCheckManagerImpl extends ManagerBase implements LBHealthCheckManager, Manager { + private static final Logger s_logger = Logger.getLogger(LBHealthCheckManagerImpl.class); + + @Inject + ConfigurationDao _configDao; + @Inject + LoadBalancingRulesService _lbService; + + private String name; + private Map _configs; + ScheduledExecutorService _executor; + + private long _interval; + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + _configs = _configDao.getConfiguration("management-server", params); + if (s_logger.isInfoEnabled()) { + s_logger.info(format("Configuring LBHealthCheck Manager %1$s", name)); + } + this.name = name; + _interval = NumbersUtil.parseLong(_configs.get(Config.LBHealthCheck.key()), 600); + _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("LBHealthCheck")); + return true; + } + + @Override + public boolean start() { + s_logger.debug("LB HealthCheckmanager is getting Started"); + _executor.scheduleAtFixedRate(new UpdateLBHealthCheck(), 10, _interval, TimeUnit.SECONDS); + return true; + } + + @Override + public boolean stop() { + s_logger.debug("HealthCheckmanager is getting Stopped"); + _executor.shutdown(); + return true; + } + + @Override + public String getName() { + return this.name; + } + + protected class UpdateLBHealthCheck implements Runnable { + @Override + public void run() { + try { + updateLBHealthCheck(); + } catch (Exception e) { + s_logger.error("Exception in LB HealthCheck Update Checker", e); + } + } + } + + @Override + public void updateLBHealthCheck() { + try { + _lbService.updateLBHealthChecks(); + } catch (ResourceUnavailableException e) { + s_logger.debug("Error while updating the LB HealtCheck ", e); + } + s_logger.debug("LB HealthCheck Manager is running and getting the updates from LB providers and updating service status"); + } + +} diff --git a/server/src/com/cloud/network/lb/LoadBalancingRulesManager.java b/server/src/com/cloud/network/lb/LoadBalancingRulesManager.java index 9d7d22fdad7..da19f86c21f 100644 --- a/server/src/com/cloud/network/lb/LoadBalancingRulesManager.java +++ b/server/src/com/cloud/network/lb/LoadBalancingRulesManager.java @@ -20,6 +20,7 @@ import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.Network; import com.cloud.network.lb.LoadBalancingRule.LbDestination; +import com.cloud.network.lb.LoadBalancingRule.LbHealthCheckPolicy; import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; import com.cloud.network.rules.FirewallRule; import com.cloud.network.rules.LbStickinessMethod; @@ -38,6 +39,7 @@ public interface LoadBalancingRulesManager 
extends LoadBalancingRulesService { List getExistingDestinations(long lbId); List getStickinessPolicies(long lbId); List getStickinessMethods(long networkid); + List getHealthCheckPolicies(long lbId); /** * Remove vm from all load balancers diff --git a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java index 531a42805b6..a06cbc5ca99 100755 --- a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java +++ b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java @@ -33,8 +33,11 @@ import javax.inject.Inject; import com.cloud.event.UsageEventUtils; import org.apache.log4j.Logger; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.command.user.loadbalancer.CreateLBHealthCheckPolicyCmd; import org.apache.cloudstack.api.command.user.loadbalancer.CreateLBStickinessPolicyCmd; import org.apache.cloudstack.api.command.user.loadbalancer.CreateLoadBalancerRuleCmd; +import org.apache.cloudstack.api.command.user.loadbalancer.ListLBHealthCheckPoliciesCmd; import org.apache.cloudstack.api.command.user.loadbalancer.ListLBStickinessPoliciesCmd; import org.apache.cloudstack.api.command.user.loadbalancer.ListLoadBalancerRuleInstancesCmd; import org.apache.cloudstack.api.command.user.loadbalancer.ListLoadBalancerRulesCmd; @@ -42,6 +45,7 @@ import org.apache.cloudstack.api.command.user.loadbalancer.UpdateLoadBalancerRul import org.apache.cloudstack.api.response.ServiceResponse; import org.springframework.stereotype.Component; +import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.dao.ConfigurationDao; @@ -61,6 +65,7 @@ import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.ExternalLoadBalancerUsageManager; import com.cloud.network.IpAddress; +import com.cloud.network.LBHealthCheckPolicyVO; import com.cloud.network.Network; import com.cloud.network.Network.Capability; import com.cloud.network.Network.Provider; @@ -87,6 +92,7 @@ import com.cloud.network.dao.FirewallRulesCidrsDao; import com.cloud.network.dao.FirewallRulesDao; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.IPAddressVO; +import com.cloud.network.dao.LBHealthCheckPolicyDao; import com.cloud.network.dao.LBStickinessPolicyDao; import com.cloud.network.dao.LBStickinessPolicyVO; import com.cloud.network.dao.LoadBalancerDao; @@ -97,17 +103,20 @@ import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkServiceMapDao; import com.cloud.network.dao.NetworkVO; import com.cloud.network.element.LoadBalancingServiceProvider; +import com.cloud.network.element.NetworkElement; import com.cloud.network.lb.LoadBalancingRule.LbAutoScalePolicy; import com.cloud.network.lb.LoadBalancingRule.LbAutoScaleVmGroup; import com.cloud.network.lb.LoadBalancingRule.LbAutoScaleVmProfile; import com.cloud.network.lb.LoadBalancingRule.LbCondition; import com.cloud.network.lb.LoadBalancingRule.LbDestination; +import com.cloud.network.lb.LoadBalancingRule.LbHealthCheckPolicy; import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; import com.cloud.network.rules.FirewallManager; import com.cloud.network.rules.FirewallRule; import com.cloud.network.rules.FirewallRule.FirewallRuleType; import com.cloud.network.rules.FirewallRule.Purpose; import com.cloud.network.rules.FirewallRuleVO; +import 
com.cloud.network.rules.HealthCheckPolicy; import com.cloud.network.rules.LbStickinessMethod; import com.cloud.network.rules.LbStickinessMethod.LbStickinessMethodParam; import com.cloud.network.rules.LoadBalancer; @@ -151,7 +160,8 @@ import com.google.gson.reflect.TypeToken; @Component @Local(value = { LoadBalancingRulesManager.class, LoadBalancingRulesService.class }) -public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancingRulesManager, LoadBalancingRulesService, NetworkRuleApplier { +public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancingRulesManager, + LoadBalancingRulesService, NetworkRuleApplier { private static final Logger s_logger = Logger.getLogger(LoadBalancingRulesManagerImpl.class); @Inject @@ -175,6 +185,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Inject LBStickinessPolicyDao _lb2stickinesspoliciesDao; @Inject + LBHealthCheckPolicyDao _lb2healthcheckDao; + @Inject UserVmDao _vmDao; @Inject AccountDao _accountDao; @@ -199,7 +211,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Inject ExternalLoadBalancerUsageManager _externalLBUsageMgr; - @Inject + @Inject NetworkServiceMapDao _ntwkSrvcDao; @Inject ResourceTagDao _resourceTagDao; @@ -229,9 +241,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements DataCenterDao _dcDao = null; @Inject UserDao _userDao; - @Inject List _lbProviders; + @Inject + List _lbProviders; - // Will return a string. For LB Stickiness this will be a json, for autoscale this will be "," separated values + // Will return a string. For LB Stickiness this will be a json, for + // autoscale this will be "," separated values @Override public String getLBCapability(long networkid, String capabilityName) { Map> serviceCapabilitiesMap = _networkModel.getNetworkCapabilities(networkid); @@ -240,11 +254,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements ServiceResponse serviceResponse = new ServiceResponse(); serviceResponse.setName(service.getName()); if ("Lb".equalsIgnoreCase(service.getName())) { - Map serviceCapabilities = serviceCapabilitiesMap - .get(service); + Map serviceCapabilities = serviceCapabilitiesMap.get(service); if (serviceCapabilities != null) { - for (Capability capability : serviceCapabilities - .keySet()) { + for (Capability capability : serviceCapabilities.keySet()) { if (capabilityName.equals(capability.getName())) { return serviceCapabilities.get(capability); } @@ -255,14 +267,17 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } return null; } + private LbAutoScaleVmGroup getLbAutoScaleVmGroup(AutoScaleVmGroupVO vmGroup, String currentState, LoadBalancerVO lb) { long lbNetworkId = lb.getNetworkId(); String lbName = lb.getName(); - List vmGroupPolicyMapList = _autoScaleVmGroupPolicyMapDao.listByVmGroupId(vmGroup.getId()); + List vmGroupPolicyMapList = _autoScaleVmGroupPolicyMapDao.listByVmGroupId(vmGroup + .getId()); List autoScalePolicies = new ArrayList(); for (AutoScaleVmGroupPolicyMapVO vmGroupPolicyMap : vmGroupPolicyMapList) { AutoScalePolicy autoScalePolicy = _autoScalePolicyDao.findById(vmGroupPolicyMap.getPolicyId()); - List autoScalePolicyConditionMapList = _autoScalePolicyConditionMapDao.listByAll(autoScalePolicy.getId(), null); + List autoScalePolicyConditionMapList = _autoScalePolicyConditionMapDao + .listByAll(autoScalePolicy.getId(), null); List lbConditions = new ArrayList(); for (AutoScalePolicyConditionMapVO 
autoScalePolicyConditionMap : autoScalePolicyConditionMapList) { Condition condition = _conditionDao.findById(autoScalePolicyConditionMap.getConditionId()); @@ -296,32 +311,39 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } } - if (apiKey == null) { - throw new InvalidParameterValueException("apiKey for user: " + user.getUsername() + " is empty. Please generate it"); + throw new InvalidParameterValueException("apiKey for user: " + user.getUsername() + + " is empty. Please generate it"); } if (secretKey == null) { - throw new InvalidParameterValueException("secretKey for user: " + user.getUsername() + " is empty. Please generate it"); + throw new InvalidParameterValueException("secretKey for user: " + user.getUsername() + + " is empty. Please generate it"); } if (csUrl == null || csUrl.contains("localhost")) { - throw new InvalidParameterValueException("Global setting endpointe.url has to be set to the Management Server's API end point"); + throw new InvalidParameterValueException( + "Global setting endpointe.url has to be set to the Management Server's API end point"); } - - LbAutoScaleVmProfile lbAutoScaleVmProfile = new LbAutoScaleVmProfile(autoScaleVmProfile, apiKey, secretKey, csUrl, zoneId, domainId, serviceOfferingId, templateId, vmName, lbNetworkUuid); + LbAutoScaleVmProfile lbAutoScaleVmProfile = new LbAutoScaleVmProfile(autoScaleVmProfile, apiKey, secretKey, + csUrl, zoneId, domainId, serviceOfferingId, templateId, vmName, lbNetworkUuid); return new LbAutoScaleVmGroup(vmGroup, autoScalePolicies, lbAutoScaleVmProfile, currentState); } - private boolean applyAutoScaleConfig(LoadBalancerVO lb, AutoScaleVmGroupVO vmGroup, String currentState) throws ResourceUnavailableException { + private boolean applyAutoScaleConfig(LoadBalancerVO lb, AutoScaleVmGroupVO vmGroup, String currentState) + throws ResourceUnavailableException { LbAutoScaleVmGroup lbAutoScaleVmGroup = getLbAutoScaleVmGroup(vmGroup, currentState, lb); - /* Regular config like destinations need not be packed for applying autoscale config as of today.*/ - LoadBalancingRule rule = new LoadBalancingRule(lb, null, null); + /* + * Regular config like destinations need not be packed for applying + * autoscale config as of today. + */ + LoadBalancingRule rule = new LoadBalancingRule(lb, null, null, null); rule.setAutoScaleVmGroup(lbAutoScaleVmGroup); if (!isRollBackAllowedForProvider(lb)) { - // this is for Netscaler type of devices. if their is failure the db entries will be rollbacked. + // this is for Netscaler type of devices. if their is failure the db + // entries will be rollbacked. 
return false; } @@ -348,9 +370,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (vmGroup.getState().equals(AutoScaleVmGroup.State_New)) { loadBalancer.setState(FirewallRule.State.Add); _lbDao.persist(loadBalancer); - } - else if (loadBalancer.getState() == FirewallRule.State.Active && - vmGroup.getState().equals(AutoScaleVmGroup.State_Revoke)) { + } else if (loadBalancer.getState() == FirewallRule.State.Active + && vmGroup.getState().equals(AutoScaleVmGroup.State_Revoke)) { loadBalancer.setState(FirewallRule.State.Add); _lbDao.persist(loadBalancer); } @@ -358,11 +379,13 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements try { success = applyAutoScaleConfig(loadBalancer, vmGroup, currentState); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to configure AutoScaleVmGroup to the lb rule: " + loadBalancer.getId() + " because resource is unavaliable:", e); + s_logger.warn("Unable to configure AutoScaleVmGroup to the lb rule: " + loadBalancer.getId() + + " because resource is unavaliable:", e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating AutoscaleVmGroup"); + s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + + " lb state rolback while creating AutoscaleVmGroup"); } throw e; } finally { @@ -387,15 +410,24 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return success; } + private boolean validateHealthCheck(CreateLBHealthCheckPolicyCmd cmd) { + LoadBalancerVO loadBalancer = _lbDao.findById(cmd.getLbRuleId()); + String capability = getLBCapability(loadBalancer.getNetworkId(), Capability.HealthCheckPolicy.getName()); + if (capability != null) { + return true; + } + return false; + } + private boolean genericValidator(CreateLBStickinessPolicyCmd cmd) throws InvalidParameterValueException { LoadBalancerVO loadBalancer = _lbDao.findById(cmd.getLbRuleId()); /* Validation : check for valid Method name and params */ - List stickinessMethodList = getStickinessMethods(loadBalancer - .getNetworkId()); + List stickinessMethodList = getStickinessMethods(loadBalancer.getNetworkId()); boolean methodMatch = false; if (stickinessMethodList == null) { - throw new InvalidParameterValueException("Failed: No Stickiness method available for LB rule:" + cmd.getLbRuleId()); + throw new InvalidParameterValueException("Failed: No Stickiness method available for LB rule:" + + cmd.getLbRuleId()); } for (LbStickinessMethod method : stickinessMethodList) { if (method.getMethodName().equalsIgnoreCase(cmd.getStickinessMethodName())) { @@ -422,14 +454,16 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements for (LbStickinessMethodParam param : methodParamList) { if (param.getParamName().equalsIgnoreCase(paramName)) { if ((param.getIsflag() == false) && (paramValue == null)) { - throw new InvalidParameterValueException("Failed : Value expected for the Param :" + param.getParamName()); + throw new InvalidParameterValueException("Failed : Value expected for the Param :" + + param.getParamName()); } found = true; break; } } if (!found) { - throw new InvalidParameterValueException("Failed : Stickiness policy does not support param name :" + paramName); + throw new InvalidParameterValueException( + "Failed : Stickiness policy does not support param name :" + paramName); } } } @@ -438,7 +472,8 @@ public class 
LoadBalancingRulesManagerImpl extends ManagerBase implements for (LbStickinessMethodParam param : methodParamList) { if (param.getRequired()) { if (tempParamList.get(param.getParamName()) == null) { - throw new InvalidParameterValueException("Failed : Missing Manadatory Param :" + param.getParamName()); + throw new InvalidParameterValueException("Failed : Missing Manadatory Param :" + + param.getParamName()); } } } @@ -447,13 +482,16 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } } if (methodMatch == false) { - throw new InvalidParameterValueException("Failed to match Stickiness method name for LB rule:" + cmd.getLbRuleId()); + throw new InvalidParameterValueException("Failed to match Stickiness method name for LB rule:" + + cmd.getLbRuleId()); } /* Validation : check for the multiple policies to the rule id */ - List stickinessPolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId(cmd.getLbRuleId(), false); + List stickinessPolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId( + cmd.getLbRuleId(), false); if (stickinessPolicies.size() > 0) { - throw new InvalidParameterValueException("Failed to create Stickiness policy: Already policy attached " + cmd.getLbRuleId()); + throw new InvalidParameterValueException("Failed to create Stickiness policy: Already policy attached " + + cmd.getLbRuleId()); } return true; } @@ -462,7 +500,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Override @DB @ActionEvent(eventType = EventTypes.EVENT_LB_STICKINESSPOLICY_CREATE, eventDescription = "create lb stickinesspolicy to load balancer", create = true) - public StickinessPolicy createLBStickinessPolicy(CreateLBStickinessPolicyCmd cmd) throws NetworkRuleConflictException { + public StickinessPolicy createLBStickinessPolicy(CreateLBStickinessPolicyCmd cmd) + throws NetworkRuleConflictException { UserContext caller = UserContext.current(); /* Validation : check corresponding load balancer rule exist */ @@ -473,30 +512,108 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _accountMgr.checkAccess(caller.getCaller(), null, true, loadBalancer); if (loadBalancer.getState() == FirewallRule.State.Revoke) { - throw new InvalidParameterValueException("Failed: LB rule id: " + cmd.getLbRuleId() + " is in deleting state: "); + throw new InvalidParameterValueException("Failed: LB rule id: " + cmd.getLbRuleId() + + " is in deleting state: "); } /* Generic validations */ if (!genericValidator(cmd)) { - throw new InvalidParameterValueException("Failed to create Stickiness policy: Validation Failed " + cmd.getLbRuleId()); + throw new InvalidParameterValueException("Failed to create Stickiness policy: Validation Failed " + + cmd.getLbRuleId()); } - /* Specific validations using network element validator for specific validations */ - LBStickinessPolicyVO lbpolicy = new LBStickinessPolicyVO(loadBalancer.getId(), cmd.getLBStickinessPolicyName(), cmd.getStickinessMethodName(), cmd.getparamList(), cmd.getDescription()); + /* + * Specific validations using network element validator for specific + * validations + */ + LBStickinessPolicyVO lbpolicy = new LBStickinessPolicyVO(loadBalancer.getId(), cmd.getLBStickinessPolicyName(), + cmd.getStickinessMethodName(), cmd.getparamList(), cmd.getDescription()); List policyList = new ArrayList(); policyList.add(new LbStickinessPolicy(cmd.getStickinessMethodName(), lbpolicy.getParams())); - LoadBalancingRule lbRule = new LoadBalancingRule(loadBalancer, getExistingDestinations(lbpolicy.getId()), 
policyList); + LoadBalancingRule lbRule = new LoadBalancingRule(loadBalancer, getExistingDestinations(lbpolicy.getId()), + policyList, null); if (!validateRule(lbRule)) { - throw new InvalidParameterValueException("Failed to create Stickiness policy: Validation Failed " + cmd.getLbRuleId()); + throw new InvalidParameterValueException("Failed to create Stickiness policy: Validation Failed " + + cmd.getLbRuleId()); } /* Finally Insert into DB */ - LBStickinessPolicyVO policy = new LBStickinessPolicyVO(loadBalancer.getId(), cmd.getLBStickinessPolicyName(), cmd.getStickinessMethodName(), cmd.getparamList(), cmd.getDescription()); + LBStickinessPolicyVO policy = new LBStickinessPolicyVO(loadBalancer.getId(), cmd.getLBStickinessPolicyName(), + cmd.getStickinessMethodName(), cmd.getparamList(), cmd.getDescription()); policy = _lb2stickinesspoliciesDao.persist(policy); return policy; } + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_LB_HEALTHCHECKPOLICY_CREATE, eventDescription = "create load balancer health check to load balancer", create = true) + public HealthCheckPolicy createLBHealthCheckPolicy(CreateLBHealthCheckPolicyCmd cmd) { + UserContext caller = UserContext.current(); + + /* + * Validation of cmd Monitor interval must be greater than response + * timeout + */ + Map paramMap = cmd.getFullUrlParams(); + + if (paramMap.containsKey(ApiConstants.HEALTHCHECK_RESPONSE_TIMEOUT) + && paramMap.containsKey(ApiConstants.HEALTHCHECK_INTERVAL_TIME)) { + if (cmd.getResponsTimeOut() > cmd.getHealthCheckInterval()) + throw new InvalidParameterValueException( + "Failed to create HealthCheck policy : Monitor interval must be greater than response timeout"); + } + /* Validation : check corresponding load balancer rule exist */ + LoadBalancerVO loadBalancer = _lbDao.findById(cmd.getLbRuleId()); + if (loadBalancer == null) { + throw new InvalidParameterValueException("Failed: LB rule id: " + cmd.getLbRuleId() + " not present "); + } + + _accountMgr.checkAccess(caller.getCaller(), null, true, loadBalancer); + + if (loadBalancer.getState() == FirewallRule.State.Revoke) { + throw new InvalidParameterValueException("Failed: LB rule id: " + cmd.getLbRuleId() + + " is in deleting state: "); + } + + /* + * Validate Whether LB Provider has the capabilities to support Health + * Checks + */ + if (!validateHealthCheck(cmd)) { + throw new InvalidParameterValueException( + "Failed to create HealthCheck policy: Validation Failed (HealthCheck Policy is not supported by LB Provider for the LB rule id :)" + + cmd.getLbRuleId()); + } + + /* Validation : check for the multiple hc policies to the rule id */ + List hcPolicies = _lb2healthcheckDao.listByLoadBalancerId(cmd.getLbRuleId(), false); + if (hcPolicies.size() > 0) { + throw new InvalidParameterValueException( + "Failed to create HealthCheck policy: Already policy attached for the LB Rule id :" + + cmd.getLbRuleId()); + } + /* + * Specific validations using network element validator for specific + * validations + */ + LBHealthCheckPolicyVO hcpolicy = new LBHealthCheckPolicyVO(loadBalancer.getId(), cmd.getPingPath(), + cmd.getDescription(), cmd.getResponsTimeOut(), cmd.getHealthCheckInterval(), cmd.getHealthyThreshold(), + cmd.getUnhealthyThreshold()); + + List hcPolicyList = new ArrayList(); + hcPolicyList.add(new LbHealthCheckPolicy(hcpolicy.getpingpath(), hcpolicy.getDescription(), hcpolicy + .getResponseTime(), hcpolicy.getHealthcheckInterval(), hcpolicy.getHealthcheckThresshold(), hcpolicy + .getUnhealthThresshold())); + + // Finally Insert into DB + 
LBHealthCheckPolicyVO policy = new LBHealthCheckPolicyVO(loadBalancer.getId(), cmd.getPingPath(), + cmd.getDescription(), cmd.getResponsTimeOut(), cmd.getHealthCheckInterval(), cmd.getHealthyThreshold(), + cmd.getUnhealthyThreshold()); + + policy = _lb2healthcheckDao.persist(policy); + return policy; + } private boolean validateRule(LoadBalancingRule lbRule) { Network network = _networkDao.findById(lbRule.getNetworkId()); @@ -506,7 +623,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return false; } for (LoadBalancingServiceProvider ne : _lbProviders) { - boolean validated = ne.validateLBRule(network, lbRule); + boolean validated = ne.validateLBRule(network, lbRule); if (!validated) return false; } @@ -521,7 +638,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancerVO loadBalancer = _lbDao.findById(cmd.getLbRuleId()); if (loadBalancer == null) { - throw new InvalidParameterException("Invalid Load balancer Id:" + cmd.getLbRuleId()); + throw new InvalidParameterException("Invalid Load balancer Id:" + cmd.getLbRuleId()); } FirewallRule.State backupState = loadBalancer.getState(); loadBalancer.setState(FirewallRule.State.Add); @@ -529,11 +646,13 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements try { applyLoadBalancerConfig(cmd.getLbRuleId()); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to apply Stickiness policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavaliable:", e); + s_logger.warn("Unable to apply Stickiness policy to the lb rule: " + cmd.getLbRuleId() + + " because resource is unavaliable:", e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating sticky policy"); + s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + + " lb state rolback while creating sticky policy"); } deleteLBStickinessPolicy(cmd.getEntityId(), false); success = false; @@ -542,6 +661,36 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return success; } + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_LB_HEALTHCHECKPOLICY_CREATE, eventDescription = "Apply HealthCheckPolicy to load balancer ", async = true) + public boolean applyLBHealthCheckPolicy(CreateLBHealthCheckPolicyCmd cmd) { + boolean success = true; + + LoadBalancerVO loadBalancer = _lbDao.findById(cmd.getLbRuleId()); + if (loadBalancer == null) { + throw new InvalidParameterException("Invalid Load balancer Id:" + cmd.getLbRuleId()); + } + FirewallRule.State backupState = loadBalancer.getState(); + loadBalancer.setState(FirewallRule.State.Add); + _lbDao.persist(loadBalancer); + try { + applyLoadBalancerConfig(cmd.getLbRuleId()); + } catch (ResourceUnavailableException e) { + s_logger.warn("Unable to apply healthcheck policy to the lb rule: " + cmd.getLbRuleId() + + " because resource is unavaliable:", e); + if (isRollBackAllowedForProvider(loadBalancer)) { + loadBalancer.setState(backupState); + _lbDao.persist(loadBalancer); + s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + + " lb state rolback while creating healthcheck policy"); + } + deleteLBHealthCheckPolicy(cmd.getEntityId(), false); + success = false; + } + return success; + } + @Override @ActionEvent(eventType = EventTypes.EVENT_LB_STICKINESSPOLICY_DELETE, eventDescription = "revoking LB Stickiness policy ", async = true) public boolean 
deleteLBStickinessPolicy(long stickinessPolicyId, boolean apply) { @@ -555,13 +704,13 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(stickinessPolicy.getLoadBalancerId())); if (loadBalancer == null) { - throw new InvalidParameterException("Invalid Load balancer : " + stickinessPolicy.getLoadBalancerId() + " for Stickiness policy id: " + stickinessPolicyId); + throw new InvalidParameterException("Invalid Load balancer : " + stickinessPolicy.getLoadBalancerId() + + " for Stickiness policy id: " + stickinessPolicyId); } long loadBalancerId = loadBalancer.getId(); FirewallRule.State backupState = loadBalancer.getState(); _accountMgr.checkAccess(caller.getCaller(), null, true, loadBalancer); - if (apply) { if (loadBalancer.getState() == FirewallRule.State.Active) { loadBalancer.setState(FirewallRule.State.Add); @@ -571,12 +720,15 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements boolean backupStickyState = stickinessPolicy.isRevoke(); stickinessPolicy.setRevoke(true); _lb2stickinesspoliciesDao.persist(stickinessPolicy); - s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", stickinesspolicyID " + stickinessPolicyId); + s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", stickinesspolicyID " + + stickinessPolicyId); try { if (!applyLoadBalancerConfig(loadBalancerId)) { - s_logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for stickinesspolicyID " + stickinessPolicyId); - throw new CloudRuntimeException("Failed to remove load balancer rule id " + loadBalancerId + " for stickinesspolicyID " + stickinessPolicyId); + s_logger.warn("Failed to remove load balancer rule id " + loadBalancerId + + " for stickinesspolicyID " + stickinessPolicyId); + throw new CloudRuntimeException("Failed to remove load balancer rule id " + loadBalancerId + + " for stickinesspolicyID " + stickinessPolicyId); } } catch (ResourceUnavailableException e) { if (isRollBackAllowedForProvider(loadBalancer)) { @@ -584,7 +736,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _lb2stickinesspoliciesDao.persist(stickinessPolicy); loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting sticky policy: " + stickinessPolicyId); + s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting sticky policy: " + + stickinessPolicyId); } s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); success = false; @@ -592,9 +745,149 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } else { _lb2stickinesspoliciesDao.remove(stickinessPolicy.getLoadBalancerId()); } - return success; - } + } + + @DB + @Override + @ActionEvent(eventType = EventTypes.EVENT_LB_HEALTHCHECKPOLICY_DELETE, eventDescription = "revoking LB HealthCheck policy ", async = true) + public boolean deleteLBHealthCheckPolicy(long healthCheckPolicyId, boolean apply) { + boolean success = true; + + UserContext caller = UserContext.current(); + LBHealthCheckPolicyVO healthCheckPolicy = _lb2healthcheckDao.findById(healthCheckPolicyId); + + if (healthCheckPolicy == null) { + throw new InvalidParameterException("Invalid HealthCheck policy id value: " + healthCheckPolicyId); + } + LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(healthCheckPolicy.getLoadBalancerId())); + if 
(loadBalancer == null) { + throw new InvalidParameterException("Invalid Load balancer : " + healthCheckPolicy.getLoadBalancerId() + + " for HealthCheck policy id: " + healthCheckPolicyId); + } + long loadBalancerId = loadBalancer.getId(); + FirewallRule.State backupState = loadBalancer.getState(); + _accountMgr.checkAccess(caller.getCaller(), null, true, loadBalancer); + + if (apply) { + if (loadBalancer.getState() == FirewallRule.State.Active) { + loadBalancer.setState(FirewallRule.State.Add); + _lbDao.persist(loadBalancer); + } + + boolean backupStickyState = healthCheckPolicy.isRevoke(); + healthCheckPolicy.setRevoke(true); + _lb2healthcheckDao.persist(healthCheckPolicy); + s_logger.debug("Set health check policy to revoke for loadbalancing rule id : " + loadBalancerId + + ", healthCheckpolicyID " + healthCheckPolicyId); + + // removing the state of services set by the monitor. + List maps = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId); + if (maps != null) { + Transaction txn = Transaction.currentTxn(); + txn.start(); + s_logger.debug("Resetting health state policy for services in loadbalancing rule id : " + + loadBalancerId); + for (LoadBalancerVMMapVO map : maps) { + map.setState(null); + _lb2VmMapDao.persist(map); + } + txn.commit(); + } + + try { + if (!applyLoadBalancerConfig(loadBalancerId)) { + s_logger.warn("Failed to remove load balancer rule id " + loadBalancerId + + " for healthCheckpolicyID " + healthCheckPolicyId); + throw new CloudRuntimeException("Failed to remove load balancer rule id " + loadBalancerId + + " for healthCheckpolicyID " + healthCheckPolicyId); + } + } catch (ResourceUnavailableException e) { + if (isRollBackAllowedForProvider(loadBalancer)) { + healthCheckPolicy.setRevoke(backupStickyState); + _lb2healthcheckDao.persist(healthCheckPolicy); + loadBalancer.setState(backupState); + _lbDao.persist(loadBalancer); + s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + + " while deleting healthcheck policy: " + healthCheckPolicyId); + } + s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); + success = false; + } + } else { + _lb2healthcheckDao.remove(healthCheckPolicy.getLoadBalancerId()); + } + return success; + } + + // This method will check the status of services which has monitors created + // by CloudStack and update them in lbvmmap table + @DB + @Override + public void updateLBHealthChecks() throws ResourceUnavailableException { + List rules = _lbDao.listAll(); + List networks = _networkDao.listAll(); + List stateRules = null; + boolean isHandled = false; + for (NetworkVO ntwk : networks) { + Network network = _networkDao.findById(ntwk.getId()); + String capability = getLBCapability(network.getId(), Capability.HealthCheckPolicy.getName()); + + if (capability != null && capability.equalsIgnoreCase("true")) { + /* + * s_logger.debug( + * "HealthCheck Manager :: LB Provider in the Network has the Healthcheck policy capability :: " + * + provider.get(0).getName()); + */ + rules = _lbDao.listByNetworkId(network.getId()); + if (rules != null && rules.size() > 0) { + List lbrules = new ArrayList(); + for (LoadBalancerVO lb : rules) { + List dstList = getExistingDestinations(lb.getId()); + List hcPolicyList = getHealthCheckPolicies(lb.getId()); + // adding to lbrules list only if the LB rule + // hashealtChecks + if (hcPolicyList != null && hcPolicyList.size() > 0) { + LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, null, hcPolicyList); + lbrules.add(loadBalancing); + } + } + if 
(lbrules.size() > 0) { + isHandled = false; + for (LoadBalancingServiceProvider lbElement : _lbProviders) { + stateRules = lbElement.updateHealthChecks(network, (List) lbrules); + if (stateRules != null && stateRules.size() > 0) { + for (LoadBalancerTO lbto : stateRules) { + LoadBalancerVO ulb = _lbDao.findByUuid(lbto.getUuid()); + List lbVmMaps = _lb2VmMapDao.listByLoadBalancerId(ulb.getId()); + for (LoadBalancerVMMapVO lbVmMap : lbVmMaps) { + UserVm vm = _vmDao.findById(lbVmMap.getInstanceId()); + Nic nic = _nicDao.findByInstanceIdAndNetworkIdIncludingRemoved( + ulb.getNetworkId(), vm.getId()); + String dstIp = nic.getIp4Address(); + for (int i = 0; i < lbto.getDestinations().length; i++) { + LoadBalancerTO.DestinationTO des = lbto.getDestinations()[i]; + if (dstIp.equalsIgnoreCase(lbto.getDestinations()[i].getDestIp())) { + lbVmMap.setState(des.getMonitorState()); + _lb2VmMapDao.persist(lbVmMap); + s_logger.debug("Updating the LB VM Map table with the service state"); + } + } + } + } + isHandled = true; + } + if (isHandled) { + break; + } + } + } + } + } else { + // s_logger.debug("HealthCheck Manager :: LB Provider in the Network DNOT the Healthcheck policy capability "); + } + } + } private boolean isRollBackAllowedForProvider(LoadBalancerVO loadBalancer) { Network network = _networkDao.findById(loadBalancer.getNetworkId()); @@ -607,6 +900,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } return false; } + @Override @DB @ActionEvent(eventType = EventTypes.EVENT_ASSIGN_TO_LOAD_BALANCER_RULE, eventDescription = "assigning to load balancer", async = true) @@ -616,7 +910,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancerVO loadBalancer = _lbDao.findById(loadBalancerId); if (loadBalancer == null) { - throw new InvalidParameterValueException("Failed to assign to load balancer " + loadBalancerId + ", the load balancer was not found."); + throw new InvalidParameterValueException("Failed to assign to load balancer " + loadBalancerId + + ", the load balancer was not found."); } List mappedInstances = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId, false); @@ -635,7 +930,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements UserVm vm = _vmDao.findById(instanceId); if (vm == null || vm.getState() == State.Destroyed || vm.getState() == State.Expunging) { InvalidParameterValueException ex = new InvalidParameterValueException("Invalid instance id specified"); - ex.addProxyObject(vm, instanceId, "instanceId"); + ex.addProxyObject(vm, instanceId, "instanceId"); throw ex; } @@ -645,7 +940,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements throw new PermissionDeniedException("Cannot add virtual machines that do not belong to the same owner."); } - // Let's check to make sure the vm has a nic in the same network as the load balancing rule. + // Let's check to make sure the vm has a nic in the same network as + // the load balancing rule. 
List nics = _networkModel.getNics(vm.getId()); Nic nicInSameNetwork = null; for (Nic nic : nics) { @@ -656,8 +952,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } if (nicInSameNetwork == null) { - InvalidParameterValueException ex = new InvalidParameterValueException("VM " + instanceId + " cannot be added because it doesn't belong in the same network."); - ex.addProxyObject(vm, instanceId, "instanceId"); + InvalidParameterValueException ex = new InvalidParameterValueException("VM " + instanceId + + " cannot be added because it doesn't belong in the same network."); + ex.addProxyObject(vm, instanceId, "instanceId"); throw ex; } @@ -698,7 +995,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements txn.commit(); if (!vmInstanceIds.isEmpty()) { _lb2VmMapDao.remove(loadBalancer.getId(), vmInstanceIds, null); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while attaching VM: " + vmInstanceIds); + s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while attaching VM: " + + vmInstanceIds); } loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); @@ -707,9 +1005,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } if (!success) { - CloudRuntimeException ex = new CloudRuntimeException("Failed to add specified loadbalancerruleid for vms " + instanceIds); - ex.addProxyObject(loadBalancer, loadBalancerId, "loadBalancerId"); - // TBD: Also pack in the instanceIds in the exception using the right VO object or table name. + CloudRuntimeException ex = new CloudRuntimeException("Failed to add specified loadbalancerruleid for vms " + + instanceIds); + ex.addProxyObject(loadBalancer, loadBalancerId, "loadBalancerId"); + // TBD: Also pack in the instanceIds in the exception using the + // right VO object or table name. throw ex; } @@ -748,15 +1048,17 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (_autoScaleVmGroupDao.isAutoScaleLoadBalancer(loadBalancerId)) { // For autoscaled loadbalancer, the rules need not be applied, // meaning the call need not reach the resource layer. 
- // We can consider the job done and only need to remove the rules in DB + // We can consider the job done and only need to remove the + // rules in DB _lb2VmMapDao.remove(loadBalancer.getId(), instanceIds, null); return true; } if (!applyLoadBalancerConfig(loadBalancerId)) { s_logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for vms " + instanceIds); - CloudRuntimeException ex = new CloudRuntimeException("Failed to remove specified load balancer rule id for vms " + instanceIds); - ex.addProxyObject(loadBalancer, loadBalancerId, "loadBalancerId"); + CloudRuntimeException ex = new CloudRuntimeException( + "Failed to remove specified load balancer rule id for vms " + instanceIds); + ex.addProxyObject(loadBalancer, loadBalancerId, "loadBalancerId"); throw ex; } success = true; @@ -777,8 +1079,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements s_logger.warn("Unable to apply the load balancer config because resource is unavaliable.", e); } if (!success) { - CloudRuntimeException ex = new CloudRuntimeException("Failed to remove specified load balancer rule id for vms " + instanceIds); - ex.addProxyObject(loadBalancer, loadBalancerId, "loadBalancerId"); + CloudRuntimeException ex = new CloudRuntimeException( + "Failed to remove specified load balancer rule id for vms " + instanceIds); + ex.addProxyObject(loadBalancer, loadBalancerId, "loadBalancerId"); throw ex; } return success; @@ -806,7 +1109,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements map.setRevoke(true); _lb2VmMapDao.persist(map); - s_logger.debug("Set load balancer rule for revoke: rule id " + map.getLoadBalancerId() + ", vmId " + instanceId); + s_logger.debug("Set load balancer rule for revoke: rule id " + map.getLoadBalancerId() + ", vmId " + + instanceId); } // Reapply all lbs that had the vm assigned @@ -827,10 +1131,10 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements Account caller = ctx.getCaller(); LoadBalancerVO rule = _lbDao.findById(loadBalancerId); + if (rule == null) { throw new InvalidParameterValueException("Unable to find load balancer rule " + loadBalancerId); } - _accountMgr.checkAccess(caller, null, true, rule); boolean result = deleteLoadBalancerRule(loadBalancerId, apply, caller, ctx.getCallerUserId(), true); @@ -841,7 +1145,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } @DB - public boolean deleteLoadBalancerRule(long loadBalancerId, boolean apply, Account caller, long callerUserId, boolean rollBack) { + public boolean deleteLoadBalancerRule(long loadBalancerId, boolean apply, Account caller, long callerUserId, + boolean rollBack) { LoadBalancerVO lb = _lbDao.findById(loadBalancerId); Transaction txn = Transaction.currentTxn(); boolean generateUsageEvent = false; @@ -865,10 +1170,17 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements for (LoadBalancerVMMapVO map : maps) { map.setRevoke(true); _lb2VmMapDao.persist(map); - s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + map.getInstanceId()); + s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + + map.getInstanceId()); } } + List hcPolicies = _lb2healthcheckDao.listByLoadBalancerId(loadBalancerId); + for (LBHealthCheckPolicyVO lbHealthCheck : hcPolicies) { + lbHealthCheck.setRevoke(true); + _lb2healthcheckDao.persist(lbHealthCheck); + } + if (generateUsageEvent) { // Generate usage event right after all rules were 
marked for revoke UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_DELETE, lb.getAccountId(), 0, lb.getId(), @@ -890,7 +1202,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (_autoScaleVmGroupDao.isAutoScaleLoadBalancer(loadBalancerId)) { // Get the associated VmGroup AutoScaleVmGroupVO vmGroup = _autoScaleVmGroupDao.listByAll(loadBalancerId, null).get(0); - if (!applyAutoScaleConfig(lb, vmGroup,vmGroup.getState())) { + if (!applyAutoScaleConfig(lb, vmGroup, vmGroup.getState())) { s_logger.warn("Unable to apply the autoscale config"); return false; } @@ -920,15 +1232,17 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements FirewallRuleVO relatedRule = _firewallDao.findByRelatedId(lb.getId()); if (relatedRule != null) { - s_logger.warn("Unable to remove firewall rule id=" + lb.getId() + " as it has related firewall rule id=" + relatedRule.getId() + "; leaving it in Revoke state"); + s_logger.warn("Unable to remove firewall rule id=" + lb.getId() + " as it has related firewall rule id=" + + relatedRule.getId() + "; leaving it in Revoke state"); success = false; } else { _firewallMgr.removeRule(lb); } - // FIXME: breaking the dependency on ELB manager. This breaks functionality of ELB using virtual router + // FIXME: breaking the dependency on ELB manager. This breaks + // functionality of ELB using virtual router // Bug CS-15411 opened to document this - //_elbMgr.handleDeleteLoadBalancerRule(lb, callerUserId, caller); + // _elbMgr.handleDeleteLoadBalancerRule(lb, callerUserId, caller); if (success) { s_logger.debug("Load balancer with id " + lb.getId() + " is removed successfully"); @@ -939,7 +1253,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Override @ActionEvent(eventType = EventTypes.EVENT_LOAD_BALANCER_CREATE, eventDescription = "creating load balancer") - public LoadBalancer createLoadBalancerRule(CreateLoadBalancerRuleCmd lb, boolean openFirewall) throws NetworkRuleConflictException, InsufficientAddressCapacityException { + public LoadBalancer createLoadBalancerRule(CreateLoadBalancerRuleCmd lb, boolean openFirewall) + throws NetworkRuleConflictException, InsufficientAddressCapacityException { Account lbOwner = _accountMgr.getAccount(lb.getEntityOwnerId()); int defPortStart = lb.getDefaultPortStart(); @@ -949,7 +1264,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements throw new InvalidParameterValueException("privatePort is an invalid value: " + defPortEnd); } if (defPortStart > defPortEnd) { - throw new InvalidParameterValueException("private port range is invalid: " + defPortStart + "-" + defPortEnd); + throw new InvalidParameterValueException("private port range is invalid: " + defPortStart + "-" + + defPortEnd); } if ((lb.getAlgorithm() == null) || !NetUtils.isValidAlgorithm(lb.getAlgorithm())) { throw new InvalidParameterValueException("Invalid algorithm: " + lb.getAlgorithm()); @@ -963,9 +1279,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements Network network = _networkModel.getNetwork(lb.getNetworkId()); - // FIXME: breaking the dependency on ELB manager. This breaks functionality of ELB using virtual router + // FIXME: breaking the dependency on ELB manager. 
This breaks + // functionality of ELB using virtual router // Bug CS-15411 opened to document this - //LoadBalancer result = _elbMgr.handleCreateLoadBalancerRule(lb, lbOwner, lb.getNetworkId()); + // LoadBalancer result = _elbMgr.handleCreateLoadBalancerRule(lb, + // lbOwner, lb.getNetworkId()); LoadBalancer result = null; if (result == null) { IpAddress systemIp = null; @@ -978,7 +1296,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements // Validate ip address if (ipVO == null) { - throw new InvalidParameterValueException("Unable to create load balance rule; can't find/allocate source IP"); + throw new InvalidParameterValueException( + "Unable to create load balance rule; can't find/allocate source IP"); } else if (ipVO.isOneToOneNat()) { throw new NetworkRuleConflictException("Can't do load balance on ip address: " + ipVO.getAddress()); } @@ -986,13 +1305,14 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements boolean performedIpAssoc = false; try { if (ipVO.getAssociatedWithNetworkId() == null) { - boolean assignToVpcNtwk = network.getVpcId() != null - && ipVO.getVpcId() != null && ipVO.getVpcId().longValue() == network.getVpcId(); + boolean assignToVpcNtwk = network.getVpcId() != null && ipVO.getVpcId() != null + && ipVO.getVpcId().longValue() == network.getVpcId(); if (assignToVpcNtwk) { - //set networkId just for verification purposes + // set networkId just for verification purposes _networkModel.checkIpForService(ipVO, Service.Lb, lb.getNetworkId()); - s_logger.debug("The ip is not associated with the VPC network id="+ lb.getNetworkId() + " so assigning"); + s_logger.debug("The ip is not associated with the VPC network id=" + lb.getNetworkId() + + " so assigning"); ipVO = _networkMgr.associateIPToGuestNetwork(ipAddrId, lb.getNetworkId(), false); performedIpAssoc = true; } @@ -1000,8 +1320,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _networkModel.checkIpForService(ipVO, Service.Lb, null); } - if (ipVO.getAssociatedWithNetworkId() == null) { - throw new InvalidParameterValueException("Ip address " + ipVO + " is not assigned to the network " + network); + if (ipVO.getAssociatedWithNetworkId() == null) { + throw new InvalidParameterValueException("Ip address " + ipVO + " is not assigned to the network " + + network); } if (lb.getSourceIpAddressId() == null) { @@ -1015,7 +1336,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } } finally { if (result == null && systemIp != null) { - s_logger.debug("Releasing system IP address " + systemIp + " as corresponding lb rule failed to create"); + s_logger.debug("Releasing system IP address " + systemIp + + " as corresponding lb rule failed to create"); _networkMgr.handleSystemIpRelease(systemIp); } // release ip address if ipassoc was perfored @@ -1035,7 +1357,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Override @DB - public LoadBalancer createLoadBalancer(CreateLoadBalancerRuleCmd lb, boolean openFirewall) throws NetworkRuleConflictException { + public LoadBalancer createLoadBalancer(CreateLoadBalancerRuleCmd lb, boolean openFirewall) + throws NetworkRuleConflictException { UserContext caller = UserContext.current(); int srcPortStart = lb.getSourcePortStart(); int defPortStart = lb.getDefaultPortStart(); @@ -1045,45 +1368,48 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements IPAddressVO ipAddr = _ipAddressDao.findById(sourceIpId); // make sure ip address exists if 
(ipAddr == null || !ipAddr.readyToUse()) { - InvalidParameterValueException ex = new InvalidParameterValueException("Unable to create load balancer rule, invalid IP address id specified"); - ex.addProxyObject(ipAddr, sourceIpId, "sourceIpId"); + InvalidParameterValueException ex = new InvalidParameterValueException( + "Unable to create load balancer rule, invalid IP address id specified"); + ex.addProxyObject(ipAddr, sourceIpId, "sourceIpId"); throw ex; } else if (ipAddr.isOneToOneNat()) { - InvalidParameterValueException ex = new InvalidParameterValueException("Unable to create load balancer rule; specified sourceip id has static nat enabled"); - ex.addProxyObject(ipAddr, sourceIpId, "sourceIpId"); + InvalidParameterValueException ex = new InvalidParameterValueException( + "Unable to create load balancer rule; specified sourceip id has static nat enabled"); + ex.addProxyObject(ipAddr, sourceIpId, "sourceIpId"); throw ex; } Long networkId = ipAddr.getAssociatedWithNetworkId(); if (networkId == null) { - InvalidParameterValueException ex = new InvalidParameterValueException("Unable to create load balancer rule ; specified sourceip id is not associated with any network"); - ex.addProxyObject(ipAddr, sourceIpId, "sourceIpId"); + InvalidParameterValueException ex = new InvalidParameterValueException( + "Unable to create load balancer rule ; specified sourceip id is not associated with any network"); + ex.addProxyObject(ipAddr, sourceIpId, "sourceIpId"); throw ex; - } _firewallMgr.validateFirewallRule(caller.getCaller(), ipAddr, srcPortStart, srcPortEnd, lb.getProtocol(), Purpose.LoadBalancing, FirewallRuleType.User, networkId, null); - NetworkVO network = _networkDao.findById(networkId); - _accountMgr.checkAccess(caller.getCaller(), null, true, ipAddr); // verify that lb service is supported by the network if (!_networkModel.areServicesSupportedInNetwork(network.getId(), Service.Lb)) { - InvalidParameterValueException ex = new InvalidParameterValueException("LB service is not supported in specified network id"); - ex.addProxyObject(network, networkId, "networkId"); + InvalidParameterValueException ex = new InvalidParameterValueException( + "LB service is not supported in specified network id"); + ex.addProxyObject(network, networkId, "networkId"); throw ex; } Transaction txn = Transaction.currentTxn(); txn.start(); - LoadBalancerVO newRule = new LoadBalancerVO(lb.getXid(), lb.getName(), lb.getDescription(), lb.getSourceIpAddressId(), lb.getSourcePortEnd(), lb.getDefaultPortStart(), - lb.getAlgorithm(), network.getId(), ipAddr.getAllocatedToAccountId(), ipAddr.getAllocatedInDomainId()); + LoadBalancerVO newRule = new LoadBalancerVO(lb.getXid(), lb.getName(), lb.getDescription(), + lb.getSourceIpAddressId(), lb.getSourcePortEnd(), lb.getDefaultPortStart(), lb.getAlgorithm(), + network.getId(), ipAddr.getAllocatedToAccountId(), ipAddr.getAllocatedInDomainId()); // verify rule is supported by Lb provider of the network - LoadBalancingRule loadBalancing = new LoadBalancingRule(newRule, new ArrayList(), new ArrayList()); + LoadBalancingRule loadBalancing = new LoadBalancingRule(newRule, new ArrayList(), + new ArrayList(), new ArrayList()); if (!validateRule(loadBalancing)) { throw new InvalidParameterValueException("LB service provider cannot support this rule"); } @@ -1091,7 +1417,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements newRule = _lbDao.persist(newRule); if (openFirewall) { - _firewallMgr.createRuleForAllCidrs(sourceIpId, caller.getCaller(), 
lb.getSourcePortStart(), + _firewallMgr.createRuleForAllCidrs(sourceIpId, caller.getCaller(), lb.getSourcePortStart(), lb.getSourcePortEnd(), lb.getProtocol(), null, null, newRule.getId(), networkId); } @@ -1102,10 +1428,12 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (!_firewallDao.setStateToAdd(newRule)) { throw new CloudRuntimeException("Unable to update the state to add for " + newRule); } - s_logger.debug("Load balancer " + newRule.getId() + " for Ip address id=" + sourceIpId + ", public port " + srcPortStart + ", private port " + defPortStart + " is added successfully."); + s_logger.debug("Load balancer " + newRule.getId() + " for Ip address id=" + sourceIpId + ", public port " + + srcPortStart + ", private port " + defPortStart + " is added successfully."); UserContext.current().setEventDetails("Load balancer Id: " + newRule.getId()); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_CREATE, ipAddr.getAllocatedToAccountId(), - ipAddr.getDataCenterId(), newRule.getId(), null, LoadBalancingRule.class.getName(), newRule.getUuid()); + ipAddr.getDataCenterId(), newRule.getId(), null, LoadBalancingRule.class.getName(), + newRule.getUuid()); txn.commit(); return newRule; @@ -1131,8 +1459,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements public boolean applyLoadBalancerConfig(long lbRuleId) throws ResourceUnavailableException { LoadBalancerVO lb = _lbDao.findById(lbRuleId); List lbs; - if (isRollBackAllowedForProvider(lb)) { - // this is for Netscalar type of devices. if their is failure the db entries will be rollbacked. + if (isRollBackAllowedForProvider(lb)) { + // this is for Netscalar type of devices. if their is failure the db + // entries will be rollbacked. lbs = Arrays.asList(lb); } else { // get all rules in transition state @@ -1153,13 +1482,14 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } @Override - public boolean applyRules(Network network, Purpose purpose, List rules) + public boolean applyRules(Network network, Purpose purpose, List rules) throws ResourceUnavailableException { - assert(purpose == Purpose.LoadBalancing): "LB Manager asked to handle non-LB rules"; + assert (purpose == Purpose.LoadBalancing) : "LB Manager asked to handle non-LB rules"; boolean handled = false; - for (LoadBalancingServiceProvider lbElement: _lbProviders) { + for (LoadBalancingServiceProvider lbElement : _lbProviders) { Provider provider = lbElement.getProvider(); - boolean isLbProvider = _networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.Lb, provider); + boolean isLbProvider = _networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.Lb, + provider); if (!isLbProvider) { continue; } @@ -1171,14 +1501,16 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } @DB - protected boolean applyLoadBalancerRules(List lbs, boolean updateRulesInDB) throws ResourceUnavailableException { + protected boolean applyLoadBalancerRules(List lbs, boolean updateRulesInDB) + throws ResourceUnavailableException { Transaction txn = Transaction.currentTxn(); List rules = new ArrayList(); for (LoadBalancerVO lb : lbs) { List dstList = getExistingDestinations(lb.getId()); List policyList = getStickinessPolicies(lb.getId()); + List hcPolicyList = getHealthCheckPolicies(lb.getId()); - LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList); + LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList, 
hcPolicyList); rules.add(loadBalancing); } @@ -1217,18 +1549,28 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (_lb2VmMapDao.listByLoadBalancerId(lb.getId()).isEmpty()) { lb.setState(FirewallRule.State.Add); _lbDao.persist(lb); - s_logger.debug("LB rule " + lb.getId() + " state is set to Add as there are no more active LB-VM mappings"); + s_logger.debug("LB rule " + lb.getId() + + " state is set to Add as there are no more active LB-VM mappings"); } // remove LB-Stickiness policy mapping that were state to revoke - List stickinesspolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId(lb.getId(), true); + List stickinesspolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId( + lb.getId(), true); if (!stickinesspolicies.isEmpty()) { _lb2stickinesspoliciesDao.remove(lb.getId(), true); s_logger.debug("Load balancer rule id " + lb.getId() + " is removed stickiness policies"); } - txn.commit(); + // remove LB-HealthCheck policy mapping that were state to + // revoke + List healthCheckpolicies = _lb2healthcheckDao.listByLoadBalancerId(lb.getId(), + true); + if (!healthCheckpolicies.isEmpty()) { + _lb2healthcheckDao.remove(lb.getId(), true); + s_logger.debug("Load balancer rule id " + lb.getId() + " is removed health check monitors policies"); + } + txn.commit(); if (checkForReleaseElasticIp) { boolean success = true; long count = _firewallDao.countRulesByIpId(lb.getSourceIpAddressId()); @@ -1236,7 +1578,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements try { success = handleSystemLBIpRelease(lb); } catch (Exception ex) { - s_logger.warn("Failed to release system ip as a part of lb rule " + lb + " deletion due to exception ", ex); + s_logger.warn("Failed to release system ip as a part of lb rule " + lb + + " deletion due to exception ", ex); success = false; } finally { if (!success) { @@ -1245,7 +1588,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } } } - // if the rule is the last one for the ip address assigned to VPC, unassign it from the network + // if the rule is the last one for the ip address assigned to + // VPC, unassign it from the network IpAddress ip = _ipAddressDao.findById(lb.getSourceIpAddressId()); _vpcMgr.unassignIPFromVpcNetwork(ip.getId(), lb.getNetworkId()); } @@ -1259,14 +1603,16 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements boolean success = true; if (ip.getSystem()) { s_logger.debug("Releasing system ip address " + lb.getSourceIpAddressId() + " as a part of delete lb rule"); - if (!_networkMgr.disassociatePublicIpAddress(lb.getSourceIpAddressId(), UserContext.current().getCallerUserId(), UserContext.current().getCaller())) { - s_logger.warn("Unable to release system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); + if (!_networkMgr.disassociatePublicIpAddress(lb.getSourceIpAddressId(), UserContext.current() + .getCallerUserId(), UserContext.current().getCaller())) { + s_logger.warn("Unable to release system ip address id=" + lb.getSourceIpAddressId() + + " as a part of delete lb rule"); success = false; } else { - s_logger.warn("Successfully released system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); + s_logger.warn("Successfully released system ip address id=" + lb.getSourceIpAddressId() + + " as a part of delete lb rule"); } } - return success; } @@ -1287,7 +1633,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Override public boolean 
removeAllLoadBalanacersForNetwork(long networkId, Account caller, long callerUserId) { - List rules = _firewallDao.listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.LoadBalancing); + List rules = _firewallDao + .listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.LoadBalancing); if (rules != null) s_logger.debug("Found " + rules.size() + " lb rules to cleanup"); for (FirewallRule rule : rules) { @@ -1306,12 +1653,28 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements List sDbpolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId(lbId); for (LBStickinessPolicyVO sDbPolicy : sDbpolicies) { - LbStickinessPolicy sPolicy = new LbStickinessPolicy(sDbPolicy.getMethodName(), sDbPolicy.getParams(), sDbPolicy.isRevoke()); + LbStickinessPolicy sPolicy = new LbStickinessPolicy(sDbPolicy.getMethodName(), sDbPolicy.getParams(), + sDbPolicy.isRevoke()); stickinessPolicies.add(sPolicy); } return stickinessPolicies; } + @Override + public List getHealthCheckPolicies(long lbId) { + List healthCheckPolicies = new ArrayList(); + List hcDbpolicies = _lb2healthcheckDao.listByLoadBalancerId(lbId); + + for (LBHealthCheckPolicyVO policy : hcDbpolicies) { + String pingpath = policy.getpingpath(); + LbHealthCheckPolicy hDbPolicy = new LbHealthCheckPolicy(pingpath, policy.getDescription(), + policy.getResponseTime(), policy.getHealthcheckInterval(), policy.getHealthcheckThresshold(), + policy.getUnhealthThresshold(), policy.isRevoke()); + healthCheckPolicies.add(hDbPolicy); + } + return healthCheckPolicies; + } + @Override public List getExistingDestinations(long lbId) { List dstList = new ArrayList(); @@ -1323,7 +1686,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements UserVm vm = _vmDao.findById(lbVmMap.getInstanceId()); Nic nic = _nicDao.findByInstanceIdAndNetworkIdIncludingRemoved(lb.getNetworkId(), vm.getId()); dstIp = nic.getIp4Address(); - LbDestination lbDst = new LbDestination(lb.getDefaultPortStart(), lb.getDefaultPortEnd(), dstIp, lbVmMap.isRevoke()); + LbDestination lbDst = new LbDestination(lb.getDefaultPortStart(), lb.getDefaultPortEnd(), dstIp, + lbVmMap.isRevoke()); dstList.add(lbDst); } return dstList; @@ -1369,16 +1733,19 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements applyLoadBalancerConfig(lbRuleId); } catch (ResourceUnavailableException e) { if (isRollBackAllowedForProvider(lb)) { - /* NOTE : We use lb object to update db instead of lbBackup object since db layer will fail to update if there is no change in the object. + /* + * NOTE : We use lb object to update db instead of lbBackup + * object since db layer will fail to update if there is no + * change in the object. 
*/ if (lbBackup.getName() != null) { - lb.setName(lbBackup.getName()); + lb.setName(lbBackup.getName()); } if (lbBackup.getDescription() != null) { lb.setDescription(lbBackup.getDescription()); } if (lbBackup.getAlgorithm() != null) { - lb.setAlgorithm(lbBackup.getAlgorithm()); + lb.setAlgorithm(lbBackup.getAlgorithm()); } lb.setState(lbBackup.getState()); _lbDao.update(lb.getId(), lb); @@ -1399,7 +1766,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } @Override - public List listLoadBalancerInstances(ListLoadBalancerRuleInstancesCmd cmd) throws PermissionDeniedException { + public List listLoadBalancerInstances(ListLoadBalancerRuleInstancesCmd cmd) + throws PermissionDeniedException { Account caller = UserContext.current().getCaller(); Long loadBalancerId = cmd.getId(); Boolean applied = cmd.isApplied(); @@ -1428,10 +1796,12 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } IPAddressVO addr = _ipAddressDao.findById(loadBalancer.getSourceIpAddressId()); - List userVms = _vmDao.listVirtualNetworkInstancesByAcctAndZone(loadBalancer.getAccountId(), addr.getDataCenterId(), loadBalancer.getNetworkId()); + List userVms = _vmDao.listVirtualNetworkInstancesByAcctAndZone(loadBalancer.getAccountId(), + addr.getDataCenterId(), loadBalancer.getNetworkId()); for (UserVmVO userVm : userVms) { - // if the VM is destroyed, being expunged, in an error state, or in an unknown state, skip it + // if the VM is destroyed, being expunged, in an error state, or in + // an unknown state, skip it switch (userVm.getState()) { case Destroyed: case Expunging: @@ -1449,10 +1819,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return loadBalancerInstances; } - @Override - public List getStickinessMethods(long networkid) - { + public List getStickinessMethods(long networkid) { String capability = getLBCapability(networkid, Capability.SupportedStickinessMethods.getName()); if (capability == null) { return null; @@ -1465,7 +1833,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } @Override - public List searchForLBStickinessPolicies(ListLBStickinessPoliciesCmd cmd) throws PermissionDeniedException { + public List searchForLBStickinessPolicies(ListLBStickinessPoliciesCmd cmd) + throws PermissionDeniedException { Account caller = UserContext.current().getCaller(); Long loadBalancerId = cmd.getLbRuleId(); LoadBalancerVO loadBalancer = _lbDao.findById(loadBalancerId); @@ -1480,6 +1849,20 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return sDbpolicies; } + @Override + public List searchForLBHealthCheckPolicies(ListLBHealthCheckPoliciesCmd cmd) + throws PermissionDeniedException { + Account caller = UserContext.current().getCaller(); + Long loadBalancerId = cmd.getLbRuleId(); + LoadBalancerVO loadBalancer = _lbDao.findById(loadBalancerId); + if (loadBalancer == null) { + return null; + } + _accountMgr.checkAccess(caller, null, true, loadBalancer); + List hcDbpolicies = _lb2healthcheckDao.listByLoadBalancerId(cmd.getLbRuleId()); + return hcDbpolicies; + } + @Override public Pair, Integer> searchForLoadBalancers(ListLoadBalancerRulesCmd cmd) { Long ipId = cmd.getPublicIpId(); @@ -1493,8 +1876,10 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements Account caller = UserContext.current().getCaller(); List permittedAccounts = new ArrayList(); - Ternary domainIdRecursiveListProject = new Ternary(cmd.getDomainId(), cmd.isRecursive(), null); - 
_accountMgr.buildACLSearchParameters(caller, id, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, domainIdRecursiveListProject, cmd.listAll(), false); + Ternary domainIdRecursiveListProject = new Ternary( + cmd.getDomainId(), cmd.isRecursive(), null); + _accountMgr.buildACLSearchParameters(caller, id, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, + domainIdRecursiveListProject, cmd.listAll(), false); Long domainId = domainIdRecursiveListProject.first(); Boolean isRecursive = domainIdRecursiveListProject.second(); ListProjectResourcesCriteria listProjectResourcesCriteria = domainIdRecursiveListProject.third(); @@ -1510,13 +1895,15 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (instanceId != null) { SearchBuilder lbVMSearch = _lb2VmMapDao.createSearchBuilder(); lbVMSearch.and("instanceId", lbVMSearch.entity().getInstanceId(), SearchCriteria.Op.EQ); - sb.join("lbVMSearch", lbVMSearch, sb.entity().getId(), lbVMSearch.entity().getLoadBalancerId(), JoinBuilder.JoinType.INNER); + sb.join("lbVMSearch", lbVMSearch, sb.entity().getId(), lbVMSearch.entity().getLoadBalancerId(), + JoinBuilder.JoinType.INNER); } if (zoneId != null) { SearchBuilder ipSearch = _ipAddressDao.createSearchBuilder(); ipSearch.and("zoneId", ipSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); - sb.join("ipSearch", ipSearch, sb.entity().getSourceIpAddressId(), ipSearch.entity().getId(), JoinBuilder.JoinType.INNER); + sb.join("ipSearch", ipSearch, sb.entity().getSourceIpAddressId(), ipSearch.entity().getId(), + JoinBuilder.JoinType.INNER); } if (tags != null && !tags.isEmpty()) { @@ -1528,7 +1915,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } tagSearch.and("resourceType", tagSearch.entity().getResourceType(), SearchCriteria.Op.EQ); sb.groupBy(sb.entity().getId()); - sb.join("tagSearch", tagSearch, sb.entity().getId(), tagSearch.entity().getResourceId(), JoinBuilder.JoinType.INNER); + sb.join("tagSearch", tagSearch, sb.entity().getId(), tagSearch.entity().getResourceId(), + JoinBuilder.JoinType.INNER); } SearchCriteria sc = sb.create(); @@ -1561,7 +1949,6 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements sc.setJoinParameters("ipSearch", "zoneId", zoneId); } - if (tags != null && !tags.isEmpty()) { int count = 0; sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.LoadBalancer.toString()); @@ -1583,7 +1970,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements for (LoadBalancerVO lb : lbs) { List dstList = getExistingDestinations(lb.getId()); List policyList = this.getStickinessPolicies(lb.getId()); - LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList); + List hcPolicyList = this.getHealthCheckPolicies(lb.getId()); + LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList, hcPolicyList); lbRules.add(loadBalancing); } return lbRules; @@ -1594,10 +1982,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return _lbDao.findById(lbId); } - protected void removeLBRule(LoadBalancerVO rule) { - //remove the rule + protected void removeLBRule(LoadBalancerVO rule) { + // remove the rule _lbDao.remove(rule.getId()); } - - } diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index ce65586ec6e..6742f7b378d 100755 --- 
a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -167,6 +167,7 @@ import com.cloud.network.dao.VirtualRouterProviderDao; import com.cloud.network.dao.VpnUserDao; import com.cloud.network.lb.LoadBalancingRule; import com.cloud.network.lb.LoadBalancingRule.LbDestination; +import com.cloud.network.lb.LoadBalancingRule.LbHealthCheckPolicy; import com.cloud.network.lb.LoadBalancingRule.LbStickinessPolicy; import com.cloud.network.lb.LoadBalancingRulesManager; import com.cloud.network.router.VirtualRouter.RedundantState; @@ -2382,11 +2383,12 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V for (LoadBalancerVO lb : lbs) { List dstList = _lbMgr.getExistingDestinations(lb.getId()); List policyList = _lbMgr.getStickinessPolicies(lb.getId()); - LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList); + List hcPolicyList = _lbMgr.getHealthCheckPolicies(lb.getId()); + LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList, hcPolicyList); lbRules.add(loadBalancing); } } - + s_logger.debug("Found " + lbRules.size() + " load balancing rule(s) to apply as a part of domR " + router + " start."); if (!lbRules.isEmpty()) { createApplyLoadBalancingRulesCommands(lbRules, router, cmds, guestNetworkId); @@ -3284,7 +3286,8 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V for (LoadBalancerVO lb : lbs) { List dstList = _lbMgr.getExistingDestinations(lb.getId()); List policyList = _lbMgr.getStickinessPolicies(lb.getId()); - LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList,policyList); + List hcPolicyList = _lbMgr.getHealthCheckPolicies(lb.getId() ); + LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList, hcPolicyList); lbRules.add(loadBalancing); } return sendLBRules(router, lbRules, network.getId()); diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index d70c45f1f8a..1f1f12edfc1 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -2103,10 +2103,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(QueryAsyncJobResultCmd.class); cmdList.add(AssignToLoadBalancerRuleCmd.class); cmdList.add(CreateLBStickinessPolicyCmd.class); + cmdList.add(CreateLBHealthCheckPolicyCmd .class); cmdList.add(CreateLoadBalancerRuleCmd.class); cmdList.add(DeleteLBStickinessPolicyCmd.class); + cmdList.add(DeleteLBHealthCheckPolicyCmd .class); cmdList.add(DeleteLoadBalancerRuleCmd.class); cmdList.add(ListLBStickinessPoliciesCmd.class); + cmdList.add(ListLBHealthCheckPoliciesCmd .class); cmdList.add(ListLoadBalancerRuleInstancesCmd.class); cmdList.add(ListLoadBalancerRulesCmd.class); cmdList.add(RemoveFromLoadBalancerRuleCmd.class); diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql index ca15bdaf781..4e39a71ef9d 100644 --- a/setup/db/db/schema-410to420.sql +++ b/setup/db/db/schema-410to420.sql @@ -27,6 +27,7 @@ UPDATE `cloud`.`hypervisor_capabilities` SET `max_hosts_per_cluster`=32 WHERE `h INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '5.1', 128, 0, 32); DELETE FROM `cloud`.`configuration` where 
name='vmware.percluster.host.max'; INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'AgentManager', 'xen.nics.max', '7', 'Maximum allowed nics for Vms created on Xen'); +ALTER TABLE `cloud`.`load_balancer_vm_map` ADD state VARCHAR(40) NULL COMMENT 'service status updated by LB healthcheck manager'; alter table template_host_ref add state varchar(255); alter table template_host_ref add update_count bigint unsigned; @@ -97,6 +98,21 @@ CREATE TABLE `vpc_service_map` ( UNIQUE (`vpc_id`, `service`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; +CREATE TABLE `cloud`.`load_balancer_healthcheck_policies` ( + `id` bigint(20) NOT NULL auto_increment, + `uuid` varchar(40), + `load_balancer_id` bigint unsigned NOT NULL, + `pingpath` varchar(225) NULL DEFAULT '/', + `description` varchar(4096) NULL, + `response_time` int(11) DEFAULT 5, + `healthcheck_interval` int(11) DEFAULT 5, + `healthcheck_thresshold` int(11) DEFAULT 2, + `unhealth_thresshold` int(11) DEFAULT 10, + `revoke` tinyint(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 is when rule is set for Revoke', + PRIMARY KEY (`id`), + UNIQUE KEY `id_UNIQUE` (`id`), + CONSTRAINT `fk_load_balancer_healthcheck_policies_loadbalancer_id` FOREIGN KEY(`load_balancer_id`) REFERENCES `load_balancing_rules`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.instancename.flag', 'false', 'Append guest VM display Name (if set) to the internal name of the VM'); From e1c72bc02848e2d128139db477b19de217f77e0d Mon Sep 17 00:00:00 2001 From: Radhika PC Date: Mon, 18 Mar 2013 14:57:31 +0530 Subject: [PATCH 018/123] CLOUDSTACK-1681 --- docs/en-US/Release_Notes.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en-US/Release_Notes.xml b/docs/en-US/Release_Notes.xml index 4d1f62fb713..2cedb90a763 100644 --- a/docs/en-US/Release_Notes.xml +++ b/docs/en-US/Release_Notes.xml @@ -92,11 +92,11 @@ Name - systemvm-vmware-3.0.0 + systemvm-vmware-4.0 Description - systemvm-vmware-3.0.0 + systemvm-vmware-4.0 URL From 61754cd987a54de53a742bf7475336b3938e0de9 Mon Sep 17 00:00:00 2001 From: Anshul Gangwar Date: Wed, 6 Mar 2013 15:09:56 +0530 Subject: [PATCH 019/123] CLOUDSTACK-680: Feature SNMP Alerts support in CloudStack Signed-off-by: Anshul Gangwar Signed-off-by: Sateesh Chodapuneedi --- client/pom.xml | 5 + client/tomcatconf/log4j-cloud.xml.in | 25 +++ core/src/com/cloud/alert/AlertManager.java | 41 ++-- plugins/alert-handlers/snmp-alerts/pom.xml | 45 ++++ .../alert/snmp/CsSnmpConstants.java | 45 ++++ .../alert/snmp/SnmpEnhancedPatternLayout.java | 107 +++++++++ .../cloudstack/alert/snmp/SnmpHelper.java | 106 +++++++++ .../alert/snmp/SnmpTrapAppender.java | 207 ++++++++++++++++++ .../cloudstack/alert/snmp/SnmpTrapInfo.java | 90 ++++++++ .../snmp/SnmpEnhancedPatternLayoutTest.java | 90 ++++++++ .../alert/snmp/SnmpTrapAppenderTest.java | 86 ++++++++ plugins/pom.xml | 1 + .../src/com/cloud/alert/AlertManagerImpl.java | 6 + .../cloud/usage/UsageAlertManagerImpl.java | 13 +- 14 files changed, 843 insertions(+), 24 deletions(-) create mode 100644 plugins/alert-handlers/snmp-alerts/pom.xml create mode 100644 plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/CsSnmpConstants.java create mode 100644 plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayout.java create mode 100644 plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpHelper.java create mode 
100644 plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpTrapAppender.java create mode 100644 plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpTrapInfo.java create mode 100644 plugins/alert-handlers/snmp-alerts/test/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayoutTest.java create mode 100644 plugins/alert-handlers/snmp-alerts/test/org/apache/cloudstack/alert/snmp/SnmpTrapAppenderTest.java diff --git a/client/pom.xml b/client/pom.xml index cda6ab8b4e7..ecf232be7ac 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -65,6 +65,11 @@ cloud-plugin-network-nvp ${project.version} + + org.apache.cloudstack + cloud-plugin-snmp-alerts + ${project.version} + org.apache.cloudstack cloud-plugin-network-ovs diff --git a/client/tomcatconf/log4j-cloud.xml.in b/client/tomcatconf/log4j-cloud.xml.in index 086669376aa..ce4079f9c96 100755 --- a/client/tomcatconf/log4j-cloud.xml.in +++ b/client/tomcatconf/log4j-cloud.xml.in @@ -74,6 +74,20 @@ under the License. + + + + + + + + + + + + + + @@ -142,6 +156,17 @@ under the License. + + + + + + + + + + + diff --git a/core/src/com/cloud/alert/AlertManager.java b/core/src/com/cloud/alert/AlertManager.java index a24e18c8373..b6d005a5f21 100755 --- a/core/src/com/cloud/alert/AlertManager.java +++ b/core/src/com/cloud/alert/AlertManager.java @@ -27,26 +27,27 @@ public interface AlertManager extends Manager { public static final short ALERT_TYPE_VIRTUAL_NETWORK_PUBLIC_IP = CapacityVO.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP; public static final short ALERT_TYPE_PRIVATE_IP = CapacityVO.CAPACITY_TYPE_PRIVATE_IP; public static final short ALERT_TYPE_SECONDARY_STORAGE = CapacityVO.CAPACITY_TYPE_SECONDARY_STORAGE; - public static final short ALERT_TYPE_HOST = 6; - public static final short ALERT_TYPE_USERVM = 7; - public static final short ALERT_TYPE_DOMAIN_ROUTER = 8; - public static final short ALERT_TYPE_CONSOLE_PROXY = 9; - public static final short ALERT_TYPE_ROUTING = 10; // lost connection to default route (to the gateway) - public static final short ALERT_TYPE_STORAGE_MISC = 11; // lost connection to default route (to the gateway) - public static final short ALERT_TYPE_USAGE_SERVER = 12; // lost connection to default route (to the gateway) - public static final short ALERT_TYPE_MANAGMENT_NODE = 13; // lost connection to default route (to the gateway) - public static final short ALERT_TYPE_DOMAIN_ROUTER_MIGRATE = 14; - public static final short ALERT_TYPE_CONSOLE_PROXY_MIGRATE = 15; - public static final short ALERT_TYPE_USERVM_MIGRATE = 16; - public static final short ALERT_TYPE_VLAN = 17; - public static final short ALERT_TYPE_SSVM = 18; - public static final short ALERT_TYPE_USAGE_SERVER_RESULT = 19; // Usage job result - public static final short ALERT_TYPE_STORAGE_DELETE = 20; - public static final short ALERT_TYPE_UPDATE_RESOURCE_COUNT = 21; // Generated when we fail to update the resource count - public static final short ALERT_TYPE_USAGE_SANITY_RESULT = 22; - public static final short ALERT_TYPE_DIRECT_ATTACHED_PUBLIC_IP = 23; - public static final short ALERT_TYPE_LOCAL_STORAGE = 24; - public static final short ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED = 25; // Generated when the resource limit exceeds the limit. 
Currently used for recurring snapshots only + public static final short ALERT_TYPE_HOST = 7; + public static final short ALERT_TYPE_USERVM = 8; + public static final short ALERT_TYPE_DOMAIN_ROUTER = 9; + public static final short ALERT_TYPE_CONSOLE_PROXY = 10; + public static final short ALERT_TYPE_ROUTING = 11; // lost connection to default route (to the gateway) + public static final short ALERT_TYPE_STORAGE_MISC = 12; // lost connection to default route (to the gateway) + public static final short ALERT_TYPE_USAGE_SERVER = 13; // lost connection to default route (to the gateway) + public static final short ALERT_TYPE_MANAGMENT_NODE = 14; // lost connection to default route (to the gateway) + public static final short ALERT_TYPE_DOMAIN_ROUTER_MIGRATE = 15; + public static final short ALERT_TYPE_CONSOLE_PROXY_MIGRATE = 16; + public static final short ALERT_TYPE_USERVM_MIGRATE = 17; + public static final short ALERT_TYPE_VLAN = 18; + public static final short ALERT_TYPE_SSVM = 19; + public static final short ALERT_TYPE_USAGE_SERVER_RESULT = 20; // Usage job result + public static final short ALERT_TYPE_STORAGE_DELETE = 21; + public static final short ALERT_TYPE_UPDATE_RESOURCE_COUNT = 22; // Generated when we fail to update the resource + // count + public static final short ALERT_TYPE_USAGE_SANITY_RESULT = 23; + public static final short ALERT_TYPE_DIRECT_ATTACHED_PUBLIC_IP = 24; + public static final short ALERT_TYPE_LOCAL_STORAGE = 25; + public static final short ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED = 26; // Generated when the resource limit exceeds the limit. Currently used for recurring snapshots only void clearAlert(short alertType, long dataCenterId, long podId); diff --git a/plugins/alert-handlers/snmp-alerts/pom.xml b/plugins/alert-handlers/snmp-alerts/pom.xml new file mode 100644 index 00000000000..b5cebf31b7a --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/pom.xml @@ -0,0 +1,45 @@ + + + + cloudstack-plugins + org.apache.cloudstack + 4.2.0-SNAPSHOT + ../../pom.xml + + 4.0.0 + Apache CloudStack Plugin - SNMP Alerts + cloud-plugin-snmp-alerts + + + + org.apache.servicemix.bundles + org.apache.servicemix.bundles.snmp4j + 2.1.0_1 + + + log4j + log4j + ${cs.log4j.version} + + + + diff --git a/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/CsSnmpConstants.java b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/CsSnmpConstants.java new file mode 100644 index 00000000000..36970a958fd --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/CsSnmpConstants.java @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License + +package org.apache.cloudstack.alert.snmp; + +/** + *
+ * IMPORTANT + *
+ * These OIDs are based on CS-ROOT-MIB MIB file. If there is any change in MIB file + * then that should be reflected in this file also * + *

+ * suffix 2 due to conflict with SnmpConstants class of snmp4j + */ +public class CsSnmpConstants { + public static final String CLOUDSTACK = "1.3.6.1.4.1.18060.15"; + + public static final String OBJECTS_PREFIX = CLOUDSTACK + ".1.1."; + + public static final String TRAPS_PREFIX = CLOUDSTACK + ".1.2.0."; + + public static final String DATA_CENTER_ID = OBJECTS_PREFIX + 1; + + public static final String POD_ID = OBJECTS_PREFIX + 2; + + public static final String CLUSTER_ID = OBJECTS_PREFIX + 3; + + public static final String MESSAGE = OBJECTS_PREFIX + 4; + + public static final String GENERATION_TIME = OBJECTS_PREFIX + 5; +} \ No newline at end of file diff --git a/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayout.java b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayout.java new file mode 100644 index 00000000000..67420915607 --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayout.java @@ -0,0 +1,107 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License + +package org.apache.cloudstack.alert.snmp; + +import org.apache.log4j.EnhancedPatternLayout; +import org.apache.log4j.spi.LoggingEvent; + +import java.util.Date; +import java.util.StringTokenizer; + +public class SnmpEnhancedPatternLayout extends EnhancedPatternLayout { + private String _pairDelimiter = "//"; + private String _keyValueDelimiter = "::"; + + private static final int LENGTH_OF_STRING_MESSAGE_AND_KEY_VALUE_DELIMITER = 9; + private static final int LENGTH_OF_STRING_MESSAGE = 8; + + public String getKeyValueDelimeter() { + return _keyValueDelimiter; + } + + public void setKeyValueDelimiter(String keyValueDelimiter) { + this._keyValueDelimiter = keyValueDelimiter; + } + + public String getPairDelimiter() { + return _pairDelimiter; + } + + public void setPairDelimiter(String pairDelimiter) { + this._pairDelimiter = pairDelimiter; + } + + public SnmpTrapInfo parseEvent(LoggingEvent event) { + SnmpTrapInfo snmpTrapInfo = null; + + final String message = event.getRenderedMessage(); + if (message.contains("alertType") && message.contains("message")) { + snmpTrapInfo = new SnmpTrapInfo(); + final StringTokenizer messageSplitter = new StringTokenizer(message, _pairDelimiter); + while (messageSplitter.hasMoreTokens()) { + final String pairToken = messageSplitter.nextToken(); + final StringTokenizer pairSplitter = new StringTokenizer(pairToken, _keyValueDelimiter); + String keyToken; + String valueToken; + + if (pairSplitter.hasMoreTokens()) { + keyToken = pairSplitter.nextToken().trim(); + } else { + break; + } + + if (pairSplitter.hasMoreTokens()) { + valueToken = pairSplitter.nextToken().trim(); + } else { + break; + } + + if 
(keyToken.equalsIgnoreCase("alertType") && !valueToken.equalsIgnoreCase("null")) { + snmpTrapInfo.setAlertType(Short.parseShort(valueToken)); + } else if (keyToken.equalsIgnoreCase("dataCenterId") && !valueToken.equalsIgnoreCase("null")) { + snmpTrapInfo.setDataCenterId(Long.parseLong(valueToken)); + } else if (keyToken.equalsIgnoreCase("podId") && !valueToken.equalsIgnoreCase("null")) { + snmpTrapInfo.setPodId(Long.parseLong(valueToken)); + } else if (keyToken.equalsIgnoreCase("clusterId") && !valueToken.equalsIgnoreCase("null")) { + snmpTrapInfo.setClusterId(Long.parseLong(valueToken)); + } else if (keyToken.equalsIgnoreCase("message") && !valueToken.equalsIgnoreCase("null")) { + snmpTrapInfo.setMessage(getSnmpMessage(message)); + } + } + + snmpTrapInfo.setGenerationTime(new Date(event.getTimeStamp())); + } + return snmpTrapInfo; + } + + private String getSnmpMessage(String message) { + int lastIndexOfKeyValueDelimiter = message.lastIndexOf(_keyValueDelimiter); + int lastIndexOfMessageInString = message.lastIndexOf("message"); + + if (lastIndexOfKeyValueDelimiter - lastIndexOfMessageInString <= + LENGTH_OF_STRING_MESSAGE_AND_KEY_VALUE_DELIMITER) { + return message.substring(lastIndexOfKeyValueDelimiter + _keyValueDelimiter.length()).trim(); + } else if (lastIndexOfMessageInString < lastIndexOfKeyValueDelimiter) { + return message.substring( + lastIndexOfMessageInString + _keyValueDelimiter.length() + LENGTH_OF_STRING_MESSAGE).trim(); + } + + return message.substring(message.lastIndexOf("message" + _keyValueDelimiter) + + LENGTH_OF_STRING_MESSAGE_AND_KEY_VALUE_DELIMITER).trim(); + } +} \ No newline at end of file diff --git a/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpHelper.java b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpHelper.java new file mode 100644 index 00000000000..4bee94bd9d0 --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpHelper.java @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
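parseEvent() above only yields a SnmpTrapInfo when the rendered log message carries both an "alertType" and a "message" key. A minimal sketch of what the producer side of that contract looks like, assuming the default "//" pair delimiter and "::" key/value delimiter (the logger category matches the one the alert managers use later in this series; the field values are illustrative):

    import org.apache.log4j.Logger;

    public class AlertLogSketch {
        // Category the SNMP appender is expected to be attached to
        private static final Logger s_alertsLogger = Logger.getLogger("org.apache.cloudstack.alerts");

        public static void main(String[] args) {
            short alertType = 14;              // illustrative alert type
            long dataCenterId = 1, podId = 1;
            // Keys and delimiters must match what SnmpEnhancedPatternLayout.parseEvent() scans for
            s_alertsLogger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId
                    + " // podId:: " + podId + " // clusterId:: " + null
                    + " // message:: " + "Management server node 127.0.0.1 is up");
        }
    }

Everything after the last "message::" token is treated as the trap message, so "//" or "::" sequences inside the message body survive, as the layout tests further down verify.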
See the License for the +// specific language governing permissions and limitations +// under the License + +package org.apache.cloudstack.alert.snmp; + +import com.cloud.utils.exception.CloudRuntimeException; +import org.snmp4j.CommunityTarget; +import org.snmp4j.PDU; +import org.snmp4j.Snmp; +import org.snmp4j.mp.SnmpConstants; +import org.snmp4j.smi.OID; +import org.snmp4j.smi.OctetString; +import org.snmp4j.smi.UdpAddress; +import org.snmp4j.smi.UnsignedInteger32; +import org.snmp4j.smi.VariableBinding; +import org.snmp4j.transport.DefaultUdpTransportMapping; + +import java.io.IOException; + +public class SnmpHelper { + private Snmp _snmp; + private CommunityTarget _target; + + public SnmpHelper(String address, String community) { + _target = new CommunityTarget(); + _target.setCommunity(new OctetString(community)); + _target.setVersion(SnmpConstants.version2c); + _target.setAddress(new UdpAddress(address)); + try { + _snmp = new Snmp(new DefaultUdpTransportMapping()); + } catch (IOException e) { + _snmp = null; + throw new CloudRuntimeException(" Error in crearting snmp object, " + e.getMessage()); + } + } + + public void sendSnmpTrap(SnmpTrapInfo snmpTrapInfo) { + try { + if (_snmp != null) { + _snmp.send(createPDU(snmpTrapInfo), _target, null, null); + } + } catch (IOException e) { + throw new CloudRuntimeException(" Error in sending SNMP Trap, " + e.getMessage()); + } + } + + private PDU createPDU(SnmpTrapInfo snmpTrapInfo) { + PDU trap = new PDU(); + trap.setType(PDU.TRAP); + + int alertType = snmpTrapInfo.getAlertType() + 1; + if (alertType > 0) { + trap.add(new VariableBinding(SnmpConstants.snmpTrapOID, getOID(CsSnmpConstants.TRAPS_PREFIX + alertType))); + if (snmpTrapInfo.getDataCenterId() != 0) { + trap.add(new VariableBinding(getOID(CsSnmpConstants.DATA_CENTER_ID), + new UnsignedInteger32(snmpTrapInfo.getDataCenterId()))); + } + + if (snmpTrapInfo.getPodId() != 0) { + trap.add(new VariableBinding(getOID(CsSnmpConstants.POD_ID), new UnsignedInteger32(snmpTrapInfo + .getPodId()))); + } + + if (snmpTrapInfo.getClusterId() != 0) { + trap.add(new VariableBinding(getOID(CsSnmpConstants.CLUSTER_ID), new UnsignedInteger32(snmpTrapInfo + .getClusterId()))); + } + + if (snmpTrapInfo.getMessage() != null) { + trap.add(new VariableBinding(getOID(CsSnmpConstants.MESSAGE), new OctetString(snmpTrapInfo.getMessage + ()))); + } else { + throw new CloudRuntimeException(" What is the use of alert without message "); + } + + if (snmpTrapInfo.getGenerationTime() != null) { + trap.add(new VariableBinding(getOID(CsSnmpConstants.GENERATION_TIME), + new OctetString(snmpTrapInfo.getGenerationTime().toString()))); + } else { + trap.add(new VariableBinding(getOID(CsSnmpConstants.GENERATION_TIME))); + } + } else { + throw new CloudRuntimeException(" Invalid alert Type "); + } + + return trap; + } + + private OID getOID(String oidString) { + return new OID(oidString); + } +} \ No newline at end of file diff --git a/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpTrapAppender.java b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpTrapAppender.java new file mode 100644 index 00000000000..eaa4a132b7e --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpTrapAppender.java @@ -0,0 +1,207 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
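createPDU() above derives the trap OID by appending (alertType + 1) to TRAPS_PREFIX, presumably because CloudStack alert types are zero-based while the trap numbers in CS-ROOT-MIB start at 1. A small sketch of the resulting OID for alert type 14 (ALERT_TYPE_MANAGMENT_NODE in the constants earlier in this patch):

    import org.apache.cloudstack.alert.snmp.CsSnmpConstants;

    public class TrapOidSketch {
        public static void main(String[] args) {
            short alertType = 14;                                  // ALERT_TYPE_MANAGMENT_NODE
            String trapOid = CsSnmpConstants.TRAPS_PREFIX + (alertType + 1);
            System.out.println(trapOid);                           // 1.3.6.1.4.1.18060.15.1.2.0.15
        }
    }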
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License + +package org.apache.cloudstack.alert.snmp; + +import com.cloud.utils.net.NetUtils; +import org.apache.log4j.AppenderSkeleton; +import org.apache.log4j.spi.ErrorCode; +import org.apache.log4j.spi.LoggingEvent; + +import java.util.ArrayList; +import java.util.List; +import java.util.StringTokenizer; + +public class SnmpTrapAppender extends AppenderSkeleton { + private String _delimiter = ","; + private String _snmpManagerIpAddresses; + private String _snmpManagerPorts; + private String _snmpManagerCommunities; + + private String _oldSnmpManagerIpAddresses = null; + private String _oldSnmpManagerPorts = null; + private String _oldSnmpManagerCommunities = null; + + private List _ipAddresses = null; + private List _communities = null; + private List _ports = null; + + List _snmpHelpers = new ArrayList(); + + @Override + protected void append(LoggingEvent event) { + SnmpEnhancedPatternLayout snmpEnhancedPatternLayout; + + if (getLayout() == null) { + errorHandler.error("No layout set for the Appender named [" + getName() + ']', null, + ErrorCode.MISSING_LAYOUT); + return; + } + + if (getLayout() instanceof SnmpEnhancedPatternLayout) { + snmpEnhancedPatternLayout = (SnmpEnhancedPatternLayout) getLayout(); + } else { + return; + } + + if (!isAsSevereAsThreshold(event.getLevel())) { + return; + } + + SnmpTrapInfo snmpTrapInfo = snmpEnhancedPatternLayout.parseEvent(event); + + if (snmpTrapInfo != null && !_snmpHelpers.isEmpty()) { + for (SnmpHelper helper : _snmpHelpers) { + try { + helper.sendSnmpTrap(snmpTrapInfo); + } catch (Exception e) { + errorHandler.error(e.getMessage()); + } + } + } + } + + void setSnmpHelpers() { + if (_snmpManagerIpAddresses == null || _snmpManagerIpAddresses.trim().isEmpty() || _snmpManagerCommunities == + null || _snmpManagerCommunities.trim().isEmpty() || _snmpManagerPorts == null || + _snmpManagerPorts.trim().isEmpty()) { + reset(); + return; + } + + if (_oldSnmpManagerIpAddresses != null && _oldSnmpManagerIpAddresses.equals(_snmpManagerIpAddresses) && + _oldSnmpManagerCommunities.equals(_snmpManagerCommunities) && + _oldSnmpManagerPorts.equals(_snmpManagerPorts)) { + return; + } + + _oldSnmpManagerIpAddresses = _snmpManagerIpAddresses; + _oldSnmpManagerPorts = _snmpManagerPorts; + _oldSnmpManagerCommunities = _snmpManagerCommunities; + + _ipAddresses = parse(_snmpManagerIpAddresses); + _communities = parse(_snmpManagerCommunities); + _ports = parse(_snmpManagerPorts); + + if (!(_ipAddresses.size() == _communities.size() && _ipAddresses.size() == _ports.size())) { + reset(); + errorHandler.error(" size of ip addresses , communities, " + "and ports list doesn't match, " + + "setting all to null"); + return; + } + + if (!validateIpAddresses() || !validatePorts()) { + reset(); + errorHandler.error(" Invalid format for the IP Addresses or Ports parameter "); + return; + } + + String address; + + for (int i = 0; i < _ipAddresses.size(); i++) { + address = _ipAddresses.get(i) + "/" + 
_ports.get(i); + try { + _snmpHelpers.add(new SnmpHelper(address, _communities.get(i))); + } catch (Exception e) { + errorHandler.error(e.getMessage()); + } + } + } + + private void reset() { + _ipAddresses = null; + _communities = null; + _ports = null; + _snmpHelpers.clear(); + } + + @Override + public void close() { + if (!closed) closed = true; + } + + @Override + public boolean requiresLayout() { + return true; + } + + private List parse(String str) { + List result = new ArrayList(); + + final StringTokenizer tokenizer = new StringTokenizer(str, _delimiter); + while (tokenizer.hasMoreTokens()) { + result.add(tokenizer.nextToken().trim()); + } + return result; + } + + private boolean validatePorts() { + for (String port : _ports) { + if (!NetUtils.isValidPort(port)) { + return false; + } + } + return true; + } + + private boolean validateIpAddresses() { + for (String ipAddress : _ipAddresses) { + if (ipAddress.trim().equalsIgnoreCase("localhost")) { + continue; + } + if (!NetUtils.isValidIp(ipAddress)) { + return false; + } + } + return true; + } + + public String getSnmpManagerIpAddresses() { + return _snmpManagerIpAddresses; + } + + public void setSnmpManagerIpAddresses(String snmpManagerIpAddresses) { + this._snmpManagerIpAddresses = snmpManagerIpAddresses; + setSnmpHelpers(); + } + + public String getSnmpManagerPorts() { + return _snmpManagerPorts; + } + + public void setSnmpManagerPorts(String snmpManagerPorts) { + this._snmpManagerPorts = snmpManagerPorts; + setSnmpHelpers(); + } + + public String getSnmpManagerCommunities() { + return _snmpManagerCommunities; + } + + public void setSnmpManagerCommunities(String snmpManagerCommunities) { + this._snmpManagerCommunities = snmpManagerCommunities; + setSnmpHelpers(); + } + + public String getDelimiter() { + return _delimiter; + } + + public void setDelimiter(String delimiter) { + this._delimiter = delimiter; + } +} \ No newline at end of file diff --git a/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpTrapInfo.java b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpTrapInfo.java new file mode 100644 index 00000000000..71bfee02cb6 --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/src/org/apache/cloudstack/alert/snmp/SnmpTrapInfo.java @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
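SnmpTrapAppender only emits traps when its layout is a SnmpEnhancedPatternLayout and when it is attached to the logger category the alert managers write to. A minimal programmatic wiring sketch; in a real deployment this would normally live in log4j configuration, and the manager addresses and community strings below are placeholders:

    import org.apache.cloudstack.alert.snmp.SnmpEnhancedPatternLayout;
    import org.apache.cloudstack.alert.snmp.SnmpTrapAppender;
    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;

    public class SnmpAppenderWiringSketch {
        public static void main(String[] args) {
            SnmpTrapAppender appender = new SnmpTrapAppender();
            appender.setLayout(new SnmpEnhancedPatternLayout());      // append() ignores any other layout
            appender.setThreshold(Level.WARN);                        // alerts are logged at WARN
            appender.setSnmpManagerIpAddresses("10.1.1.1,10.1.1.2");  // placeholder managers
            appender.setSnmpManagerPorts("162,162");
            appender.setSnmpManagerCommunities("public,public");

            // Category used by AlertManagerImpl/UsageAlertManagerImpl later in this series
            Logger.getLogger("org.apache.cloudstack.alerts").addAppender(appender);
        }
    }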
See the License for the +// specific language governing permissions and limitations +// under the License + +package org.apache.cloudstack.alert.snmp; + +import java.util.Date; + +public class SnmpTrapInfo { + private String message; + private long podId; + private long dataCenterId; + private long clusterId; + private Date generationTime; + private short alertType; + + public SnmpTrapInfo() { + } + + public SnmpTrapInfo(short alertType, long dataCenterId, long podId, long clusterId, String message, + Date generationTime) { + this.podId = podId; + this.alertType = alertType; + this.clusterId = clusterId; + this.dataCenterId = dataCenterId; + this.generationTime = generationTime; + this.message = message; + } + + public short getAlertType() { + return alertType; + } + + public void setAlertType(short alertType) { + this.alertType = alertType; + } + + public String getMessage() { + return message; + } + + public void setMessage(String message) { + this.message = message; + } + + public long getPodId() { + return podId; + } + + public void setPodId(long podId) { + this.podId = podId; + } + + public long getDataCenterId() { + return dataCenterId; + } + + public void setDataCenterId(long dataCenterId) { + this.dataCenterId = dataCenterId; + } + + public long getClusterId() { + return clusterId; + } + + public void setClusterId(long clusterId) { + this.clusterId = clusterId; + } + + public Date getGenerationTime() { + return generationTime; + } + + public void setGenerationTime(Date generationTime) { + this.generationTime = generationTime; + } +} \ No newline at end of file diff --git a/plugins/alert-handlers/snmp-alerts/test/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayoutTest.java b/plugins/alert-handlers/snmp-alerts/test/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayoutTest.java new file mode 100644 index 00000000000..b903a1e18b9 --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/test/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayoutTest.java @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
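Outside of log4j, SnmpHelper and the SnmpTrapInfo bean above can also be exercised directly, which is a convenient way to smoke-test a trap receiver. A sketch, assuming a receiver listening on localhost:162 with community "public" (all values are illustrative); note that SnmpHelper expects the UDP address in the "ip/port" form that SnmpTrapAppender builds:

    import java.util.Date;
    import org.apache.cloudstack.alert.snmp.SnmpHelper;
    import org.apache.cloudstack.alert.snmp.SnmpTrapInfo;

    public class SendTrapSketch {
        public static void main(String[] args) {
            SnmpHelper helper = new SnmpHelper("127.0.0.1/162", "public");
            SnmpTrapInfo info = new SnmpTrapInfo((short) 14, 1L, 1L, 0L,
                    "Management server node 127.0.0.1 is up", new Date());
            helper.sendSnmpTrap(info);   // clusterId 0 is simply omitted from the PDU
        }
    }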
See the License for the +// specific language governing permissions and limitations +// under the License + +package org.apache.cloudstack.alert.snmp; + +import org.apache.log4j.spi.LoggingEvent; +import org.junit.Before; +import org.junit.Test; + +import javax.naming.ConfigurationException; + +import static junit.framework.Assert.assertEquals; +import static junit.framework.Assert.assertNotNull; +import static junit.framework.Assert.assertNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SnmpEnhancedPatternLayoutTest { + SnmpEnhancedPatternLayout _snmpEnhancedPatternLayout = new SnmpEnhancedPatternLayout(); + + @Before + public void setUp() throws ConfigurationException { + _snmpEnhancedPatternLayout.setKeyValueDelimiter("::"); + _snmpEnhancedPatternLayout.setPairDelimiter("//"); + } + + @Test + public void parseAlertTest() { + LoggingEvent event = mock(LoggingEvent.class); + setMessage(" alertType:: 14 // dataCenterId:: 1 // podId:: 1 // " + "clusterId:: null // message:: Management" + + " network CIDR is not configured originally. Set it default to 10.102.192.0/22", event); + SnmpTrapInfo info = _snmpEnhancedPatternLayout.parseEvent(event); + commonAssertions(info, "Management network CIDR is not configured originally. Set it default to 10.102.192" + + ".0/22"); + } + + @Test + public void ParseAlertWithPairDelimeterInMessageTest() { + LoggingEvent event = mock(LoggingEvent.class); + setMessage(" alertType:: 14 // dataCenterId:: 1 // podId:: 1 // " + "clusterId:: null // message:: Management" + + " //network CIDR is not configured originally. Set it default to 10.102.192.0/22", event); + SnmpTrapInfo info = _snmpEnhancedPatternLayout.parseEvent(event); + commonAssertions(info, "Management //network CIDR is not configured originally. Set it default to 10.102.192" + + ".0/22"); + } + + @Test + public void ParseAlertWithKeyValueDelimeterInMessageTest() { + LoggingEvent event = mock(LoggingEvent.class); + setMessage(" alertType:: 14 // dataCenterId:: 1 // podId:: 1 // " + "clusterId:: null // message:: Management" + + " ::network CIDR is not configured originally. Set it default to 10.102.192.0/22", event); + SnmpTrapInfo info = _snmpEnhancedPatternLayout.parseEvent(event); + commonAssertions(info, "Management ::network CIDR is not configured originally. 
Set it default to 10.102.192" + + ".0/22"); + } + + @Test + public void parseRandomTest() { + LoggingEvent event = mock(LoggingEvent.class); + when(event.getRenderedMessage()).thenReturn("Problem clearing email alert"); + assertNull(" Null value was expected ", _snmpEnhancedPatternLayout.parseEvent(event)); + } + + private void commonAssertions(SnmpTrapInfo info, String message) { + assertEquals(" alert type not as expected ", 14, info.getAlertType()); + assertEquals(" data center id not as expected ", 1, info.getDataCenterId()); + assertEquals(" pod id os not as expected ", 1, info.getPodId()); + assertEquals(" cluster id is not as expected ", 0, info.getClusterId()); + assertNotNull(" generation time is set to null", info.getGenerationTime()); + assertEquals(" message is not as expected ", message, info.getMessage()); + } + + private void setMessage(String message, LoggingEvent event) { + when(event.getRenderedMessage()).thenReturn(message); + } +} \ No newline at end of file diff --git a/plugins/alert-handlers/snmp-alerts/test/org/apache/cloudstack/alert/snmp/SnmpTrapAppenderTest.java b/plugins/alert-handlers/snmp-alerts/test/org/apache/cloudstack/alert/snmp/SnmpTrapAppenderTest.java new file mode 100644 index 00000000000..2a65d90acc2 --- /dev/null +++ b/plugins/alert-handlers/snmp-alerts/test/org/apache/cloudstack/alert/snmp/SnmpTrapAppenderTest.java @@ -0,0 +1,86 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License + +package org.apache.cloudstack.alert.snmp; + +import org.apache.log4j.spi.LoggingEvent; +import org.junit.Test; +import org.mockito.Mock; + +import java.util.List; + +import static junit.framework.Assert.assertEquals; +import static junit.framework.Assert.assertTrue; +import static org.mockito.Mockito.mock; + +public class SnmpTrapAppenderTest { + SnmpTrapAppender _appender = new SnmpTrapAppender(); + LoggingEvent _event = mock(LoggingEvent.class); + SnmpEnhancedPatternLayout _snmpEnhancedPatternLayout = mock(SnmpEnhancedPatternLayout.class); + @Mock + List snmpHelpers; + + @Test + public void appendTest() { + _appender.setSnmpManagerIpAddresses("10.1.1.1,10.1.1.2"); + _appender.setSnmpManagerPorts("162,164"); + _appender.setSnmpManagerCommunities("public,snmp"); + + _appender.setSnmpHelpers(); + assertEquals(" error snmpHelper list size not as expected ", _appender._snmpHelpers.size(), 2); + } + + @Test + public void InvalidInputTest() { + _appender.setSnmpManagerIpAddresses("10.1.1.1,10.1.1.2"); + _appender.setSnmpManagerPorts("162,164"); + _appender.setSnmpManagerCommunities("public"); + + _appender.setSnmpHelpers(); + assertTrue(" list was expected to be empty", _appender._snmpHelpers.isEmpty()); + } + + @Test + public void InvalidIpInputTest() { + _appender.setSnmpManagerIpAddresses("10.1.1,10.1.1.2"); + _appender.setSnmpManagerPorts("162,164"); + _appender.setSnmpManagerCommunities("public,snmp"); + + _appender.setSnmpHelpers(); + assertTrue(" list was expected to be empty", _appender._snmpHelpers.isEmpty()); + } + + @Test + public void InvalidPortInputTest() { + _appender.setSnmpManagerIpAddresses("10.1.1,10.1.1.2"); + _appender.setSnmpManagerPorts("162,164897489978"); + _appender.setSnmpManagerCommunities("public,snmp"); + + _appender.setSnmpHelpers(); + assertTrue(" list was expected to be empty", _appender._snmpHelpers.isEmpty()); + } + + @Test + public void mismatchListLengthInputTest() { + _appender.setSnmpManagerIpAddresses("10.1.1"); + _appender.setSnmpManagerPorts("162,164"); + _appender.setSnmpManagerCommunities("public,snmp"); + + _appender.setSnmpHelpers(); + assertTrue(" list was expected to be empty", _appender._snmpHelpers.isEmpty()); + } +} \ No newline at end of file diff --git a/plugins/pom.xml b/plugins/pom.xml index 88f617b4560..5d31a72ee91 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -57,6 +57,7 @@ network-elements/dns-notifier storage/image/s3 storage/volume/solidfire + alert-handlers/snmp-alerts diff --git a/server/src/com/cloud/alert/AlertManagerImpl.java b/server/src/com/cloud/alert/AlertManagerImpl.java index f8a8fd8b1b9..a45482fd4ef 100755 --- a/server/src/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/com/cloud/alert/AlertManagerImpl.java @@ -84,6 +84,7 @@ import com.sun.mail.smtp.SMTPTransport; @Local(value={AlertManager.class}) public class AlertManagerImpl extends ManagerBase implements AlertManager { private static final Logger s_logger = Logger.getLogger(AlertManagerImpl.class.getName()); + private static final Logger s_alertsLogger = Logger.getLogger("org.apache.cloudstack.alerts"); private static final long INITIAL_CAPACITY_CHECK_DELAY = 30L * 1000L; // thirty seconds expressed in milliseconds @@ -256,6 +257,9 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager { try { if (_emailAlert != null) { _emailAlert.sendAlert(alertType, dataCenterId, podId, null, subject, body); + } else { + 
s_alertsLogger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + + podId + " // clusterId:: " + null + " // message:: " + subject ); } } catch (Exception ex) { s_logger.error("Problem sending email alert", ex); @@ -789,6 +793,8 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager { // TODO: make sure this handles SSL transport (useAuth is true) and regular public void sendAlert(short alertType, long dataCenterId, Long podId, Long clusterId, String subject, String content) throws MessagingException, UnsupportedEncodingException { + s_alertsLogger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + + podId + " // clusterId:: " + null + " // message:: " + subject); AlertVO alert = null; if ((alertType != AlertManager.ALERT_TYPE_HOST) && (alertType != AlertManager.ALERT_TYPE_USERVM) && diff --git a/usage/src/com/cloud/usage/UsageAlertManagerImpl.java b/usage/src/com/cloud/usage/UsageAlertManagerImpl.java index a0765b2b272..dc918b83b6d 100644 --- a/usage/src/com/cloud/usage/UsageAlertManagerImpl.java +++ b/usage/src/com/cloud/usage/UsageAlertManagerImpl.java @@ -50,11 +50,12 @@ import com.sun.mail.smtp.SMTPTransport; @Local(value={AlertManager.class}) public class UsageAlertManagerImpl extends ManagerBase implements AlertManager { private static final Logger s_logger = Logger.getLogger(UsageAlertManagerImpl.class.getName()); + private static final Logger s_alertsLogger = Logger.getLogger("org.apache.cloudstack.alerts"); private EmailAlert _emailAlert; @Inject private AlertDao _alertDao; @Inject private ConfigurationDao _configDao; - + @Override public boolean configure(String name, Map params) throws ConfigurationException { Map configs = _configDao.getConfiguration("management-server", params); @@ -101,6 +102,9 @@ public class UsageAlertManagerImpl extends ManagerBase implements AlertManager { try { if (_emailAlert != null) { _emailAlert.sendAlert(alertType, dataCenterId, podId, subject, body); + } else { + s_alertsLogger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + + podId + " // clusterId:: " + null + " // message:: " + subject ); } } catch (Exception ex) { s_logger.error("Problem sending email alert", ex); @@ -171,18 +175,19 @@ public class UsageAlertManagerImpl extends ManagerBase implements AlertManager { // TODO: make sure this handles SSL transport (useAuth is true) and regular public void sendAlert(short alertType, long dataCenterId, Long podId, String subject, String content) throws MessagingException, UnsupportedEncodingException { + s_alertsLogger.warn(" alertType:: " + alertType + " // dataCenterId:: " + dataCenterId + " // podId:: " + + podId + " // clusterId:: " + null + " // message:: " + subject); AlertVO alert = null; - if ((alertType != AlertManager.ALERT_TYPE_HOST) && (alertType != AlertManager.ALERT_TYPE_USERVM) && (alertType != AlertManager.ALERT_TYPE_DOMAIN_ROUTER) && (alertType != AlertManager.ALERT_TYPE_CONSOLE_PROXY) && - (alertType != AlertManager.ALERT_TYPE_SSVM) && + (alertType != AlertManager.ALERT_TYPE_SSVM) && (alertType != AlertManager.ALERT_TYPE_STORAGE_MISC) && (alertType != AlertManager.ALERT_TYPE_MANAGMENT_NODE)) { alert = _alertDao.getLastAlert(alertType, dataCenterId, podId); } - + if (alert == null) { // set up a new alert AlertVO newAlert = new AlertVO(); From 6ba0ef1a17e70d64894f7f9b1d5baa886aab651c Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Mon, 18 Mar 2013 18:15:59 +0530 Subject: [PATCH 020/123] 
Removing the test tooltip from Pingpath variable --- ui/scripts/ui-custom/healthCheck.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/scripts/ui-custom/healthCheck.js b/ui/scripts/ui-custom/healthCheck.js index c4c84e5236b..ebb7e5a8903 100644 --- a/ui/scripts/ui-custom/healthCheck.js +++ b/ui/scripts/ui-custom/healthCheck.js @@ -73,7 +73,7 @@ form: { title: '', fields:{ - pingpath: {label: 'Ping Path', docID:'helpAccountUsername' , validation: {required: false}, defaultValue: pingpath1} + pingpath: {label: 'Ping Path', validation: {required: false}, defaultValue: pingpath1} } } }); From 2b05f546b7b882e61d54a653a8c5069be22b1be2 Mon Sep 17 00:00:00 2001 From: Min Chen Date: Fri, 15 Mar 2013 16:28:46 -0700 Subject: [PATCH 021/123] CLOUDSTACK-1690: Remove region_id from sql insert statement for inserting "system" and "admin" account and user in MS startup due to recent region feature change. --- .../cloud/server/ConfigurationServerImpl.java | 30 +++++++------------ 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/server/src/com/cloud/server/ConfigurationServerImpl.java b/server/src/com/cloud/server/ConfigurationServerImpl.java index 8c665ad1eee..3b0ec0f943b 100755 --- a/server/src/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/com/cloud/server/ConfigurationServerImpl.java @@ -106,10 +106,7 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.utils.script.Script; import com.cloud.uuididentity.dao.IdentityDao; -import org.apache.cloudstack.region.RegionVO; -import org.apache.cloudstack.region.dao.RegionDao; -import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; + @Component public class ConfigurationServerImpl extends ManagerBase implements ConfigurationServer { @@ -152,7 +149,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio public void persistDefaultValues() throws InternalErrorException { fixupScriptFileAttribute(); - + // Create system user and admin user saveUser(); @@ -337,23 +334,20 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio @DB protected void saveUser() { - int region_id = _configDao.getRegionId(); // insert system account - String insertSql = "INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, region_id) VALUES (1, UUID(), 'system', '1', '1', ?)"; + String insertSql = "INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id) VALUES (1, UUID(), 'system', '1', '1')"; Transaction txn = Transaction.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); - stmt.setInt(1, region_id); stmt.executeUpdate(); } catch (SQLException ex) { } // insert system user - insertSql = "INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, lastname, created, region_id)" + - " VALUES (1, UUID(), 'system', RAND(), 1, 'system', 'cloud', now(), ?)"; + insertSql = "INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, lastname, created)" + + " VALUES (1, UUID(), 'system', RAND(), 1, 'system', 'cloud', now())"; txn = Transaction.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); - stmt.setInt(1, region_id); stmt.executeUpdate(); } catch (SQLException ex) { } @@ -366,23 +360,21 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio String lastname = "cloud"; // create an account for the admin user first - insertSql = 
"INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, region_id) VALUES (" + id + ", UUID(), '" + username + "', '1', '1', ?)"; + insertSql = "INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id) VALUES (" + id + ", UUID(), '" + username + "', '1', '1')"; txn = Transaction.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); - stmt.setInt(1, region_id); stmt.executeUpdate(); } catch (SQLException ex) { } // now insert the user - insertSql = "INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, lastname, created, state, region_id) " + - "VALUES (" + id + ", UUID(), '" + username + "', RAND(), 2, '" + firstname + "','" + lastname + "',now(), 'disabled', ?)"; + insertSql = "INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, lastname, created, state) " + + "VALUES (" + id + ", UUID(), '" + username + "', RAND(), 2, '" + firstname + "','" + lastname + "',now(), 'disabled')"; txn = Transaction.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); - stmt.setInt(1, region_id); stmt.executeUpdate(); } catch (SQLException ex) { } @@ -708,15 +700,15 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } } - + private void fixupScriptFileAttribute() { - // TODO : this is a hacking fix to workaround that executable bit is not preserved in WAR package + // TODO : this is a hacking fix to workaround that executable bit is not preserved in WAR package String scriptPath = Script.findScript("", "scripts/vm/systemvm/injectkeys.sh"); if(scriptPath != null) { File file = new File(scriptPath); if(!file.canExecute()) { s_logger.info("Some of the shell script files may not have executable bit set. 
Fixup..."); - + String cmd = "chmod ugo+x " + scriptPath; s_logger.info("Executing " + cmd); String result = Script.runSimpleBashScript(cmd); From d9b85e397d10f2291058ff972094decd8fc4ad7d Mon Sep 17 00:00:00 2001 From: Prachi Damle Date: Mon, 18 Mar 2013 12:03:57 -0700 Subject: [PATCH 022/123] CLOUDSTACK-1706 Failed to deploy VM with error "cannot find DeployPlannerSelector" Changes: - Regular plugin/adapter components should usually be loaded at run level RUNLEVEL_COMPONENT(5) - HypervisorVmPlannerSelector was at level 0, while configurationServer at level 2 - causing config to be not loaded for the HypervisorVmPlannerSelector --- .../src/com/cloud/deploy/HypervisorVmPlannerSelector.java | 8 ++++++++ utils/src/com/cloud/utils/component/AdapterBase.java | 4 +++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java b/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java index 8b2a1441151..0f454cdb582 100755 --- a/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java +++ b/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java @@ -18,12 +18,16 @@ package com.cloud.deploy; import javax.ejb.Local; +import org.apache.log4j.Logger; + import com.cloud.deploy.DeploymentPlanner.AllocationAlgorithm; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.vm.UserVmVO; @Local(value = {DeployPlannerSelector.class}) public class HypervisorVmPlannerSelector extends AbstractDeployPlannerSelector { + private static final Logger s_logger = Logger.getLogger(HypervisorVmPlannerSelector.class); + @Override public String selectPlanner(UserVmVO vm) { if (vm.getHypervisorType() != HypervisorType.BareMetal) { @@ -38,6 +42,10 @@ public class HypervisorVmPlannerSelector extends AbstractDeployPlannerSelector { || _allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_firstfit.toString())) { return "UserConcentratedPodPlanner"; } + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("The allocation algorithm is null, cannot select the planner"); + } } } diff --git a/utils/src/com/cloud/utils/component/AdapterBase.java b/utils/src/com/cloud/utils/component/AdapterBase.java index a8f4f468090..ea5e9611ab6 100644 --- a/utils/src/com/cloud/utils/component/AdapterBase.java +++ b/utils/src/com/cloud/utils/component/AdapterBase.java @@ -22,8 +22,10 @@ import java.util.List; public class AdapterBase extends ComponentLifecycleBase implements Adapter { public AdapterBase() { + // set default run level for adapter components + setRunLevel(ComponentLifecycle.RUN_LEVEL_COMPONENT); } - + public static T getAdapterByName(List adapters, String name) { for(T adapter : adapters) { if(adapter.getName() != null && adapter.getName().equalsIgnoreCase(name)) From fb94b72213bf96f2878b90260067f61629c6a956 Mon Sep 17 00:00:00 2001 From: Kelven Yang Date: Mon, 18 Mar 2013 18:05:09 -0700 Subject: [PATCH 023/123] CLOUDSTACK-1568,CLOUDSTACK-1664: Fix ActionEvent interception and optimize @DB lookup with caching --- .../cloud/event/ActionEventInterceptor.java | 15 +++- .../ConsoleProxyHttpHandlerHelper.java | 14 ++- .../component/ComponentMethodProxyCache.java | 90 +++++++++++++++++++ .../component/SpringComponentScanUtils.java | 1 - .../utils/db/TransactionContextBuilder.java | 14 ++- 5 files changed, 119 insertions(+), 15 deletions(-) create mode 100644 utils/src/com/cloud/utils/component/ComponentMethodProxyCache.java diff --git a/server/src/com/cloud/event/ActionEventInterceptor.java 
b/server/src/com/cloud/event/ActionEventInterceptor.java index fb89498ffce..a6c2565510e 100644 --- a/server/src/com/cloud/event/ActionEventInterceptor.java +++ b/server/src/com/cloud/event/ActionEventInterceptor.java @@ -19,22 +19,29 @@ package com.cloud.event; import java.lang.reflect.AnnotatedElement; import java.lang.reflect.Method; +import org.apache.log4j.Logger; import org.aspectj.lang.ProceedingJoinPoint; import org.aspectj.lang.reflect.MethodSignature; import com.cloud.user.UserContext; +import com.cloud.utils.component.ComponentMethodProxyCache; public class ActionEventInterceptor { + private static final Logger s_logger = Logger.getLogger(ActionEventInterceptor.class); public ActionEventInterceptor() { } public Object AroundAnyMethod(ProceedingJoinPoint call) throws Throwable { MethodSignature methodSignature = (MethodSignature)call.getSignature(); - Method targetMethod = methodSignature.getMethod(); - if(needToIntercept(targetMethod)) { + + // Note: AOP for ActionEvent is triggered annotation, no need to check the annotation on method again + Method targetMethod = ComponentMethodProxyCache.getTargetMethod( + methodSignature.getMethod(), call.getTarget()); + + if(targetMethod != null) { EventVO event = interceptStart(targetMethod); - + boolean success = true; Object ret = null; try { @@ -49,6 +56,8 @@ public class ActionEventInterceptor { } } return ret; + } else { + s_logger.error("Unable to find the proxied method behind. Method: " + methodSignature.getMethod().getName()); } return call.proceed(); } diff --git a/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java b/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java index 6815b0d43bc..297e71118ad 100644 --- a/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java +++ b/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java @@ -53,7 +53,7 @@ public class ConsoleProxyHttpHandlerHelper { ConsoleProxyClientParam param = encryptor.decryptObject(ConsoleProxyClientParam.class, map.get("token")); // make sure we get information from token only - map.clear(); + guardUserInput(map); if(param != null) { if(param.getClientHostAddress() != null) map.put("host", param.getClientHostAddress()); @@ -72,9 +72,19 @@ public class ConsoleProxyHttpHandlerHelper { } } else { // we no longer accept information from parameter other than token - map.clear(); + guardUserInput(map); } return map; } + + private static void guardUserInput(Map map) { + map.remove("host"); + map.remove("port"); + map.remove("tag"); + map.remove("sid"); + map.remove("consoleurl"); + map.remove("sessionref"); + map.remove("ticket"); + } } diff --git a/utils/src/com/cloud/utils/component/ComponentMethodProxyCache.java b/utils/src/com/cloud/utils/component/ComponentMethodProxyCache.java new file mode 100644 index 00000000000..ea3b68573cf --- /dev/null +++ b/utils/src/com/cloud/utils/component/ComponentMethodProxyCache.java @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// the License. 
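The lookup used by the interceptor above (and implemented in ComponentMethodProxyCache just below) appears to exist because annotations such as @ActionEvent and @DB are declared on the implementing class, while the AOP join point hands the interceptor the interface's Method object. A minimal, hypothetical illustration (Greeter/GreeterImpl are made-up types, and @Deprecated merely stands in for such a runtime annotation):

    import java.lang.reflect.Method;
    import com.cloud.utils.component.ComponentMethodProxyCache;

    public class ProxyCacheSketch {
        interface Greeter { String greet(String name); }

        static class GreeterImpl implements Greeter {
            @Deprecated  // stand-in for an annotation that is only present on the implementation
            public String greet(String name) { return "hello " + name; }
        }

        public static void main(String[] args) throws Exception {
            Method ifaceMethod = Greeter.class.getMethod("greet", String.class);
            Method implMethod = ComponentMethodProxyCache.getTargetMethod(ifaceMethod, new GreeterImpl());

            System.out.println(ifaceMethod.getAnnotation(Deprecated.class)); // null
            System.out.println(implMethod.getAnnotation(Deprecated.class));  // non-null
        }
    }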
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.utils.component; + +import java.lang.ref.WeakReference; +import java.lang.reflect.Method; +import java.util.WeakHashMap; + +public class ComponentMethodProxyCache { + + private static WeakHashMap> s_cache = new WeakHashMap>(); + + public ComponentMethodProxyCache() { + } + + public static Method getTargetMethod(Method method, Object target) { + synchronized(s_cache) { + WeakReference targetMethod = s_cache.get(new TargetKey(method, target)); + if(targetMethod != null && targetMethod.get() != null) + return targetMethod.get(); + + Class clazz = target.getClass(); + for(Method m : clazz.getMethods()) { + if(isMethodMatched(method, m)) { + s_cache.put(new TargetKey(method, target), new WeakReference(m)); + return m; + } + } + + return method; + } + } + + private static boolean isMethodMatched(Method m1, Method m2) { + if(!m1.getName().equals(m2.getName())) + return false; + + Class[] params1 = m1.getParameterTypes(); + Class[] params2 = m2.getParameterTypes(); + + if(params1.length != params2.length) + return false; + + for(int i = 0; i < params1.length; i++) { + if(!params1[i].isAssignableFrom(params2[i])) + return false; + } + + return true; + } + + public static class TargetKey { + Method _method; + Object _target; + + public TargetKey(Method method, Object target) { + _method = method; + _target = target; + } + + @Override + public boolean equals(Object obj) { + if(!(obj instanceof TargetKey)) + return false; + + // for target object, we just check the reference + return _method.equals(((TargetKey)obj)._method) && + _target == ((TargetKey)obj)._target; + } + + public int hashCode() { + return _target.hashCode() ^ _target.hashCode(); + } + } +} diff --git a/utils/src/com/cloud/utils/component/SpringComponentScanUtils.java b/utils/src/com/cloud/utils/component/SpringComponentScanUtils.java index fda11b74609..9a85c79fa80 100644 --- a/utils/src/com/cloud/utils/component/SpringComponentScanUtils.java +++ b/utils/src/com/cloud/utils/component/SpringComponentScanUtils.java @@ -38,5 +38,4 @@ public class SpringComponentScanUtils { } return false; } - } diff --git a/utils/src/com/cloud/utils/db/TransactionContextBuilder.java b/utils/src/com/cloud/utils/db/TransactionContextBuilder.java index e03b25f912d..7ca33ab5f5d 100644 --- a/utils/src/com/cloud/utils/db/TransactionContextBuilder.java +++ b/utils/src/com/cloud/utils/db/TransactionContextBuilder.java @@ -22,9 +22,10 @@ import org.aopalliance.intercept.MethodInterceptor; import org.aopalliance.intercept.MethodInvocation; import org.apache.log4j.Logger; import org.aspectj.lang.ProceedingJoinPoint; -import org.aspectj.lang.Signature; import org.aspectj.lang.reflect.MethodSignature; +import com.cloud.utils.component.ComponentMethodProxyCache; + public class TransactionContextBuilder implements MethodInterceptor { private static final Logger s_logger = Logger.getLogger(TransactionContextBuilder.class); public TransactionContextBuilder() { @@ -72,14 +73,9 @@ public class TransactionContextBuilder implements MethodInterceptor { Class clazz = method.getDeclaringClass(); if(clazz.isInterface()) { clazz = 
target.getClass(); - for(Method m : clazz.getMethods()) { - // it is supposed that we need to check against type arguments, - // this can be simplified by just checking method name - if(m.getName().equals(method.getName())) { - if(m.getAnnotation(DB.class) != null) - return true; - } - } + Method targetMethod = ComponentMethodProxyCache.getTargetMethod(method, target); + if(targetMethod != null && targetMethod.getAnnotation(DB.class) != null) + return true; } do { From beb15af0f267c59b7e070d279e1df960701b6cf6 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Tue, 19 Mar 2013 12:21:19 +0530 Subject: [PATCH 024/123] CLOUDSTACK-1720: Add upgrade paths from 4.0.x to 4.2.0 Signed-off-by: Rohit Yadav --- server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java b/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java index 5bd749fe842..8f9be0f5d57 100755 --- a/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -162,6 +162,10 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { _upgradeMap.put("4.0.0", new DbUpgrade[] { new Upgrade40to41(), new Upgrade410to420() }); + _upgradeMap.put("4.0.1", new DbUpgrade[] { new Upgrade40to41(), new Upgrade410to420() }); + + _upgradeMap.put("4.0.2", new DbUpgrade[] { new Upgrade40to41(), new Upgrade410to420() }); + _upgradeMap.put("4.1.0", new DbUpgrade[] { new Upgrade410to420() }); } From 19d1a30360750df280e46e1da5cf138625243852 Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Tue, 19 Mar 2013 15:35:31 +0530 Subject: [PATCH 025/123] Dv switch UI space removal changes --- ui/scripts/system.js | 4 ++-- ui/scripts/zoneWizard.js | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ui/scripts/system.js b/ui/scripts/system.js index 4d529aeb04e..c0a5d141700 100644 --- a/ui/scripts/system.js +++ b/ui/scripts/system.js @@ -7597,7 +7597,7 @@ if(vSwitchEnabled) { - items.push({ id:" nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); + items.push({ id:"nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); items.push({id: "vmwaresvs", description: "VMware vNetwork Standard Virtual Switch"}); items.push({id: "vmwaredvs", description: "VMware vNetwork Distributed Virtual Switch"}); @@ -7610,7 +7610,7 @@ else{ items.push({id: "vmwaredvs", description: "VMware vNetwork Distributed Virtual Switch"}); items.push({id: "vmwaresvs", description: "VMware vNetwork Standard Virtual Switch"}); - items.push({ id:" nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); + items.push({ id:"nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); } args.response.success({data: items}); diff --git a/ui/scripts/zoneWizard.js b/ui/scripts/zoneWizard.js index 47932664927..5108c5c0c3b 100755 --- a/ui/scripts/zoneWizard.js +++ b/ui/scripts/zoneWizard.js @@ -897,7 +897,7 @@ if(vSwitchEnabled) { - items.push({ id:" nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); + items.push({ id:"nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); items.push({id: "vmwaresvs", description: "VMware vNetwork Standard Virtual Switch"}); items.push({id: "vmwaredvs", description: "VMware vNetwork Distributed Virtual Switch"}); @@ -909,9 +909,9 @@ // items.push({id: " ", description: " "}); else{ - items.push({id: "vmwaredvs", description: "VMware vNetwork 
Distributed Virtual Switch"}); - items.push({id: "vmwaresvs", description: "VMware vNetwork Standard Virtual Switch"}); - items.push({ id:" nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); + items.push({id:"vmwaredvs", description: "VMware vNetwork Distributed Virtual Switch"}); + items.push({ id: "vmwaresvs", description: "VMware vNetwork Standard Virtual Switch"}); + items.push({ id:"nexusdvs" , description: "Cisco Nexus 1000v Distributed Virtual Switch"}); } args.response.success({data: items}); From 93bc669a0e2fdfc5860e72113e7a51866c93ab2c Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Tue, 19 Mar 2013 15:34:20 +0530 Subject: [PATCH 026/123] cli: Pass config file by value in cloudmonkey's config Signed-off-by: Rohit Yadav --- tools/cli/cloudmonkey/config.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/tools/cli/cloudmonkey/config.py b/tools/cli/cloudmonkey/config.py index 6a5feab8d12..4a4aa99cdc8 100644 --- a/tools/cli/cloudmonkey/config.py +++ b/tools/cli/cloudmonkey/config.py @@ -19,6 +19,12 @@ # Use following rules for versioning: # - __version__ = "4.1.0-0" +__description__ = "Command Line Interface for Apache CloudStack" +__maintainer__ = "Rohit Yadav" +__maintaineremail__ = "bhaisaab@apache.org" +__project__ = "The Apache CloudStack Team" +__projectemail__ = "cloudstack-dev@incubator.apache.org" +__projecturl__ = "http://incubator.apache.org/cloudstack" try: import os @@ -36,14 +42,14 @@ iterable_type = ['set', 'list', 'object'] config_dir = expanduser('~/.cloudmonkey') config_file = expanduser(config_dir + '/config') -cache_file = expanduser(config_dir + '/cache') # cloudmonkey config fields -config_fields = {'core': {}, 'ui': {}, 'server': {}, 'user': {}} +config_fields = {'core': {}, 'server': {}, 'user': {}, 'ui': {}} # core config_fields['core']['asyncblock'] = 'true' config_fields['core']['paramcompletion'] = 'false' +config_fields['core']['cache_file'] = expanduser(config_dir + '/cache') config_fields['core']['history_file'] = expanduser(config_dir + '/history') config_fields['core']['log_file'] = expanduser(config_dir + '/log') @@ -64,8 +70,8 @@ config_fields['user']['apikey'] = '' config_fields['user']['secretkey'] = '' -def write_config(get_attr, first_time=False): - global config_fields, config_file +def write_config(get_attr, config_file, first_time=False): + global config_fields config = ConfigParser() for section in config_fields.keys(): config.add_section(section) @@ -79,8 +85,8 @@ def write_config(get_attr, first_time=False): return config -def read_config(get_attr, set_attr): - global config_fields, config_dir, config_file +def read_config(get_attr, set_attr, config_file): + global config_fields, config_dir if not os.path.exists(config_dir): os.makedirs(config_dir) @@ -95,7 +101,7 @@ def read_config(get_attr, set_attr): except IOError, e: print "Error: config_file not found", e else: - config = write_config(get_attr, True) + config = write_config(get_attr, config_file, True) print "Welcome! 
Using `set` configure the necessary settings:" print " ".join(sorted(config_options)) print "Config file:", config_file From e28aa09f01fcaf6abd806ab94d962563e1fba6c2 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Tue, 19 Mar 2013 15:47:51 +0530 Subject: [PATCH 027/123] cli: cachemaker should import config and not the field Signed-off-by: Rohit Yadav --- tools/cli/cloudmonkey/cachemaker.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/cli/cloudmonkey/cachemaker.py b/tools/cli/cloudmonkey/cachemaker.py index 42a077ad928..8ac123caa4b 100644 --- a/tools/cli/cloudmonkey/cachemaker.py +++ b/tools/cli/cloudmonkey/cachemaker.py @@ -21,7 +21,7 @@ try: import os import types - from config import cache_file + from config import config_fields except ImportError, e: import sys print "ImportError", e @@ -168,6 +168,7 @@ def main(json_file): f.close() if __name__ == "__main__": + cache_file = config_fields['core']['cache_file'] print "[cachemaker] Pre-caching using user's cloudmonkey cache", cache_file if os.path.exists(cache_file): main(cache_file) From 4421f2bb60ea44a6b28593eb7e0254d5108919ff Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Tue, 19 Mar 2013 15:49:42 +0530 Subject: [PATCH 028/123] CLOUDSTACK-1708: Let cloudmonkey accept cfg passed in cmd line The patch adds feature in cloudmonkey to have multiple profiles by passing custom cfg file to set custom profile in both interactive shell and cmd line tool use cases. Signed-off-by: Rohit Yadav --- tools/cli/cloudmonkey/cloudmonkey.py | 47 +++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 8 deletions(-) diff --git a/tools/cli/cloudmonkey/cloudmonkey.py b/tools/cli/cloudmonkey/cloudmonkey.py index 25422412613..f88632d828c 100644 --- a/tools/cli/cloudmonkey/cloudmonkey.py +++ b/tools/cli/cloudmonkey/cloudmonkey.py @@ -29,8 +29,9 @@ try: import types from cachemaker import loadcache, savecache, monkeycache, splitverbsubject - from config import __version__, cache_file - from config import read_config, write_config + from config import __version__, __description__, __projecturl__ + from config import read_config, write_config, config_file + from optparse import OptionParser from prettytable import PrettyTable from printer import monkeyprint from requester import monkeyrequest @@ -63,13 +64,14 @@ class CloudMonkeyShell(cmd.Cmd, object): intro = ("☁ Apache CloudStack 🐵 cloudmonkey " + __version__ + ". Type help or ? 
to list commands.\n") ruler = "=" - cache_file = cache_file config_options = [] verbs = [] - def __init__(self, pname): + def __init__(self, pname, cfile): self.program_name = pname - self.config_options = read_config(self.get_attr, self.set_attr) + self.config_file = cfile + self.config_options = read_config(self.get_attr, self.set_attr, + self.config_file) self.loadcache() self.prompt = self.prompt.strip() + " " # Cosmetic fix for prompt @@ -364,7 +366,7 @@ class CloudMonkeyShell(cmd.Cmd, object): key, value = (args[0], args[2]) setattr(self, key, value) # keys and attributes should have same names self.prompt = self.prompt.strip() + " " # prompt fix - write_config(self.get_attr) + write_config(self.get_attr, self.config_file) def complete_set(self, text, line, begidx, endidx): mline = line.partition(" ")[2] @@ -458,10 +460,39 @@ class CloudMonkeyShell(cmd.Cmd, object): return self.do_EOF(args) +class MonkeyParser(OptionParser): + def format_help(self, formatter=None): + if formatter is None: + formatter = self.formatter + result = [] + if self.usage: + result.append("Usage: cloudmonkey [options] [cmds] [params]\n\n") + if self.description: + result.append(self.format_description(formatter) + "\n") + result.append(self.format_option_help(formatter)) + result.append("\nTry cloudmonkey [help|?]\n") + return "".join(result) + + def main(): - shell = CloudMonkeyShell(sys.argv[0]) + parser = MonkeyParser() + parser.add_option("-c", "--config-file", + dest="cfile", default=config_file, + help="config file for cloudmonkey", metavar="FILE") + parser.add_option("-v", "--version", + action="store_true", dest="version", default=False, + help="prints cloudmonkey version information") + + (options, args) = parser.parse_args() + print 'args', args + print 'options', options + if options.version: + print "cloudmonkey", __version__ + print __description__, "(%s)" % __projecturl__ + + shell = CloudMonkeyShell(sys.argv[0], options.cfile) if len(sys.argv) > 1: - shell.onecmd(' '.join(sys.argv[1:])) + shell.onecmd(' '.join(args)) else: shell.cmdloop() From 19a290500e17765a67eddc0d61769e483ee3d7f7 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Tue, 19 Mar 2013 15:52:03 +0530 Subject: [PATCH 029/123] cli: Import project info from config in setup.py, fix debug msgs Signed-off-by: Rohit Yadav lsdfjk Signed-off-by: Rohit Yadav --- tools/cli/cloudmonkey/__init__.py | 4 +++- tools/cli/cloudmonkey/cloudmonkey.py | 2 -- tools/cli/setup.py | 34 +++++++++++++--------------- 3 files changed, 19 insertions(+), 21 deletions(-) diff --git a/tools/cli/cloudmonkey/__init__.py b/tools/cli/cloudmonkey/__init__.py index e4c4e6d24f1..cf689e79480 100644 --- a/tools/cli/cloudmonkey/__init__.py +++ b/tools/cli/cloudmonkey/__init__.py @@ -16,6 +16,8 @@ # under the License. 
try: - from config import __version__ + from config import __version__, __description__ + from config import __maintainer__, __maintaineremail__ + from config import __project__, __projecturl__, __projectemail__ except ImportError, e: print e diff --git a/tools/cli/cloudmonkey/cloudmonkey.py b/tools/cli/cloudmonkey/cloudmonkey.py index f88632d828c..f750c3afa8d 100644 --- a/tools/cli/cloudmonkey/cloudmonkey.py +++ b/tools/cli/cloudmonkey/cloudmonkey.py @@ -484,8 +484,6 @@ def main(): help="prints cloudmonkey version information") (options, args) = parser.parse_args() - print 'args', args - print 'options', options if options.version: print "cloudmonkey", __version__ print __description__, "(%s)" % __projecturl__ diff --git a/tools/cli/setup.py b/tools/cli/setup.py index 9624115ed5f..4c7b2978b2f 100644 --- a/tools/cli/setup.py +++ b/tools/cli/setup.py @@ -22,13 +22,9 @@ except ImportError: use_setuptools() from setuptools import setup, find_packages -from cloudmonkey import __version__ - -name = 'cloudmonkey' -version = __version__ -requires = ['Pygments>=1.5', - 'prettytable>=0.6', - ] +from cloudmonkey import __version__, __description__ +from cloudmonkey import __maintainer__, __maintaineremail__ +from cloudmonkey import __project__, __projecturl__, __projectemail__ try: import readline @@ -36,20 +32,22 @@ except ImportError: requires.append('readline') setup( - name = name, - version = version, - author = "The Apache CloudStack Team", - author_email = "cloudstack-dev@incubator.apache.org", - maintainer = "Rohit Yadav", - maintainer_email = "bhaisaab@apache.org", - url = "http://incubator.apache.org/cloudstack", - description = "Command Line Interface for Apache CloudStack", - long_description = "cloudmonkey is a command line interface for Apache " - "CloudStack powered by CloudStack Marvin", + name = 'cloudmonkey', + version = __version__, + author = __project__, + author_email = __projectemail__, + maintainer = __maintainer__, + maintainer_email = __maintaineremail__, + url = __projecturl__, + description = __description__, + long_description = "cloudmonkey is a CLI for Apache CloudStack", platforms = ("Any",), license = 'ASL 2.0', packages = find_packages(), - install_requires = requires, + install_requires = [ + 'Pygments>=1.5', + 'prettytable>=0.6', + ], include_package_data = True, zip_safe = False, classifiers = [ From c02ab3f4d7a502eadca76026bdeeec40f00ca8ee Mon Sep 17 00:00:00 2001 From: Anshul Gangwar Date: Tue, 19 Mar 2013 16:08:11 +0530 Subject: [PATCH 030/123] CLOUDSTACK-1723:Fix for dashboard alerts --- ui/scripts/sharedFunctions.js | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/ui/scripts/sharedFunctions.js b/ui/scripts/sharedFunctions.js index ad26b34196e..8bcdff91574 100644 --- a/ui/scripts/sharedFunctions.js +++ b/ui/scripts/sharedFunctions.js @@ -400,20 +400,28 @@ cloudStack.converters = { case 4 : return _l('label.public.ips'); case 5 : return _l('label.management.ips'); case 6 : return _l('label.secondary.storage'); - case 7 : return _l('label.vlan'); - case 8 : return _l('label.direct.ips'); - case 9 : return _l('label.local.storage'); + case 7 : return _l('label.host'); + case 9 : return _l('label.domain.router'); + case 10 : return _l('label.console.proxy'); - // These are old values -- can be removed in the future - case 10 : return "Routing Host"; - case 11 : return "Storage"; - case 12 : return "Usage Server"; - case 13 : return "Management Server"; - case 14 : return "Domain Router"; - case 15 : return 
"Console Proxy"; - case 16 : return "User VM"; - case 17 : return "VLAN"; - case 18 : return "Secondary Storage VM"; + // These are old values -- can be removed in the future + case 8 : return "User VM"; + case 11 : return "Routing Host"; + case 12 : return "Storage"; + case 13 : return "Usage Server"; + case 14 : return "Management Server"; + case 15 : return "Domain Router"; + case 16 : return "Console Proxy"; + case 17 : return "User VM"; + case 18 : return "VLAN"; + case 19 : return "Secondary Storage VM"; + case 20 : return "Usage Server"; + case 21 : return "Storage"; + case 22 : return "Update Resource Count"; + case 23 : return "Usage Sanity Result"; + case 24 : return "Direct Attached Public IP"; + case 25 : return "Local Storage"; + case 26 : return "Resource Limit Exceeded"; } }, convertByType: function(alertCode, value) { From 217ebf20a62006c999505ff1535a7bbf908b7763 Mon Sep 17 00:00:00 2001 From: Kishan Kavala Date: Tue, 19 Mar 2013 14:24:12 +0530 Subject: [PATCH 031/123] CLOUDSTACK-1717, CLOUDSTACK-1718: Corrected RemoveRegion response. Removed /api in Local region end_point --- .../cloudstack/api/command/admin/region/RemoveRegionCmd.java | 2 +- setup/db/db/schema-40to410.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/api/src/org/apache/cloudstack/api/command/admin/region/RemoveRegionCmd.java b/api/src/org/apache/cloudstack/api/command/admin/region/RemoveRegionCmd.java index 79c34d0690f..d2b696d2b6b 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/region/RemoveRegionCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/region/RemoveRegionCmd.java @@ -33,7 +33,7 @@ import com.cloud.user.Account; @APICommand(name = "removeRegion", description="Removes specified region", responseObject=SuccessResponse.class) public class RemoveRegionCmd extends BaseCmd { public static final Logger s_logger = Logger.getLogger(RemoveRegionCmd.class.getName()); - private static final String s_name = "updateregionresponse"; + private static final String s_name = "removeregionresponse"; ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql index 865fbd3181c..0f316a5acdd 100644 --- a/setup/db/db/schema-40to410.sql +++ b/setup/db/db/schema-40to410.sql @@ -261,7 +261,7 @@ CREATE TABLE `cloud`.`region` ( PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -INSERT INTO `cloud`.`region` values ('1','Local','http://localhost:8080/client/api'); +INSERT INTO `cloud`.`region` values ('1','Local','http://localhost:8080/client'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Account Defaults', 'DEFAULT', 'management-server', 'max.account.cpus', '40', 'The default maximum number of cpu cores that can be used for an account'); From 0b7a4e04aaf2228a41bfb39d2699184220e1cc7d Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Tue, 19 Mar 2013 18:04:31 +0530 Subject: [PATCH 032/123] cli: Check args from optparser and not sys.argv Signed-off-by: Rohit Yadav --- tools/cli/cloudmonkey/cloudmonkey.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/cli/cloudmonkey/cloudmonkey.py b/tools/cli/cloudmonkey/cloudmonkey.py index f750c3afa8d..e94d53091ac 100644 --- a/tools/cli/cloudmonkey/cloudmonkey.py +++ b/tools/cli/cloudmonkey/cloudmonkey.py @@ -489,7 +489,7 @@ def main(): print __description__, "(%s)" % __projecturl__ shell = CloudMonkeyShell(sys.argv[0], options.cfile) - if len(sys.argv) > 1: + if len(args) > 1: 
shell.onecmd(' '.join(args)) else: shell.cmdloop() From 90678c3d14d31617108ed30c4c06d3f1ee65a3e4 Mon Sep 17 00:00:00 2001 From: Kishan Kavala Date: Tue, 19 Mar 2013 18:16:36 +0530 Subject: [PATCH 033/123] CLOUDSTACK-198: Included VpnUsers in Add state while listing. Added state to Vpn user response. --- .../cloudstack/api/response/VpnUsersResponse.java | 10 ++++++++++ server/src/com/cloud/api/ApiResponseHelper.java | 1 + .../cloud/network/vpn/RemoteAccessVpnManagerImpl.java | 4 ++-- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/api/src/org/apache/cloudstack/api/response/VpnUsersResponse.java b/api/src/org/apache/cloudstack/api/response/VpnUsersResponse.java index e654e8a522a..c29a37e7a48 100644 --- a/api/src/org/apache/cloudstack/api/response/VpnUsersResponse.java +++ b/api/src/org/apache/cloudstack/api/response/VpnUsersResponse.java @@ -48,6 +48,8 @@ public class VpnUsersResponse extends BaseResponse implements ControlledEntityRe @SerializedName(ApiConstants.PROJECT) @Param(description="the project name of the vpn") private String projectName; + @SerializedName(ApiConstants.STATE) @Param(description="the state of the Vpn User") + private String state; public void setId(String id) { this.id = id; @@ -80,4 +82,12 @@ public class VpnUsersResponse extends BaseResponse implements ControlledEntityRe this.projectName = projectName; } + public String getState() { + return state; + } + + public void setState(String state) { + this.state = state; + } + } diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java index ebf0fcf73d3..663139da41f 100755 --- a/server/src/com/cloud/api/ApiResponseHelper.java +++ b/server/src/com/cloud/api/ApiResponseHelper.java @@ -1108,6 +1108,7 @@ public class ApiResponseHelper implements ResponseGenerator { VpnUsersResponse vpnResponse = new VpnUsersResponse(); vpnResponse.setId(vpnUser.getUuid()); vpnResponse.setUserName(vpnUser.getUsername()); + vpnResponse.setState(vpnUser.getState().toString()); populateOwner(vpnResponse, vpnUser); diff --git a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java index 82c0015e317..d64a0212b46 100755 --- a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java +++ b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java @@ -511,13 +511,13 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); sb.and("username", sb.entity().getUsername(), SearchCriteria.Op.EQ); - sb.and("state", sb.entity().getState(), SearchCriteria.Op.EQ); + sb.and("state", sb.entity().getState(), Op.IN); SearchCriteria sc = sb.create(); _accountMgr.buildACLSearchCriteria(sc, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria); //list only active users - sc.setParameters("state", State.Active); + sc.setParameters("state", State.Active, State.Add); if (id != null) { sc.setParameters("id", id); From 7acdb17228582c62e3c30c3c712d58d330f54161 Mon Sep 17 00:00:00 2001 From: radhikap Date: Tue, 19 Mar 2013 19:41:21 +0530 Subject: [PATCH 034/123] IPv6 support doc --- docs/en-US/changed-apicommands-4.1.xml | 130 ++++++++++++++++- docs/en-US/ipv6-support.xml | 191 +++++++++++++++++++++++++ docs/en-US/whats-new.xml | 3 +- 3 files changed, 320 insertions(+), 4 deletions(-) create mode 100644 docs/en-US/ipv6-support.xml diff --git a/docs/en-US/changed-apicommands-4.1.xml 
b/docs/en-US/changed-apicommands-4.1.xml index 42bd088afb3..f0045a56de3 100644 --- a/docs/en-US/changed-apicommands-4.1.xml +++ b/docs/en-US/changed-apicommands-4.1.xml @@ -34,15 +34,139 @@ createNetworkOffering + + + The following request parameters have been added: + + + isPersistent + + + startipv6 + + + endipv6 + + + ip6gateway + + + ip6cidr + + + + + + listNetworkOfferings listNetworks - The following request parameters is added: isPersistent. - This parameter determines if the network or network offering created or listed by - using this offering are persistent or not. + The following request parameters has been added: isPersistent + This parameter determines if the network or network offering listed by using this + offering are persistent or not. + + + createVlanIpRange + + + The following request parameters have been added: + + + startipv6 + + + endipv6 + + + ip6gateway + + + ip6cidr + + + + + + + deployVirtualMachine + + + The following parameter has been added: ip6Address. + The following parameter is updated to accept the IPv6 address: + iptonetworklist. + + + + + CreateZoneCmd + + + The following parameter are added: ip6dns1, ip6dns2. + + + + + listRouters + listVirtualMachines + + + For nic responses, the following fields has been added. + + + ip6address + + + ip6gateway + + + ip6cidr + + + + + + + listVlanIpRanges + + + For nic responses, the following fields has been added. + + + startipv6 + + + endipv6 + + + ip6gateway + + + ip6cidr + + + + + + + listRouters + listZones + + + For DomainRouter and DataCenter response, the following fields have been + added. + + + ip6dns1 + + + ip6dns2 + + + + addF5LoadBalancer diff --git a/docs/en-US/ipv6-support.xml b/docs/en-US/ipv6-support.xml new file mode 100644 index 00000000000..22a5d7a5370 --- /dev/null +++ b/docs/en-US/ipv6-support.xml @@ -0,0 +1,191 @@ + + +%BOOK_ENTITIES; +]> + + +

+ IPv6 Support in &PRODUCT; + &PRODUCT;supports Internet Protocol version 6 (IPv6), the recent version of the Internet + Protocol (IP) that defines routing the network traffic. IPv6 uses a 128-bit address that + exponentially expands the current address space that is available to the users. IPv6 addresses + consist of eight groups of four hexadecimal digits separated by colons, for example, + 5001:0dt8:83a3:1012:1000:8s2e:0870:7454. &PRODUCT; supports IPv6 for public IPs in shared + networks. With IPv6 support, VMs in shared networks can obtain both IPv4 and IPv6 addresses from + the DHCP server. You can deploy VMs either in a IPv6 or IPv4 network, or in a dual network + environment. If IPv6 network is used, the VM generates a link-local IPv6 address by itself, and + receives a stateful IPv6 address from the DHCPv6 server. + IPv6 is supported only on KVM and XenServer hypervisors. The IPv6 support is only an + experimental feature. + Here's the sequence of events when IPv6 is used: + + + The administrator creates an IPv6 shared network in an advanced zone. + + + The user deploys a VM in an IPv6 shared network. + + + The user VM generates an IPv6 link local address by itself, and gets an IPv6 global or + site local address through DHCPv6. + For information on API changes, see . + + +
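+    To make the new parameters concrete, the following is only an illustrative sketch of an
+    administrator provisioning the IPv6 range for a shared network through the API. The
+    createVlanIpRange parameter names match the API changes listed earlier in this document,
+    while the network ID, the query form, and the 2001:db8:: addresses are placeholders chosen
+    to satisfy the /64 CIDR requirement described in the prerequisites below.
+    command=createVlanIpRange&networkid=<shared-network-uuid>&startipv6=2001:db8::100&endipv6=2001:db8::1ff&ip6gateway=2001:db8::1&ip6cidr=2001:db8::/64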
+ Prerequisites and Guidelines + Consider the following: + + + CIDR size must be 64 for IPv6 networks. + + + The DHCP client of the guest VMs should support generating DUID based on Link-layer + Address (DUID- LL). DUID-LL derives from the MAC address of guest VMs, and therefore the + user VM can be identified by using DUID. See Dynamic Host Configuration Protocol for IPv6 + for more information. + + + The gateway of the guest network generates Router Advisement and Response messages to + Router Solicitation. The M (Managed Address Configuration) flag of Router Advisement + should enable stateful IP address configuration. Set the M flag to where the end nodes + receive their IPv6 addresses from the DHCPv6 server as opposed to the router or + switch. + + The M flag is the 1-bit Managed Address Configuration flag for Router Advisement. + When set, Dynamic Host Configuration Protocol (DHCPv6) is available for address + configuration in addition to any IPs set by using stateless address + auto-configuration. + + + + Use the System VM template exclusively designed to support IPv6. Download the System + VM template from http://nfs1.lab.vmops.com/templates/routing/debian/ipv6/. + + + The concept of Default Network applies to IPv6 networks. However, unlike IPv4 + &PRODUCT; does not control the routing information of IPv6 in shared network; the choice + of Default Network will not affect the routing in the user VM. + + + In a multiple shared network, the default route is set by the rack router, rather than + the DHCP server, which is out of &PRODUCT; control. Therefore, in order for the user VM to + get only the default route from the default NIC, modify the configuration of the user VM, + and set non-default NIC's accept_ra to 0 explicitly. The + accept_ra parameter accepts Router Advertisements and auto-configure + /proc/sys/net/ipv6/conf/interface with received data. + + +
+
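+    As a sketch of the accept_ra guideline above, and assuming eth1 is a non-default NIC inside
+    the guest, either of the following disables acceptance of Router Advertisements on that
+    interface (add the sysctl line to /etc/sysctl.conf to make the setting persistent):
+    sysctl -w net.ipv6.conf.eth1.accept_ra=0
+    echo 0 > /proc/sys/net/ipv6/conf/eth1/accept_ra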
+ Limitations of IPv6 in &PRODUCT; + The following are not yet supported: + + + Security groups + + + Userdata and metadata + + + Passwords + + + The administrator cannot specify the IPv6 address of a VM. + + +
+
+ Network Configuration for DHCPv6 + Use DUID-LL to get IPv6 address from DHCP server + + + Set up dhclient by using DUID-LL. + Perform the following for DHCP Client 4.2 and above: + + + Run the following command on the selected VM to get the dhcpv6 offer from + VR: + dhclient -6 -D LL <dev> + + + Perform the following for DHCP Client 4.1: + + + Open the following to the dhclient configuration file: + vi /etc/dhcp/dhclient.conf + + + Add the following to the dhclient configuration file: + send dhcp6.client-id = concat(00:03:00, hardware); + + + + + Get IPv6 address from DHCP server as part of the system or network restart. + Based on the operating systems, perform the following: + On CentOS 6.2: + + + Open the Ethernet interface configuration file: + vi /etc/sysconfig/network-scripts/ifcfg-eth0 + The ifcfg-eth0 file controls the first NIC in a system. + + + Make the necessary configuration changes, as given below: + DEVICE=eth0 +HWADDR=06:A0:F0:00:00:38 +NM_CONTROLLED=no +ONBOOT=yes +BOOTPROTO=dhcp6 +TYPE=Ethernet +USERCTL=no +PEERDNS=yes +IPV6INIT=yes +DHCPV6C=yes + + + Open the following: + vi /etc/sysconfig/network + + + Make the necessary configuration changes, as given below: + NETWORKING=yes +HOSTNAME=centos62mgmt.lab.vmops.com +NETWORKING_IPV6=yes +IPV6_AUTOCONF=no + + + On Ubuntu 12.10 + + + Open the following: + etc/network/interfaces: + + + Make the necessary configuration changes, as given below: + iface eth0 inet6 dhcp +autoconf 0 +accept_ra 1 + + + + +
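+    As a quick, hedged verification sketch (standard Linux tools, eth0 assumed as the guest NIC,
+    DHCP client 4.2 or later): request a lease using DUID-LL, then confirm the address and the
+    routes that were assigned.
+    dhclient -6 -D LL eth0
+    ip -6 addr show dev eth0
+    ip -6 route show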
+
diff --git a/docs/en-US/whats-new.xml b/docs/en-US/whats-new.xml index 761d7a2eb37..252f87d0543 100644 --- a/docs/en-US/whats-new.xml +++ b/docs/en-US/whats-new.xml @@ -1,5 +1,5 @@ - %BOOK_ENTITIES; ]> @@ -25,6 +25,7 @@
What's New in the API for 4.1 + From ba69ce0c8911eb7186774d2ddbae97483e65d48c Mon Sep 17 00:00:00 2001 From: Gavin Lee Date: Tue, 19 Mar 2013 22:23:09 +0800 Subject: [PATCH 035/123] CLOUDSTACK-1599:Convert "Best Practices" section to XML and add to Install Guide --- docs/en-US/Installation_Guide.xml | 1 + docs/en-US/best-practices.xml | 82 +++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+) create mode 100644 docs/en-US/best-practices.xml diff --git a/docs/en-US/Installation_Guide.xml b/docs/en-US/Installation_Guide.xml index 2f60acac984..f2f27ad9621 100644 --- a/docs/en-US/Installation_Guide.xml +++ b/docs/en-US/Installation_Guide.xml @@ -57,5 +57,6 @@ + diff --git a/docs/en-US/best-practices.xml b/docs/en-US/best-practices.xml new file mode 100644 index 00000000000..41d7cde9036 --- /dev/null +++ b/docs/en-US/best-practices.xml @@ -0,0 +1,82 @@ + + +%BOOK_ENTITIES; +]> + + + + + Best Practices + Deploying a cloud is challenging. There are many different technology choices to make, and &PRODUCT; is flexible enough in its configuration that there are many possible ways to combine and configure the chosen technology. This section contains suggestions and requirements about cloud deployments. + These should be treated as suggestions and not absolutes. However, we do encourage anyone planning to build a cloud outside of these guidelines to seek guidance and advice on the project mailing lists. +
+ Process Best Practices + + + A staging system that models the production environment is strongly advised. It is critical if customizations have been applied to &PRODUCT;. + + + Allow adequate time for installation, a beta, and learning the system. Installs with basic networking can be done in hours. Installs with advanced networking usually take several days for the first attempt, with complicated installations taking longer. For a full production system, allow at least 4-8 weeks for a beta to work through all of the integration issues. You can get help from fellow users on the cloudstack-users mailing list. + + +
+
+ Setup Best Practices + + + Each host should be configured to accept connections only from well-known entities such as the &PRODUCT; Management Server or your network monitoring software. + + + Use multiple clusters per pod if you need to achieve a certain switch density. + + + Primary storage mountpoints or LUNs should not exceed 6 TB in size. It is better to have multiple smaller primary storage elements per cluster than one large one. + + + When exporting shares on primary storage, avoid data loss by restricting the range of IP addresses that can access the storage. See "Linux NFS on Local Disks and DAS" or "Linux NFS on iSCSI". + + + NIC bonding is straightforward to implement and provides increased reliability. + + + 10G networks are generally recommended for storage access when larger servers that can support relatively more VMs are used. + + + Host capacity should generally be modeled in terms of RAM for the guests. Storage and CPU may be overprovisioned. RAM may not. RAM is usually the limiting factor in capacity designs. + + + (XenServer) Configure the XenServer dom0 settings to allocate more memory to dom0. This can enable XenServer to handle larger numbers of virtual machines. We recommend 2940 MB of RAM for XenServer dom0. For instructions on how to do this, see http://support.citrix.com/article/CTX126531. The article refers to XenServer 5.6, but the same information applies to XenServer 6.0. + + +
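+    As an illustrative sketch of the export-restriction point above, a primary storage NFS share
+    can be limited to the storage subnet in /etc/exports; the path and subnet below are
+    placeholders:
+    /export/primary 192.168.100.0/24(rw,async,no_root_squash,no_subtree_check)
+    exportfs -a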
+
+ Maintenance Best Practices + + + Monitor host disk space. Many host failures occur because the host's root disk fills up from logs that were not rotated adequately. + + + Monitor the total number of VM instances in each cluster, and disable allocation to the cluster if the total is approaching the maximum that the hypervisor can handle. Be sure to leave a safety margin to allow for the possibility of one or more hosts failing, which would increase the VM load on the other hosts as the VMs are redeployed. Consult the documentation for your chosen hypervisor to find the maximum permitted number of VMs per host, then use &PRODUCT; global configuration settings to set this as the default limit. Monitor the VM activity in each cluster and keep the total number of VMs below a safe level that allows for the occasional host failure. For example, if there are N hosts in the cluster, and you want to allow for one host in the cluster to be down at any given time, the total number of VM instances you can permit in the cluster is at most (N-1) * (per-host-limit); see the worked example after this section. Once a cluster reaches this number of VMs, use the &PRODUCT; UI to disable allocation to the cluster. + + + The lack of up-to-date hotfixes can lead to data corruption and lost VMs. + Be sure all the hotfixes provided by the hypervisor vendor are applied. Track the release of hypervisor patches through your hypervisor vendor’s support channel, and apply patches as soon as possible after they are released. &PRODUCT; will not track or notify you of required hypervisor patches. It is essential that your hosts are completely up to date with the provided hypervisor patches. The hypervisor vendor is likely to refuse to support any system that is not up to date with patches. +
+
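+    A worked example of the capacity guideline above, using assumed numbers: in a cluster of 11
+    hosts with a per-host limit of 50 VMs, planning for one host to be down gives (11-1) * 50 =
+    500 VM instances as the ceiling at which allocation to the cluster should be disabled.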
From 13691048fb622467271b75fc7b64298e3afe9912 Mon Sep 17 00:00:00 2001 From: hongtu_zang Date: Sat, 16 Mar 2013 09:54:09 +0800 Subject: [PATCH 036/123] fix bug vmware create volume from snapshot will missing data --- .../vmware/manager/VmwareStorageManagerImpl.java | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java index e11dd53f3c9..1f116455761 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java @@ -684,13 +684,16 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl); String srcOVAFileName = secondaryMountPoint + "/" + secStorageDir + "/" + backupName + "." + ImageFormat.OVA.getFileExtension(); - + String snapshotDir = ""; + if (backupName.contains("/")){ + snapshotDir = backupName.split("/")[0]; + } String srcFileName = getOVFFilePath(srcOVAFileName); if(srcFileName == null) { Script command = new Script("tar", 0, s_logger); command.add("--no-same-owner"); command.add("-xf", srcOVAFileName); - command.setWorkDir(secondaryMountPoint + "/" + secStorageDir); + command.setWorkDir(secondaryMountPoint + "/" + secStorageDir + "/" + snapshotDir); s_logger.info("Executing command: " + command.toString()); String result = command.execute(); if(result != null) { @@ -731,7 +734,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { String backupUuid = UUID.randomUUID().toString(); exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, getSnapshotRelativeDirInSecStorage(accountId, volumeId), backupUuid, workerVmName); - return backupUuid; + return backupUuid + "/" + backupUuid; } private void exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volumePath, @@ -739,8 +742,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { String workerVmName) throws Exception { String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl); - String exportPath = secondaryMountPoint + "/" + secStorageDir; - + String exportPath = secondaryMountPoint + "/" + secStorageDir + "/" + exportName; + synchronized(exportPath.intern()) { if(!new File(exportPath).exists()) { Script command = new Script(false, "mkdir", _timeout, s_logger); From b1a25cf917da1fa427e76f71c1cc19de2c4d55d0 Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Tue, 19 Mar 2013 13:14:29 -0700 Subject: [PATCH 037/123] CLOUDSTACK-1065: cloudstack UI - regions menu - implement create region action. 
--- ui/scripts/regions.js | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/ui/scripts/regions.js b/ui/scripts/regions.js index 42a3e9de57c..902d3f80ed3 100644 --- a/ui/scripts/regions.js +++ b/ui/scripts/regions.js @@ -55,26 +55,34 @@ fields: { id: { label: 'label.id', validation: { required: true } }, name: { label: 'label.name', validation: { required: true } }, - endpoint: { label: 'label.endpoint', validation: { required: true } }, - userapikey: { label: 'label.api.key' }, - userapisecretkey: { label: 'label.s3.secret_key' } + endpoint: { label: 'label.endpoint', validation: { required: true } } } }, - action: function(args) { + action: function(args) { + var data = { + id: args.data.id, + name: args.data.name, + endpoint: args.data.endpoint + }; + $.ajax({ url: createURL('addRegion'), - data: args.data, - success: function(json) { - var jobID = json.addregionresponse.jobid; - - args.response.success({ _custom: { jobId: jobID }}); - $(window).trigger('cloudStack.refreshRegions'); + data: data, + success: function(json) { + var item = json.addregionresponse.region; + args.response.success({data: item}); + //$(window).trigger('cloudStack.refreshRegions'); }, error: function(json) { args.response.error(parseXMLHttpResponse(json)); } }); - } + }, + notification: { + poll: function(args) { + args.complete(); + } + } } }, dataProvider: function(args) { From c60ef79321c3e208d658838c8f89ce48716a9b54 Mon Sep 17 00:00:00 2001 From: Edison Su Date: Tue, 19 Mar 2013 14:36:37 -0700 Subject: [PATCH 038/123] CLOUDSTACK-1608: don't support attach volume between different storage scopes --- .../subsystem/api/storage/AbstractScope.java | 30 ++++++++ .../subsystem/api/storage/ClusterScope.java | 2 +- .../subsystem/api/storage/HostScope.java | 2 +- .../engine/subsystem/api/storage/Scope.java | 1 + .../subsystem/api/storage/ZoneScope.java | 2 +- .../subsystem/api/storage/ScopeTest.java | 59 ++++++++++++++++ .../datastore/DefaultPrimaryDataStore.java | 11 +++ .../com/cloud/storage/VolumeManagerImpl.java | 69 +++++-------------- 8 files changed, 120 insertions(+), 56 deletions(-) create mode 100644 engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/AbstractScope.java create mode 100644 engine/api/test/org/apache/cloudstack/engine/subsystem/api/storage/ScopeTest.java diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/AbstractScope.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/AbstractScope.java new file mode 100644 index 00000000000..c94db66b202 --- /dev/null +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/AbstractScope.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.engine.subsystem.api.storage; + +public abstract class AbstractScope implements Scope { + @Override + public boolean isSameScope(Scope scope) { + if (this.getScopeType() == scope.getScopeType() && this.getScopeId() == scope.getScopeId()) { + return true; + } else { + return false; + } + } +} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ClusterScope.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ClusterScope.java index fce7d82cb99..0f0e9581523 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ClusterScope.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ClusterScope.java @@ -19,7 +19,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage; -public class ClusterScope implements Scope { +public class ClusterScope extends AbstractScope { private ScopeType type = ScopeType.CLUSTER; private Long clusterId; private Long podId; diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HostScope.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HostScope.java index 71d1952c625..c5e90ac894c 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HostScope.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HostScope.java @@ -19,7 +19,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage; -public class HostScope implements Scope { +public class HostScope extends AbstractScope { private ScopeType type = ScopeType.HOST; private Long hostId; public HostScope(Long hostId) { diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/Scope.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/Scope.java index c1596d4f5f7..91d4734ef15 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/Scope.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/Scope.java @@ -20,5 +20,6 @@ package org.apache.cloudstack.engine.subsystem.api.storage; public interface Scope { public ScopeType getScopeType(); + public boolean isSameScope(Scope scope); public Long getScopeId(); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ZoneScope.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ZoneScope.java index ac277af36de..2d3d41f22b5 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ZoneScope.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ZoneScope.java @@ -19,7 +19,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage; -public class ZoneScope implements Scope { +public class ZoneScope extends AbstractScope { private ScopeType type = ScopeType.ZONE; private Long zoneId; diff --git a/engine/api/test/org/apache/cloudstack/engine/subsystem/api/storage/ScopeTest.java b/engine/api/test/org/apache/cloudstack/engine/subsystem/api/storage/ScopeTest.java new file mode 100644 index 00000000000..e3ec48c74f0 --- /dev/null +++ b/engine/api/test/org/apache/cloudstack/engine/subsystem/api/storage/ScopeTest.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.engine.subsystem.api.storage; + +import static org.junit.Assert.*; +import junit.framework.Assert; + +import org.junit.Test; + +public class ScopeTest { + + @Test + public void testZoneScope() { + ZoneScope zoneScope = new ZoneScope(1L); + ZoneScope zoneScope2 = new ZoneScope(1L); + Assert.assertTrue(zoneScope.isSameScope(zoneScope2)); + + ZoneScope zoneScope3 = new ZoneScope(2L); + Assert.assertFalse(zoneScope.isSameScope(zoneScope3)); + } + + @Test + public void testClusterScope() { + ClusterScope clusterScope = new ClusterScope(1L, 1L, 1L); + ClusterScope clusterScope2 = new ClusterScope(1L, 1L, 1L); + + Assert.assertTrue(clusterScope.isSameScope(clusterScope2)); + + ClusterScope clusterScope3 = new ClusterScope(2L, 2L, 1L); + Assert.assertFalse(clusterScope.isSameScope(clusterScope3)); + } + + @Test + public void testHostScope() { + HostScope hostScope = new HostScope(1L); + HostScope hostScope2 = new HostScope(1L); + HostScope hostScope3 = new HostScope(2L); + + Assert.assertTrue(hostScope.isSameScope(hostScope2)); + Assert.assertFalse(hostScope.isSameScope(hostScope3)); + } + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java index fbfade6c6aa..7b8741c87c2 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java @@ -29,6 +29,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; @@ -48,9 +49,11 @@ import org.apache.log4j.Logger; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.component.ComponentContext; @@ -74,6 +77,8 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { protected DataStoreProvider provider; @Inject VMTemplatePoolDao templatePoolDao; + @Inject + StoragePoolHostDao poolHostDao; private VolumeDao 
volumeDao; @@ -152,6 +157,12 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { vo.getDataCenterId()); } else if (vo.getScope() == ScopeType.ZONE) { return new ZoneScope(vo.getDataCenterId()); + } else if (vo.getScope() == ScopeType.HOST) { + List poolHosts = poolHostDao.listByPoolId(vo.getId()); + if (poolHosts.size() > 0) { + return new HostScope(poolHosts.get(0).getHostId()); + } + s_logger.debug("can't find a local storage in pool host table: " + vo.getId()); } return null; } diff --git a/server/src/com/cloud/storage/VolumeManagerImpl.java b/server/src/com/cloud/storage/VolumeManagerImpl.java index 4951975786f..eb33bc4de26 100644 --- a/server/src/com/cloud/storage/VolumeManagerImpl.java +++ b/server/src/com/cloud/storage/VolumeManagerImpl.java @@ -48,6 +48,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManag import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; @@ -1439,64 +1440,26 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { } private boolean needMoveVolume(VolumeVO rootVolumeOfVm, VolumeInfo volume) { - StoragePoolVO vmRootVolumePool = _storagePoolDao - .findById(rootVolumeOfVm.getPoolId()); - DiskOfferingVO volumeDiskOffering = _diskOfferingDao - .findById(volume.getDiskOfferingId()); - String[] volumeTags = volumeDiskOffering.getTagsArray(); - - boolean isVolumeOnSharedPool = !volumeDiskOffering - .getUseLocalStorage(); - StoragePoolVO sourcePool = _storagePoolDao.findById(volume - .getPoolId()); - List matchingVMPools = _storagePoolDao - .findPoolsByTags(vmRootVolumePool.getDataCenterId(), - vmRootVolumePool.getPodId(), - vmRootVolumePool.getClusterId(), volumeTags - ); + DataStore storeForRootVol = this.dataStoreMgr.getPrimaryDataStore(rootVolumeOfVm.getPoolId()); + DataStore storeForDataVol = this.dataStoreMgr.getPrimaryDataStore(volume.getPoolId()); - boolean moveVolumeNeeded = true; - if (matchingVMPools.size() == 0) { - String poolType; - if (vmRootVolumePool.getClusterId() != null) { - poolType = "cluster"; - } else if (vmRootVolumePool.getPodId() != null) { - poolType = "pod"; - } else { - poolType = "zone"; - } - throw new CloudRuntimeException( - "There are no storage pools in the VM's " + poolType - + " with all of the volume's tags (" - + volumeDiskOffering.getTags() + ")."); - } else { - long sourcePoolId = sourcePool.getId(); - Long sourcePoolDcId = sourcePool.getDataCenterId(); - Long sourcePoolPodId = sourcePool.getPodId(); - Long sourcePoolClusterId = sourcePool.getClusterId(); - for (StoragePoolVO vmPool : matchingVMPools) { - long vmPoolId = vmPool.getId(); - Long vmPoolDcId = vmPool.getDataCenterId(); - Long vmPoolPodId = vmPool.getPodId(); - Long vmPoolClusterId = vmPool.getClusterId(); - - // Moving a volume is not required if storage pools belongs - // to same cluster in case of shared volume or - // identical storage pool in case of local - if (sourcePoolDcId == vmPoolDcId - && sourcePoolPodId == vmPoolPodId - && sourcePoolClusterId == vmPoolClusterId - && (isVolumeOnSharedPool || sourcePoolId == vmPoolId)) { - 
moveVolumeNeeded = false; - break; - } - } + Scope storeForRootStoreScope = storeForRootVol.getScope(); + if (storeForRootStoreScope == null) { + throw new CloudRuntimeException("Can't get scope of data store: " + storeForRootVol.getId()); } - return moveVolumeNeeded; + Scope storeForDataStoreScope = storeForDataVol.getScope(); + if (storeForDataStoreScope == null) { + throw new CloudRuntimeException("Can't get scope of data store: " + storeForDataVol.getId()); + } + + if (storeForRootStoreScope.getScopeType() != storeForDataStoreScope.getScopeType()) { + throw new CloudRuntimeException("Can't move volume between scope: " + storeForDataStoreScope.getScopeType() + " and " + storeForRootStoreScope.getScopeType()); + } + + return storeForRootStoreScope.isSameScope(storeForRootStoreScope); } - private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volume, Long deviceId) { String errorMsg = "Failed to attach volume: " + volume.getName() + " to VM: " + vm.getHostName(); From ba249c0e669bbdbbf26a047d0b2f1a24b9671b8d Mon Sep 17 00:00:00 2001 From: Edison Su Date: Tue, 19 Mar 2013 14:45:17 -0700 Subject: [PATCH 039/123] CLOUDSTACK-1608: enable attach a volume created on zone wide storage to a vm created on cluster or host wide storage --- server/src/com/cloud/storage/VolumeManagerImpl.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/server/src/com/cloud/storage/VolumeManagerImpl.java b/server/src/com/cloud/storage/VolumeManagerImpl.java index eb33bc4de26..a23ea3294f1 100644 --- a/server/src/com/cloud/storage/VolumeManagerImpl.java +++ b/server/src/com/cloud/storage/VolumeManagerImpl.java @@ -1453,11 +1453,15 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { throw new CloudRuntimeException("Can't get scope of data store: " + storeForDataVol.getId()); } + if (storeForDataStoreScope.getScopeType() == ScopeType.ZONE) { + return false; + } + if (storeForRootStoreScope.getScopeType() != storeForDataStoreScope.getScopeType()) { throw new CloudRuntimeException("Can't move volume between scope: " + storeForDataStoreScope.getScopeType() + " and " + storeForRootStoreScope.getScopeType()); } - return storeForRootStoreScope.isSameScope(storeForRootStoreScope); + return !storeForRootStoreScope.isSameScope(storeForDataStoreScope); } private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volume, Long deviceId) { From 1ea5d6c169eb470cf8c356f5ea358b2a3ee170da Mon Sep 17 00:00:00 2001 From: Edison Su Date: Tue, 19 Mar 2013 15:34:18 -0700 Subject: [PATCH 040/123] CLOUDSTACK-1641: fix NPE during migrate volume --- server/src/com/cloud/storage/VolumeManagerImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/com/cloud/storage/VolumeManagerImpl.java b/server/src/com/cloud/storage/VolumeManagerImpl.java index a23ea3294f1..737ed0a3bac 100644 --- a/server/src/com/cloud/storage/VolumeManagerImpl.java +++ b/server/src/com/cloud/storage/VolumeManagerImpl.java @@ -2133,7 +2133,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { + assignedPool + " assigned by deploymentPlanner"); } - VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, null); + VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, assignedPool); tasks.add(task); } } else { From 2bebb124cc0e2ab59143263de8fe1e85ba1194c2 Mon Sep 17 00:00:00 2001 From: Kelven Yang Date: Tue, 19 Mar 2013 15:47:08 -0700 Subject: [PATCH 041/123] CLOUDSTACK-1729: a partical resolution for user authenticators to unblock developers who 
are currently working on and relying on the fix --- client/tomcatconf/componentContext.xml.in | 20 +++++++++++++++++-- .../cloud/server/ManagementServerImpl.java | 17 ++++++++-------- .../com/cloud/user/AccountManagerImpl.java | 11 +++++++--- 3 files changed, 35 insertions(+), 13 deletions(-) diff --git a/client/tomcatconf/componentContext.xml.in b/client/tomcatconf/componentContext.xml.in index 7b64f49ee20..016df0a2095 100644 --- a/client/tomcatconf/componentContext.xml.in +++ b/client/tomcatconf/componentContext.xml.in @@ -36,9 +36,25 @@ --> - - + + + + + + + + + + + + + + + + + + diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index 1f1f12edfc1..b689f93f8aa 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -414,13 +414,6 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Inject S3Manager _s3Mgr; -/* - @Inject - ComponentContext _forceContextRef; // create a dependency to ComponentContext so that it can be loaded beforehead - - @Inject - EventUtils _forceEventUtilsRef; -*/ private final ScheduledExecutorService _eventExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("EventChecker")); private final ScheduledExecutorService _alertExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("AlertChecker")); private KeystoreManager _ksMgr; @@ -429,7 +422,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe private Map _availableIdsMap; - @Inject List _userAuthenticators; + List _userAuthenticators; @Inject ClusterManager _clusterMgr; private String _hashKey = null; @@ -437,6 +430,14 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe public ManagementServerImpl() { setRunLevel(ComponentLifecycle.RUN_LEVEL_APPLICATION_MAINLOOP); } + + public List getUserAuthenticators() { + return _userAuthenticators; + } + + public void setUserAuthenticators(List authenticators) { + _userAuthenticators = authenticators; + } @Override public boolean configure(String name, Map params) diff --git a/server/src/com/cloud/user/AccountManagerImpl.java b/server/src/com/cloud/user/AccountManagerImpl.java index be5f4f4d77d..b69f31464ba 100755 --- a/server/src/com/cloud/user/AccountManagerImpl.java +++ b/server/src/com/cloud/user/AccountManagerImpl.java @@ -48,7 +48,6 @@ import org.apache.cloudstack.api.command.admin.user.RegisterCmd; import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd; import org.apache.commons.codec.binary.Base64; import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; import com.cloud.api.query.dao.UserAccountJoinDao; @@ -140,7 +139,6 @@ import com.cloud.vm.dao.InstanceGroupDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; -@Component @Local(value = { AccountManager.class, AccountService.class }) public class AccountManagerImpl extends ManagerBase implements AccountManager, Manager { public static final Logger s_logger = Logger.getLogger(AccountManagerImpl.class); @@ -223,7 +221,6 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M private AutoScaleManager _autoscaleMgr; @Inject VolumeManager volumeMgr; - @Inject private List _userAuthenticators; private final ScheduledExecutorService _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("AccountChecker")); @@ -237,6 
+234,14 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M List _securityCheckers; int _cleanupInterval; + public List getUserAuthenticators() { + return _userAuthenticators; + } + + public void setUserAuthenticators(List authenticators) { + _userAuthenticators = authenticators; + } + @Override public boolean configure(final String name, final Map params) throws ConfigurationException { _systemAccount = _accountDao.findById(AccountVO.ACCOUNT_ID_SYSTEM); From 86a2a7504693e2672552fa98b90a60f9fea30196 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Wed, 20 Mar 2013 00:26:37 +0000 Subject: [PATCH 042/123] CLOUDSTACK-1668: Fix IP conflict in VPC tier Currently, allPossibleIps return the Ip lists which include the gateway, so we need to remove gateway ip from this list. Now, for non-VPC network it works, because NetUtils.getAllIpsFromCidr return the Ip lists which do not include the first IP of the network (like 192.168.0.1). We need too add the first IP into the returned Ip list, because it can be used for VM if it is not the gateway IP (for example, VPC networks). The corresponding patch for 4.0.1 has been posted on https://reviews.apache.org/r/9923/ Signed-off-by: Chip Childers --- server/src/com/cloud/network/NetworkModelImpl.java | 5 +++++ server/src/com/cloud/network/NetworkServiceImpl.java | 5 +++++ utils/src/com/cloud/utils/net/NetUtils.java | 2 +- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/server/src/com/cloud/network/NetworkModelImpl.java b/server/src/com/cloud/network/NetworkModelImpl.java index 779b9f23466..40a18c742dd 100644 --- a/server/src/com/cloud/network/NetworkModelImpl.java +++ b/server/src/com/cloud/network/NetworkModelImpl.java @@ -1644,6 +1644,11 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { if (usedIps.size() != 0) { allPossibleIps.removeAll(usedIps); } + + String gateway = network.getGateway(); + if ((gateway != null) && (allPossibleIps.contains(NetUtils.ip2Long(gateway)))) + allPossibleIps.remove(NetUtils.ip2Long(gateway)); + return allPossibleIps; } diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java index 52e81e5c8c8..8303b0bba3c 100755 --- a/server/src/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/com/cloud/network/NetworkServiceImpl.java @@ -2046,6 +2046,11 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (usedIps.size() != 0) { allPossibleIps.removeAll(usedIps); } + + String gateway = network.getGateway(); + if ((gateway != null) && (allPossibleIps.contains(NetUtils.ip2Long(gateway)))) + allPossibleIps.remove(NetUtils.ip2Long(gateway)); + return allPossibleIps; } diff --git a/utils/src/com/cloud/utils/net/NetUtils.java b/utils/src/com/cloud/utils/net/NetUtils.java index dd40a33934d..5988dd5f337 100755 --- a/utils/src/com/cloud/utils/net/NetUtils.java +++ b/utils/src/com/cloud/utils/net/NetUtils.java @@ -632,7 +632,7 @@ public class NetUtils { Set result = new TreeSet(); long ip = ip2Long(cidr); long startNetMask = ip2Long(getCidrNetmask(size)); - long start = (ip & startNetMask) + 2; + long start = (ip & startNetMask) + 1; long end = start; end = end >> (32 - size); From 6bc12fa66d983f295c3f3ab9d300e5e57a52cf4e Mon Sep 17 00:00:00 2001 From: Min Chen Date: Tue, 19 Mar 2013 16:57:00 -0700 Subject: [PATCH 043/123] CLOUDSTACK-1511 and CLOUDSTACK-1446 --- .../com/cloud/api/query/dao/UserVmJoinDaoImpl.java | 2 ++ server/src/com/cloud/api/query/vo/UserVmJoinVO.java | 13 
+++++++++++++ setup/db/db/schema-40to410.sql | 3 ++- 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index f561449fe2a..8b6abf8a3e4 100644 --- a/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -185,6 +185,7 @@ public class UserVmJoinDaoImpl extends GenericDaoBase implem nicResponse.setGateway(userVm.getGateway()); nicResponse.setNetmask(userVm.getNetmask()); nicResponse.setNetworkid(userVm.getNetworkUuid()); + nicResponse.setNetworkName(userVm.getNetworkName()); nicResponse.setMacAddress(userVm.getMacAddress()); nicResponse.setIp6Address(userVm.getIp6Address()); nicResponse.setIp6Gateway(userVm.getIp6Gateway()); @@ -246,6 +247,7 @@ public class UserVmJoinDaoImpl extends GenericDaoBase implem nicResponse.setGateway(uvo.getGateway()); nicResponse.setNetmask(uvo.getNetmask()); nicResponse.setNetworkid(uvo.getNetworkUuid()); + nicResponse.setNetworkName(uvo.getNetworkName()); nicResponse.setMacAddress(uvo.getMacAddress()); nicResponse.setIp6Address(uvo.getIp6Address()); nicResponse.setIp6Gateway(uvo.getIp6Gateway()); diff --git a/server/src/com/cloud/api/query/vo/UserVmJoinVO.java b/server/src/com/cloud/api/query/vo/UserVmJoinVO.java index d7238224e4e..33c49cdeae9 100644 --- a/server/src/com/cloud/api/query/vo/UserVmJoinVO.java +++ b/server/src/com/cloud/api/query/vo/UserVmJoinVO.java @@ -293,6 +293,9 @@ public class UserVmJoinVO extends BaseViewVO implements ControlledViewEntity { @Column(name="network_uuid") private String networkUuid; + @Column(name="network_name") + private String networkName; + @Column(name="traffic_type") @Enumerated(value=EnumType.STRING) private TrafficType trafficType; @@ -1168,6 +1171,16 @@ public class UserVmJoinVO extends BaseViewVO implements ControlledViewEntity { } + public String getNetworkName() { + return networkName; + } + + + public void setNetworkName(String networkName) { + this.networkName = networkName; + } + + public TrafficType getTrafficType() { return trafficType; } diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql index 0f316a5acdd..9d51030876c 100644 --- a/setup/db/db/schema-40to410.sql +++ b/setup/db/db/schema-40to410.sql @@ -572,6 +572,7 @@ CREATE VIEW `cloud`.`user_vm_view` AS vpc.id vpc_id, vpc.uuid vpc_uuid, networks.uuid network_uuid, + networks.name network_name, networks.traffic_type traffic_type, networks.guest_type guest_type, user_ip_address.id public_ip_id, @@ -750,7 +751,7 @@ CREATE VIEW `cloud`.`domain_router_view` AS left join `cloud`.`networks` ON nics.network_id = networks.id left join - `cloud`.`vpc` ON networks.vpc_id = vpc.id + `cloud`.`vpc` ON domain_router.vpc_id = vpc.id left join `cloud`.`async_job` ON async_job.instance_id = vm_instance.id and async_job.instance_type = 'DomainRouter' From 4faad73d6c0f47a31c5ec8ff9270a61120d04eb2 Mon Sep 17 00:00:00 2001 From: Kelven Yang Date: Tue, 19 Mar 2013 18:54:16 -0700 Subject: [PATCH 044/123] CLOUDSTACK-1729: a partical resolution for user authenticators to unblock developers who are currently working on, for non-oss version --- .../tomcatconf/nonossComponentContext.xml.in | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/client/tomcatconf/nonossComponentContext.xml.in b/client/tomcatconf/nonossComponentContext.xml.in index 7e3552db67e..8f8dae5b10a 100644 --- a/client/tomcatconf/nonossComponentContext.xml.in +++ 
b/client/tomcatconf/nonossComponentContext.xml.in @@ -37,8 +37,24 @@ --> - + + + + + + + + + + + + + + + + + From 792db8b5cc2cd2d593435457d54620768941dd61 Mon Sep 17 00:00:00 2001 From: Prasanna Santhanam Date: Sun, 10 Mar 2013 21:07:29 +0530 Subject: [PATCH 045/123] simulator: removing cyclic dependency from simulator The database creator caused a cyclic dependecny in the simulator which is removed with this commit. Additionally the simulator profile is now merged with developer profile and a test for server health is included Steps to run: $ mvn -Pdeveloper clean install $ mvn -Pdeveloper -pl developer -Ddeploydb $ mvn -Pdeveloper -pl developer -Ddeploydb-simulator $ mvn -pl client jetty:run To deploy an adv. zone and test the server health: $ mvn -Pdeveloper,marvin -Dmarvin.config=`find . -name simulator.cfg` -pl :cloud-marvin test Conflicts: pom.xml Signed-off-by: Prasanna Santhanam --- client/pom.xml | 20 +- client/tomcatconf/componentContext.xml.in | 6 +- developer/pom.xml | 240 +++++++++++++--------- pom.xml | 113 +--------- tools/apidoc/gen_toc.py | 1 + tools/marvin/pom.xml | 218 ++++++++++---------- 6 files changed, 259 insertions(+), 339 deletions(-) diff --git a/client/pom.xml b/client/pom.xml index ecf232be7ac..382706d930f 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -219,6 +219,11 @@ cloud-engine-storage-volume ${project.version} + + org.apache.cloudstack + cloud-plugin-hypervisor-simulator + ${project.version} + install @@ -481,21 +486,6 @@ - - simulator - - - simulator - - - - - org.apache.cloudstack - cloud-plugin-hypervisor-simulator - ${project.version} - - - netapp diff --git a/client/tomcatconf/componentContext.xml.in b/client/tomcatconf/componentContext.xml.in index 016df0a2095..b536879044b 100644 --- a/client/tomcatconf/componentContext.xml.in +++ b/client/tomcatconf/componentContext.xml.in @@ -215,11 +215,9 @@ - @@ -318,11 +316,9 @@ - diff --git a/developer/pom.xml b/developer/pom.xml index ff47b143093..3dc276adc23 100644 --- a/developer/pom.xml +++ b/developer/pom.xml @@ -10,7 +10,7 @@ language governing permissions and limitations under the License. 
--> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 cloud-developer Apache CloudStack Developer Tools @@ -21,25 +21,98 @@ 4.2.0-SNAPSHOT + mysql mysql-connector-java - 5.1.21 - runtime + ${cs.mysql.version} + + + commons-dbcp + commons-dbcp + ${cs.dbcp.version} + + + commons-pool + commons-pool + ${cs.pool.version} + + + org.jasypt + jasypt + ${cs.jasypt.version} + + + org.apache.cloudstack + cloud-utils + ${project.version} + + + org.apache.cloudstack + cloud-server + ${project.version} + + + org.apache.cloudstack + cloud-plugin-hypervisor-simulator + ${project.version} + compile - - org.apache.cloudstack - cloud-plugin-hypervisor-simulator - ${project.version} - compile - install + + + org.codehaus.mojo + properties-maven-plugin + 1.0-alpha-2 + + + initialize + + read-project-properties + + + + ${basedir}/../utils/conf/db.properties + ${basedir}/../utils/conf/db.properties.override + + true + + + + + + maven-antrun-plugin + 1.7 + + + generate-resources + + run + + + + + + + + + + + + + + + + + + - + deploydb @@ -48,91 +121,10 @@ - - org.codehaus.mojo - properties-maven-plugin - 1.0-alpha-2 - - - initialize - - read-project-properties - - - - ${project.parent.basedir}/utils/conf/db.properties - ${project.parent.basedir}/utils/conf/db.properties.override - - true - - - - - - maven-antrun-plugin - 1.7 - - - generate-resources - - run - - - - - - - - - - - - - - - - - - - org.codehaus.mojo exec-maven-plugin 1.2.1 - - - - mysql - mysql-connector-java - ${cs.mysql.version} - - - commons-dbcp - commons-dbcp - ${cs.dbcp.version} - - - commons-pool - commons-pool - ${cs.pool.version} - - - org.jasypt - jasypt - ${cs.jasypt.version} - - - org.apache.cloudstack - cloud-utils - ${project.version} - - - org.apache.cloudstack - cloud-server - ${project.version} - - process-resources @@ -143,17 +135,11 @@ - false - true - - org.apache.cloudstack - cloud-server - com.cloud.upgrade.DatabaseCreator - ${project.parent.basedir}/utils/conf/db.properties - ${project.parent.basedir}/utils/conf/db.properties.override + ${basedir}/../utils/conf/db.properties + ${basedir}/../utils/conf/db.properties.override ${basedir}/target/db/create-schema.sql ${basedir}/target/db/create-schema-premium.sql @@ -181,7 +167,59 @@ catalina.home - ${project.parent.basedir}/utils + ${basedir}/../utils + + + paths.script + ${basedir}/target/db + + + + + + + + + + deploydb-simulator + + + deploydb-simulator + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + process-resources + create-schema-simulator + + java + + + + + com.cloud.upgrade.DatabaseCreator + + + ${basedir}/../utils/conf/db.properties + ${basedir}/../utils/conf/db.properties.override + + ${basedir}/target/db/create-schema-simulator.sql + ${basedir}/target/db/templates.simulator.sql + + com.cloud.upgrade.DatabaseUpgradeChecker + --database=simulator + --rootpassword=${db.root.password} + + + + catalina.home + ${basedir}/../utils paths.script @@ -194,4 +232,4 @@ - + \ No newline at end of file diff --git a/pom.xml b/pom.xml index e75c420a616..1faafb2914d 100644 --- a/pom.xml +++ b/pom.xml @@ -182,7 +182,6 @@ ${cs.junit.version} test - org.springframework spring-core @@ -222,14 +221,12 @@ 1.9.5 test - org.springframework spring-test ${org.springframework.version} test - org.aspectj aspectjrt @@ -276,7 +273,7 @@ - + @@ -509,113 +506,5 @@ vmware-base - - simulator - - - deploydb-simulator - - - - - - org.codehaus.mojo - properties-maven-plugin - 1.0-alpha-2 - - - initialize - - read-project-properties - - 
- - ${project.basedir}/utils/conf/db.properties - ${project.basedir}/utils/conf/db.properties.override - - true - - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - - mysql - mysql-connector-java - ${cs.mysql.version} - - - commons-dbcp - commons-dbcp - ${cs.dbcp.version} - - - commons-pool - commons-pool - ${cs.pool.version} - - - org.jasypt - jasypt - ${cs.jasypt.version} - - - org.apache.cloudstack - cloud-utils - ${project.version} - - - org.apache.cloudstack - cloud-server - ${project.version} - - - - - process-resources - create-schema - - java - - - - - false - true - - org.apache.cloudstack - cloud-server - - com.cloud.upgrade.DatabaseCreator - - - ${project.basedir}/utils/conf/db.properties - ${project.basedir}/utils/conf/db.properties.override - - ${basedir}/target/db/create-schema-simulator.sql - ${basedir}/target/db/templates.simulator.sql - - com.cloud.upgrade.DatabaseUpgradeChecker - --database=simulator - --rootpassword=${db.root.password} - - - - - catalina.home - ${project.basedir}/utils - - - - - - - diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index 6292c536a9d..1fe5e1641f4 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -123,6 +123,7 @@ known_categories = { 'Pool': 'Pool', 'VPC': 'VPC', 'PrivateGateway': 'VPC', + 'Simulator': 'simulator', 'StaticRoute': 'VPC', 'Tags': 'Resource tags', 'NiciraNvpDevice': 'Nicira NVP', diff --git a/tools/marvin/pom.xml b/tools/marvin/pom.xml index 80099be1ecb..8cb9ec370d5 100644 --- a/tools/marvin/pom.xml +++ b/tools/marvin/pom.xml @@ -9,112 +9,118 @@ OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> - 4.0.0 - cloud-marvin - Apache CloudStack marvin - pom - - org.apache.cloudstack - cloud-tools - 4.2.0-SNAPSHOT - ../pom.xml - - - install - - - maven-antrun-plugin - 1.7 - - - clean - clean - - run - - - - - Deleting ${project.artifactId} API sources - - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - compile - compile - - exec - - - ${basedir}/marvin - python - - codegenerator.py - -s - ${basedir}/../apidoc/target/commands.xml - Generating ${project.artifactId} API classes} - - - - - package - package - - exec - - - ${exec.workingdir} - python - - setup.py - sdist - - - - + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + 4.0.0 + cloud-marvin + Apache CloudStack marvin + pom + + org.apache.cloudstack + cloud-tools + 4.2.0-SNAPSHOT + ../pom.xml + + + install + + + maven-antrun-plugin + 1.7 + + + clean + clean + + run + + + + + Deleting ${project.artifactId} API sources + + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + compile + compile + + exec + + + ${basedir}/marvin + python + + codegenerator.py + -s + ${basedir}/../apidoc/target/commands.xml + Generating ${project.artifactId} API classes} + + + + + package + package + + exec + + + ${exec.workingdir} + python + + setup.py + sdist + + + + - + - - - - marvin - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - package - - exec - - - - - ${basedir}/marvin - python - - deployDataCenter.py - -i - ${user.dir}/${marvin.config} - - - - - - - + + + + marvin + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + ${basedir}/marvin + python + + deployAndRun.py + -c + ${user.dir}/${marvin.config} + -t + /tmp/t.log + -r + /tmp/r.log + -f + ${basedir}/marvin/testSetupSuccess.py + + + + + test + + exec + + + + + + + + From 
02b3cd7de9438ec1cceeabd992b81995a5912981 Mon Sep 17 00:00:00 2001 From: Prasanna Santhanam Date: Sun, 10 Mar 2013 17:29:10 +0530 Subject: [PATCH 046/123] marvin logging: write to tmp to adjust for mac-osx /var/log is not available on OSX. This breaks the simulator run. --- tools/marvin/marvin/sandbox/demo/simulator/simulator.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/marvin/marvin/sandbox/demo/simulator/simulator.cfg b/tools/marvin/marvin/sandbox/demo/simulator/simulator.cfg index 7c733ade256..ca794605540 100644 --- a/tools/marvin/marvin/sandbox/demo/simulator/simulator.cfg +++ b/tools/marvin/marvin/sandbox/demo/simulator/simulator.cfg @@ -112,11 +112,11 @@ "logger": [ { "name": "TestClient", - "file": "/var/log/testclient.log" + "file": "/tmp/testclient.log" }, { "name": "TestCase", - "file": "/var/log/testcase.log" + "file": "/tmp/testcase.log" } ], "globalConfig": [ From 7e91c475416f1b057974a3a1422cb22cf9931c7c Mon Sep 17 00:00:00 2001 From: radhikap Date: Wed, 20 Mar 2013 11:00:48 +0530 Subject: [PATCH 047/123] CLOUDSTACK-1308 and CLOUDSTACK-807 --- docs/en-US/changed-apicommands-4.1.xml | 25 ++++++++++++++++++------- docs/en-US/ipv6-support.xml | 10 +++++----- 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/docs/en-US/changed-apicommands-4.1.xml b/docs/en-US/changed-apicommands-4.1.xml index f0045a56de3..13553fdf336 100644 --- a/docs/en-US/changed-apicommands-4.1.xml +++ b/docs/en-US/changed-apicommands-4.1.xml @@ -62,9 +62,20 @@ listNetworks - The following request parameters has been added: isPersistent - This parameter determines if the network or network offering listed by using this - offering are persistent or not. + The following request parameters have been added: + + + isPersistent + This parameter determines if the network or network offering listed are + persistent or not. + + + ip6gateway + + + ip6cidr + + @@ -104,7 +115,7 @@ CreateZoneCmd - The following parameter are added: ip6dns1, ip6dns2. + The following parameter have been added: ip6dns1, ip6dns2. @@ -113,7 +124,7 @@ listVirtualMachines - For nic responses, the following fields has been added. + For nic responses, the following fields have been added. ip6address @@ -132,7 +143,7 @@ listVlanIpRanges - For nic responses, the following fields has been added. + For nic responses, the following fields have been added. startipv6 @@ -166,7 +177,7 @@ - + addF5LoadBalancer diff --git a/docs/en-US/ipv6-support.xml b/docs/en-US/ipv6-support.xml index 22a5d7a5370..7367ec9ad80 100644 --- a/docs/en-US/ipv6-support.xml +++ b/docs/en-US/ipv6-support.xml @@ -106,14 +106,14 @@ Passwords - - The administrator cannot specify the IPv6 address of a VM. -
- Network Configuration for DHCPv6 - Use DUID-LL to get IPv6 address from DHCP server + Guest VM Configuration for DHCPv6 + For the guest VMs to get IPv6 address, run dhclient command manually on each of the VMs. + Use DUID-LL to set up dhclient. + The IPv6 address is lost when a VM is stopped and started. Therefore, use the same procedure + to get an IPv6 address when a VM is stopped and started. Set up dhclient by using DUID-LL. From 323c3211c814db8053cd050da69a05417b22cc03 Mon Sep 17 00:00:00 2001 From: Prasanna Santhanam Date: Mon, 11 Mar 2013 21:09:22 +0530 Subject: [PATCH 048/123] run the test lifecycle only when config is specified --- tools/marvin/pom.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/marvin/pom.xml b/tools/marvin/pom.xml index 8cb9ec370d5..a3bd5460fd5 100644 --- a/tools/marvin/pom.xml +++ b/tools/marvin/pom.xml @@ -88,6 +88,7 @@ marvin + marvin.config From de1ac4b7cd70424a204b753cb955f3ef26ddd67c Mon Sep 17 00:00:00 2001 From: Prasanna Santhanam Date: Sun, 10 Mar 2013 11:42:48 +0530 Subject: [PATCH 049/123] jvmArgs is invalid here jvmArgs are unaffected in the jetty:run configuration. Use MAVEN_OPTS for this instead. Signed-off-by: Prasanna Santhanam --- client/pom.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/client/pom.xml b/client/pom.xml index 382706d930f..302fe6bacbe 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -248,7 +248,6 @@ 60000 - -XX:MaxPermSize=512m -Xmx2g ${project.build.directory}/${project.build.finalName}/WEB-INF/web.xml ${project.build.directory}/${project.build.finalName} From d7f9aa637ebc953928e3d601735bc01f6fcff828 Mon Sep 17 00:00:00 2001 From: Prasanna Santhanam Date: Wed, 13 Mar 2013 21:36:09 +0530 Subject: [PATCH 050/123] remove dependence on dsl iso No need to download the 50MB iso for every test. It is sufficient to use a dummy iso. This dummy iso was generated using mkisofs for test purposes. 
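[For reference only, not part of this patch: a dummy ISO like the one the tests now download can be produced with a stock mkisofs/genisoimage install. The standalone sketch below drives the tool from Java; the tool name, volume label, and file names are illustrative assumptions, not taken from the CloudStack build.]

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

public class DummyIsoMaker {
    public static void main(String[] args) throws IOException, InterruptedException {
        // Stage a tiny directory tree to burn into the image.
        Path content = Files.createTempDirectory("dummy-iso-content");
        Files.write(content.resolve("README.txt"),
                "placeholder payload for ISO download tests".getBytes(StandardCharsets.UTF_8));

        File iso = new File("dummy.iso");
        // mkisofs -quiet -V DUMMY -o dummy.iso <dir>   (genisoimage accepts the same flags)
        Process p = new ProcessBuilder("mkisofs", "-quiet", "-V", "DUMMY",
                "-o", iso.getAbsolutePath(), content.toString())
                .inheritIO()
                .start();
        int rc = p.waitFor();
        System.out.println("mkisofs exited " + rc + "; " + iso.getName()
                + " is " + iso.length() + " bytes");
    }
}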
Signed-off-by: Prasanna Santhanam --- test/integration/component/test_project_usage.py | 2 +- test/integration/component/test_usage.py | 2 +- test/integration/component/test_volumes.py | 2 +- test/integration/smoke/test_iso.py | 4 ++-- test/integration/smoke/test_nic.py | 2 +- test/integration/smoke/test_vm_life_cycle.py | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/test/integration/component/test_project_usage.py b/test/integration/component/test_project_usage.py index 16d51068deb..9f0488d20ce 100644 --- a/test/integration/component/test_project_usage.py +++ b/test/integration/component/test_project_usage.py @@ -82,7 +82,7 @@ class Services: "iso": { "displaytext": "Test ISO", "name": "Test ISO", - "url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "isextractable": True, "isfeatured": True, diff --git a/test/integration/component/test_usage.py b/test/integration/component/test_usage.py index 4251eab9555..82d13e5a9ff 100644 --- a/test/integration/component/test_usage.py +++ b/test/integration/component/test_usage.py @@ -78,7 +78,7 @@ class Services: "iso": { "displaytext": "Test ISO", "name": "Test ISO", - "url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "isextractable": True, "isfeatured": True, diff --git a/test/integration/component/test_volumes.py b/test/integration/component/test_volumes.py index 0a7813065ae..bedf6efd8b4 100644 --- a/test/integration/component/test_volumes.py +++ b/test/integration/component/test_volumes.py @@ -77,7 +77,7 @@ class Services: { "displaytext": "Test ISO", "name": "testISO", - "url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "ostype": 'CentOS 5.3 (64-bit)', }, diff --git a/test/integration/smoke/test_iso.py b/test/integration/smoke/test_iso.py index 8228a278cc9..5bd7bb358be 100644 --- a/test/integration/smoke/test_iso.py +++ b/test/integration/smoke/test_iso.py @@ -50,7 +50,7 @@ class Services: { "displaytext": "Test ISO 1", "name": "ISO 1", - "url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "isextractable": True, "isfeatured": True, @@ -61,7 +61,7 @@ class Services: { "displaytext": "Test ISO 2", "name": "ISO 2", - "url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "isextractable": True, "isfeatured": True, diff --git a/test/integration/smoke/test_nic.py b/test/integration/smoke/test_nic.py index b9dfdde8fe5..ad30122cd47 100644 --- a/test/integration/smoke/test_nic.py +++ b/test/integration/smoke/test_nic.py @@ -88,7 +88,7 @@ class Services: "iso": { "displaytext": "Test ISO", "name": "testISO", - "url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "ostype": 'CentOS 5.3 (64-bit)', "mode": 'HTTP_DOWNLOAD', # Downloading existing ISO diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py index 
8d65c00c896..0a5fbad8376 100644 --- a/test/integration/smoke/test_vm_life_cycle.py +++ b/test/integration/smoke/test_vm_life_cycle.py @@ -107,7 +107,7 @@ class Services: { "displaytext": "Test ISO", "name": "testISO", - "url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "ostype": 'CentOS 5.3 (64-bit)', "mode": 'HTTP_DOWNLOAD', # Downloading existing ISO From 4ad5d1a4f55a1eb1dc0ebf164f0fe081aeebb2d7 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Wed, 20 Mar 2013 12:42:29 +0530 Subject: [PATCH 051/123] cli: Fix nonetype issue with cachemaker and exit after printing version Signed-off-by: Rohit Yadav --- tools/cli/cloudmonkey/cachemaker.py | 6 +++++- tools/cli/cloudmonkey/cloudmonkey.py | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/cli/cloudmonkey/cachemaker.py b/tools/cli/cloudmonkey/cachemaker.py index 8ac123caa4b..a625b014d38 100644 --- a/tools/cli/cloudmonkey/cachemaker.py +++ b/tools/cli/cloudmonkey/cachemaker.py @@ -100,7 +100,11 @@ def monkeycache(apis): cache['count'] = getvalue(apis[responsekey], 'count') cache['asyncapis'] = [] - for api in getvalue(apis[responsekey], 'api'): + apilist = getvalue(apis[responsekey], 'api') + if apilist == None: + print "[monkeycache] Server response issue, no apis found" + + for api in apilist: name = getvalue(api, 'name') verb, subject = splitverbsubject(name) diff --git a/tools/cli/cloudmonkey/cloudmonkey.py b/tools/cli/cloudmonkey/cloudmonkey.py index e94d53091ac..a95ab9eaab4 100644 --- a/tools/cli/cloudmonkey/cloudmonkey.py +++ b/tools/cli/cloudmonkey/cloudmonkey.py @@ -487,6 +487,7 @@ def main(): if options.version: print "cloudmonkey", __version__ print __description__, "(%s)" % __projecturl__ + sys.exit(0) shell = CloudMonkeyShell(sys.argv[0], options.cfile) if len(args) > 1: From 0d62549d61acf9534a7b7e7780241a217e1de20f Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Wed, 20 Mar 2013 13:14:09 +0530 Subject: [PATCH 052/123] cli: Run onecmd if any arg is passed Signed-off-by: Rohit Yadav --- tools/cli/cloudmonkey/cloudmonkey.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/cli/cloudmonkey/cloudmonkey.py b/tools/cli/cloudmonkey/cloudmonkey.py index a95ab9eaab4..94006c9577a 100644 --- a/tools/cli/cloudmonkey/cloudmonkey.py +++ b/tools/cli/cloudmonkey/cloudmonkey.py @@ -490,7 +490,7 @@ def main(): sys.exit(0) shell = CloudMonkeyShell(sys.argv[0], options.cfile) - if len(args) > 1: + if len(args) > 0: shell.onecmd(' '.join(args)) else: shell.cmdloop() From 873ec27135628a81252875693d11667c18bb1589 Mon Sep 17 00:00:00 2001 From: Prasanna Santhanam Date: Wed, 20 Mar 2013 14:52:32 +0530 Subject: [PATCH 053/123] simulator: by default don't start the simulator discoverers Signed-off-by: Prasanna Santhanam --- client/tomcatconf/componentContext.xml.in | 6 ++++++ .../com/cloud/resource/SimulatorSecondaryDiscoverer.java | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/client/tomcatconf/componentContext.xml.in b/client/tomcatconf/componentContext.xml.in index b536879044b..2535c779368 100644 --- a/client/tomcatconf/componentContext.xml.in +++ b/client/tomcatconf/componentContext.xml.in @@ -207,6 +207,12 @@ + + diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java index 1dd71c5c27f..3a8cf17e24b 100644 --- 
a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java @@ -44,7 +44,7 @@ import com.cloud.storage.secondary.SecondaryStorageDiscoverer; import com.cloud.utils.exception.CloudRuntimeException; import org.springframework.stereotype.Component; -@Component + @Local(value=Discoverer.class) public class SimulatorSecondaryDiscoverer extends SecondaryStorageDiscoverer implements ResourceStateAdapter, Listener { private static final Logger s_logger = Logger.getLogger(SimulatorSecondaryDiscoverer.class); From 3e68dd810e0d4df5213dd921f4b703befec55ecd Mon Sep 17 00:00:00 2001 From: Nitin Mehta Date: Wed, 20 Mar 2013 16:44:44 +0530 Subject: [PATCH 054/123] CLOUDSTACK-1738 : Adding code for StatsCollector initialization using spring framework. This was not initialized and hence stats were not colleced on vm, host and storage in CS. --- server/src/com/cloud/server/StatsCollector.java | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/server/src/com/cloud/server/StatsCollector.java b/server/src/com/cloud/server/StatsCollector.java index 76bae5b4aca..7dcf091f3e3 100755 --- a/server/src/com/cloud/server/StatsCollector.java +++ b/server/src/com/cloud/server/StatsCollector.java @@ -27,10 +27,15 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import javax.annotation.PostConstruct; import javax.inject.Inject; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.resource.ResourceManager; + import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -88,6 +93,7 @@ public class StatsCollector { @Inject private StoragePoolHostDao _storagePoolHostDao; @Inject private SecondaryStorageVmManager _ssvmMgr; @Inject private ResourceManager _resourceMgr; + @Inject private ConfigurationDao _configDao; private ConcurrentHashMap _hostStats = new ConcurrentHashMap(); private final ConcurrentHashMap _VmStats = new ConcurrentHashMap(); @@ -107,6 +113,7 @@ public class StatsCollector { } public static StatsCollector getInstance(Map configs) { + s_instance.init(configs); return s_instance; } @@ -114,6 +121,11 @@ public class StatsCollector { s_instance = this; } + @PostConstruct + private void init(){ + init(_configDao.getConfiguration()); + } + private void init(Map configs) { _executor = Executors.newScheduledThreadPool(3, new NamedThreadFactory("StatsCollector")); From dbfd31663c5a4d89b230efe97c9f61f96b220621 Mon Sep 17 00:00:00 2001 From: Sateesh Chodapuneedi Date: Wed, 20 Mar 2013 13:52:00 +0530 Subject: [PATCH 055/123] CLOUDSTACK-664 Health monitoring for NetScaler load balanced instances Fixing class names added to command list. 
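[Aside, not CloudStack code: the StatsCollector change above (CLOUDSTACK-1738) relies on Spring invoking an @PostConstruct method once the bean has been wired, so the collector no longer depends on an explicit getInstance(configs) call. A rough standalone illustration of that pattern follows; the class names are invented, and it assumes spring-context and the javax.annotation API are on the classpath.]

import javax.annotation.PostConstruct;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.stereotype.Component;

@Component
class SampleCollector {
    private static SampleCollector s_instance;

    public SampleCollector() {
        s_instance = this;   // keep the static accessor that other callers rely on
    }

    @PostConstruct
    void init() {
        // One-time setup now runs automatically when the container builds the bean,
        // instead of waiting for someone to trigger initialization explicitly.
        System.out.println("collector initialized by the container");
    }

    static SampleCollector getInstance() {
        return s_instance;
    }
}

public class PostConstructDemo {
    public static void main(String[] args) {
        AnnotationConfigApplicationContext ctx =
                new AnnotationConfigApplicationContext(SampleCollector.class);
        // init() has already run by the time the bean is retrievable.
        System.out.println("instance available: " + (SampleCollector.getInstance() != null));
        ctx.close();
    }
}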
Signed-off-by: Sateesh Chodapuneedi --- server/src/com/cloud/server/ManagementServerImpl.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index b689f93f8aa..191157a4db8 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -2104,13 +2104,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(QueryAsyncJobResultCmd.class); cmdList.add(AssignToLoadBalancerRuleCmd.class); cmdList.add(CreateLBStickinessPolicyCmd.class); - cmdList.add(CreateLBHealthCheckPolicyCmd .class); + cmdList.add(CreateLBHealthCheckPolicyCmd.class); cmdList.add(CreateLoadBalancerRuleCmd.class); cmdList.add(DeleteLBStickinessPolicyCmd.class); - cmdList.add(DeleteLBHealthCheckPolicyCmd .class); + cmdList.add(DeleteLBHealthCheckPolicyCmd.class); cmdList.add(DeleteLoadBalancerRuleCmd.class); cmdList.add(ListLBStickinessPoliciesCmd.class); - cmdList.add(ListLBHealthCheckPoliciesCmd .class); + cmdList.add(ListLBHealthCheckPoliciesCmd.class); cmdList.add(ListLoadBalancerRuleInstancesCmd.class); cmdList.add(ListLoadBalancerRulesCmd.class); cmdList.add(RemoveFromLoadBalancerRuleCmd.class); From 918a7c7481454e78a648389ed3173c0037fc715b Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Wed, 20 Mar 2013 11:58:17 -0700 Subject: [PATCH 056/123] CLOUDSTACK-1065: cloudstack UI - AWS Style Regions - implement region switching action triggered by region dropdown on top menu. --- ui/scripts/cloud.core.callbacks.js | 25 +++++++++++++++++++------ ui/scripts/cloudStack.js | 9 +++++++-- ui/scripts/sharedFunctions.js | 1 + ui/scripts/ui-custom/regions.js | 4 ++-- 4 files changed, 29 insertions(+), 10 deletions(-) diff --git a/ui/scripts/cloud.core.callbacks.js b/ui/scripts/cloud.core.callbacks.js index 857c247d9d1..6384f9bd006 100644 --- a/ui/scripts/cloud.core.callbacks.js +++ b/ui/scripts/cloud.core.callbacks.js @@ -52,17 +52,29 @@ Below is a sample login attempt var clientApiUrl = "/client/api"; var clientConsoleUrl = "/client/console"; -$(document).ready(function() { +$(document).ready(function() { + /* + condition 1: If window.location.href contains parameter 'loginUrl', save the parameter's value to a cookie, then reload the page without any URL parameter. + (After the page is reloaded without any URL parameter, it will fall in condition 2.) + */ + if ($.urlParam('loginUrl') != 0) { + $.cookie('loginUrl', $.urlParam('loginUrl'), { expires: 1}); + document.location.href = '/client/'; + } - var url = $.urlParam("loginUrl"); - if (url != undefined && url != null && url.length > 0) { - url = unescape(clientApiUrl+"?"+url); + /* + condition 2: If window.location.href does not contain parameter 'loginUrl' but cookie 'loginUrl' exists, + save the cookie's value to g_regionUrlParam (a global variable for switching regions), + then call login API to set g_loginResponse (a global variable for single-sign-on). + */ + else if($.cookie('loginUrl') != null) { + g_regionUrlParam = '?loginUrl=' + $.cookie('loginUrl'); $.ajax({ - url: url, + url: unescape(clientApiUrl + "?" 
+ $.cookie('loginUrl')), dataType: "json", async: false, success: function(json) { - g_loginResponse = json.loginresponse; + g_loginResponse = json.loginresponse; }, error: function() { onLogoutCallback(); @@ -73,6 +85,7 @@ $(document).ready(function() { } }); } + }); diff --git a/ui/scripts/cloudStack.js b/ui/scripts/cloudStack.js index f9b5a58545c..00b06ab0e61 100644 --- a/ui/scripts/cloudStack.js +++ b/ui/scripts/cloudStack.js @@ -251,6 +251,9 @@ array1.push("&domain=" + encodeURIComponent("/")); } + g_regionUrlParam = '?loginUrl=' + escape("command=login" + array1.join("") + "&response=json"); + $.cookie('loginUrl', escape("command=login" + array1.join("") + "&response=json"), { expires: 1}); + $.ajax({ type: "POST", data: "command=login" + array1.join("") + "&response=json", @@ -382,8 +385,9 @@ g_domainid = null; g_timezoneoffset = null; g_timezone = null; - g_supportELB = null; - + g_supportELB = null; + g_regionUrlParam = null; + $.cookie('JSESSIONID', null); $.cookie('sessionKey', null); $.cookie('username', null); @@ -394,6 +398,7 @@ $.cookie('timezoneoffset', null); $.cookie('timezone', null); $.cookie('supportELB', null); + $.cookie('loginUrl', null); if(onLogoutCallback()) { //onLogoutCallback() will set g_loginResponse(single-sign-on variable) to null, then bypassLoginCheck() will show login screen. document.location.reload(); //when onLogoutCallback() returns true, reload the current document. diff --git a/ui/scripts/sharedFunctions.js b/ui/scripts/sharedFunctions.js index 8bcdff91574..dbcb781a6fa 100644 --- a/ui/scripts/sharedFunctions.js +++ b/ui/scripts/sharedFunctions.js @@ -20,6 +20,7 @@ var g_role = null; // roles - root, domain-admin, ro-admin, user var g_username = null; var g_account = null; var g_domainid = null; +var g_regionUrlParam = null; var g_enableLogging = false; var g_timezoneoffset = null; var g_timezone = null; diff --git a/ui/scripts/ui-custom/regions.js b/ui/scripts/ui-custom/regions.js index ac52776d49f..579cdceb488 100644 --- a/ui/scripts/ui-custom/regions.js +++ b/ui/scripts/ui-custom/regions.js @@ -81,8 +81,8 @@ closeRegionSelector({ complete: function() { $('#container').prepend($('
').addClass('loading-overlay')); - - document.location.href = url; + + document.location.href = url + g_regionUrlParam; } }); }; From ae7e5b025e25e36bef8a8d9f2becfbfa5614196f Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Wed, 20 Mar 2013 14:34:37 -0700 Subject: [PATCH 057/123] CLOUDSTACK-1065: cloudstack UI - AWS Style Regions - set current region (whose end point matches current URL) to region button and region dropdown on top menu. --- ui/scripts/regions.js | 3 +-- ui/scripts/ui-custom/regions.js | 19 ++++++++++++------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/ui/scripts/regions.js b/ui/scripts/regions.js index 902d3f80ed3..79557ad595b 100644 --- a/ui/scripts/regions.js +++ b/ui/scripts/regions.js @@ -29,8 +29,7 @@ data: regions ? regions : [ { id: -1, name: '(Default)' } ], - activeRegionID: cloudStack.context.users.regionid ? - cloudStack.context.users.regionid : 1 + activeRegionID: cloudStack.context.users[0].regionid }); } }); diff --git a/ui/scripts/ui-custom/regions.js b/ui/scripts/ui-custom/regions.js index 579cdceb488..354ecee33de 100644 --- a/ui/scripts/ui-custom/regions.js +++ b/ui/scripts/ui-custom/regions.js @@ -29,23 +29,28 @@ var data = args.data; var activeRegionID = args.activeRegionID; + var currentRegion; $(data).each(function() { var region = this; var regionName = region.name; var $li = $('
  • ').append($('').html(_s(region.name))); $li.data('region-data', region); - + + if(document.location.href == region.endpoint) { + currentRegion = region; + $li.addClass('active'); + } + /* if (region.id == activeRegionID) { $li.addClass('active'); } - - $regionSwitcherButton.find('.title') - .html(regionName) - .attr('title', regionName); - - $regionList.append($li); + */ + + $regionList.append($li); }); + + $regionSwitcherButton.find('.title').html(_s(currentRegion.name)).attr('title', _s(currentRegion.name)); } } }); From 75a6e009b9d2b69c534a4a8e0d0ea1a63c10c3c5 Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Wed, 20 Mar 2013 15:19:36 -0700 Subject: [PATCH 058/123] CLOUDSTACK-1065: cloudstack UI - AWS Style Regions - remove region action - removing the region that you are currently in is not allowed. --- ui/scripts/regions.js | 12 ++++++++++-- ui/scripts/ui-custom/regions.js | 2 +- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/ui/scripts/regions.js b/ui/scripts/regions.js index 79557ad595b..72940e18876 100644 --- a/ui/scripts/regions.js +++ b/ui/scripts/regions.js @@ -70,7 +70,7 @@ success: function(json) { var item = json.addregionresponse.region; args.response.success({data: item}); - //$(window).trigger('cloudStack.refreshRegions'); + $(window).trigger('cloudStack.refreshRegions'); }, error: function(json) { args.response.error(parseXMLHttpResponse(json)); @@ -123,7 +123,15 @@ messages: { notification: function() { return 'label.remove.region'; }, confirm: function() { return 'message.remove.region'; } - }, + }, + preAction: function(args) { + var region = args.context.regions[0]; + if(region.endpoint == document.location.href) { + cloudStack.dialog.notice({ message: _l('You can not remove the region that you are currently in.') }); + return false; + } + return true; + }, action: function(args) { var region = args.context.regions[0]; diff --git a/ui/scripts/ui-custom/regions.js b/ui/scripts/ui-custom/regions.js index 354ecee33de..474e49817cd 100644 --- a/ui/scripts/ui-custom/regions.js +++ b/ui/scripts/ui-custom/regions.js @@ -37,7 +37,7 @@ $li.data('region-data', region); - if(document.location.href == region.endpoint) { + if(region.endpoint == document.location.href) { currentRegion = region; $li.addClass('active'); } From 6cb1486f299ee2f5d761a9c4ec6bf4d2cce4963d Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Wed, 20 Mar 2013 15:39:30 -0700 Subject: [PATCH 059/123] CLOUDSTACK-1065: cloudstack UI - AWS Style Regions - implement Edit Region action, ID field shouldn't be editable since ID is the base when searching for an entry in the database. 
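[Aside, not CloudStack code: the region patches above key both the "active region" highlight and the remove-region guard off a comparison between each region's configured endpoint and the browser's current URL. The standalone sketch below shows that check with illustrative URLs mirroring the sample values in the patch comments; it is only meant to show why a strict equality test is fragile once the browser URL carries a trailing "#" fragment, and why a containment test of the indexOf(...) != -1 form is more forgiving.]

public class CurrentRegionCheck {
    // True when the browser URL points into the given region endpoint,
    // mirroring the UI's "am I currently in this region?" decision.
    static boolean isCurrentRegion(String browserUrl, String regionEndpoint) {
        return browserUrl.contains(regionEndpoint);   // equivalent to indexOf(...) != -1
    }

    public static void main(String[] args) {
        String endpoint   = "http://localhost:8080/client/";    // sample region endpoint
        String browserUrl = "http://localhost:8080/client/#";   // same page, with a fragment

        System.out.println(browserUrl.equals(endpoint));           // false: strict equality misses it
        System.out.println(isCurrentRegion(browserUrl, endpoint)); // true: containment still matches
    }
}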
--- ui/scripts/regions.js | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/ui/scripts/regions.js b/ui/scripts/regions.js index 72940e18876..68191c04361 100644 --- a/ui/scripts/regions.js +++ b/ui/scripts/regions.js @@ -105,9 +105,15 @@ edit: { label: 'label.edit.region', action: function(args) { + var data = { + id: args.context.regions[0].id, + name: args.data.name, + endpoint: args.data.endpoint + }; + $.ajax({ url: createURL('updateRegion'), - data: args.data, + data: data, success: function(json) { args.response.success(); $(window).trigger('cloudStack.refreshRegions'); @@ -154,11 +160,11 @@ title: 'label.details', fields: [ { - name: { label: 'label.name', isEditable: true }, + id: { label: 'label.id' } }, { - endpoint: { label: 'label.endpoint', isEditable: true }, - id: { label: 'label.id', isEditable: true } + name: { label: 'label.name', isEditable: true }, + endpoint: { label: 'label.endpoint', isEditable: true } } ], dataProvider: function(args) { From 5a9ccce0f3a131bcbb53f8c1daa57e3309127537 Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Wed, 20 Mar 2013 16:41:26 -0700 Subject: [PATCH 060/123] LOUDSTACK-1065: cloudstack UI - AWS Style Regions - handle the case that document.location.href includes bookmark # that doesn't match region's end point completely. --- ui/scripts/regions.js | 10 +++++++--- ui/scripts/ui-custom/regions.js | 24 ++++++++++++------------ 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/ui/scripts/regions.js b/ui/scripts/regions.js index 68191c04361..0959e3b4aef 100644 --- a/ui/scripts/regions.js +++ b/ui/scripts/regions.js @@ -28,8 +28,7 @@ args.response.success({ data: regions ? regions : [ { id: -1, name: '(Default)' } - ], - activeRegionID: cloudStack.context.users[0].regionid + ] }); } }); @@ -132,7 +131,12 @@ }, preAction: function(args) { var region = args.context.regions[0]; - if(region.endpoint == document.location.href) { + + /* e.g. + region.endpoint == "http://localhost:8080/client/" + document.location.href == "http://localhost:8080/client/#" + */ + if(document.location.href.indexOf(region.endpoint) != -1) { cloudStack.dialog.notice({ message: _l('You can not remove the region that you are currently in.') }); return false; } diff --git a/ui/scripts/ui-custom/regions.js b/ui/scripts/ui-custom/regions.js index 474e49817cd..4620ea451ea 100644 --- a/ui/scripts/ui-custom/regions.js +++ b/ui/scripts/ui-custom/regions.js @@ -27,9 +27,8 @@ response: { success: function(args) { var data = args.data; - var activeRegionID = args.activeRegionID; - - var currentRegion; + + var currentRegion = null; $(data).each(function() { var region = this; var regionName = region.name; @@ -37,20 +36,21 @@ $li.data('region-data', region); - if(region.endpoint == document.location.href) { + /* e.g. 
+ region.endpoint == "http://localhost:8080/client/" + document.location.href == "http://localhost:8080/client/#" + */ + if(document.location.href.indexOf(region.endpoint) != -1) { currentRegion = region; $li.addClass('active'); } - /* - if (region.id == activeRegionID) { - $li.addClass('active'); - } - */ - + $regionList.append($li); }); - - $regionSwitcherButton.find('.title').html(_s(currentRegion.name)).attr('title', _s(currentRegion.name)); + + if(currentRegion != null) { + $regionSwitcherButton.find('.title').html(_s(currentRegion.name)).attr('title', _s(currentRegion.name)); + } } } }); From 9270b4335c12e410be52092b02da723b1a0c7b5b Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Wed, 20 Mar 2013 16:47:52 -0700 Subject: [PATCH 061/123] CLOUDSTACK-1065: cloudstack UI - AWS Style Regions - remove region field from Add Account dialog since createAccount API does not take in regionid parameter. --- ui/scripts/accounts.js | 36 ++++-------------------------------- 1 file changed, 4 insertions(+), 32 deletions(-) diff --git a/ui/scripts/accounts.js b/ui/scripts/accounts.js index 3727f8ee0b1..3403337a834 100644 --- a/ui/scripts/accounts.js +++ b/ui/scripts/accounts.js @@ -112,32 +112,7 @@ label: 'label.last.name', validation: { required: true }, docID: 'helpAccountLastName' - }, - regionid: { - label: 'label.region', - select: function(args) { - $.ajax({ - url: createURL('listRegions&listAll=true'), - success: function(json) { - var regions = json.listregionsresponse.region; - var regionOptions; - - if (!regions) { - regionOptions = [{ id: 0, description: '0 - Default' }]; - } else { - regionOptions = $(regions).map(function(index, region) { - return { - id: region.id, - description: region.id + ' - ' + region.name - }; - }); - } - - args.response.success({ data: regionOptions }); - } - }); - } - }, + }, domainid: { label: 'label.domain', docID: 'helpAccountDomain', @@ -220,8 +195,7 @@ $.extend(data, { email: args.data.email, firstname: args.data.firstname, - lastname: args.data.lastname, - regionid: args.data.regionid, + lastname: args.data.lastname, domainid: args.data.domainid }); @@ -316,8 +290,7 @@ domainid: accountObj.domainid, account: accountObj.name, newname: args.data.name, - networkdomain: args.data.networkdomain, - regionid: accountObj.regionid ? 
accountObj.regionid : 0 + networkdomain: args.data.networkdomain }; $.ajax({ @@ -660,8 +633,7 @@ converter: function(args){ return cloudStack.converters.toRole(args); } - }, - regionid: { label: 'label.region' }, + }, domain: { label: 'label.domain' }, state: { label: 'label.state' }, networkdomain: { From 3ed6200ef8cce4c00600bf9567e52f9787a2c296 Mon Sep 17 00:00:00 2001 From: Edison Su Date: Wed, 20 Mar 2013 17:20:17 -0700 Subject: [PATCH 062/123] move default primary storage plugin into its own pom --- .../storage/DataStoreProviderApiService.java | 28 + api/src/com/cloud/storage/StoragePool.java | 2 +- .../org/apache/cloudstack/api/BaseCmd.java | 2 + .../admin/storage/CreateStoragePoolCmd.java | 8 +- .../storage/ListStorageProvidersCmd.java | 72 ++ .../api/response/StorageProviderResponse.java | 62 ++ .../agent/test/BackupSnapshotCommandTest.java | 2 +- .../api/agent/test/SnapshotCommandTest.java | 2 +- .../api/test/ResizeVolumeCommandTest.java | 2 +- client/pom.xml | 5 + client/tomcatconf/commands.properties.in | 1 + client/tomcatconf/componentContext.xml.in | 3 + .../datacenter/entity/api/StorageEntity.java | 1 + .../api/storage/DataStoreLifeCycle.java | 6 +- .../api/storage/DataStoreProvider.java | 13 +- .../api/storage/DataStoreProviderManager.java | 6 +- .../api/storage}/ImageDataStoreProvider.java | 3 +- .../storage/PrimaryDataStoreParameters.java | 220 ++++ .../api/storage/PrimaryDataStoreProvider.java | 4 + .../storage/datastore/db/StoragePoolVO.java | 12 +- .../manager/ImageDataStoreManagerImpl.java | 14 +- .../store/AncientImageDataStoreProvider.java | 47 +- .../store/DefaultImageDataStoreImpl.java | 2 +- .../store/DefaultImageDataStoreProvider.java | 39 +- .../DefaultImageDataStoreLifeCycle.java | 11 +- .../allocator/StorageAllocatorTest.java | 4 +- .../storage/test/volumeServiceTest.java | 12 +- .../datastore/PrimaryDataStoreEntityImpl.java | 12 +- .../PrimaryDataStoreProviderManager.java | 4 +- .../DataStoreProviderManagerImpl.java | 110 +- .../provider/PrimaryDataStoreProvider.java | 23 - .../image/datastore/ImageDataStoreHelper.java | 4 +- .../storage/image/db/ImageDataStoreVO.java | 12 +- .../datastore/PrimaryDataStoreHelper.java | 172 +++- .../datastore/DefaultPrimaryDataStore.java | 10 +- .../AncientPrimaryDataStoreLifeCycleImpl.java | 963 ------------------ .../DefaultPrimaryDataStoreLifeCycleImpl.java | 42 +- ...ltPrimaryDataStoreProviderManagerImpl.java | 16 +- .../DefaultPrimaryDatastoreProviderImpl.java | 32 +- .../storage/volume/test/ConfiguratorTest.java | 2 +- plugins/pom.xml | 1 + plugins/storage/volume/default/pom.xml | 56 + .../CloudStackPrimaryDataStoreDriverImpl.java | 4 +- ...oudStackPrimaryDataStoreLifeCycleImpl.java | 542 ++++++++++ ...loudStackPrimaryDataStoreProviderImpl.java | 52 +- .../cloudstack/storage/test/VolumeTest.java | 2 +- .../cloud/server/ManagementServerImpl.java | 1 + .../com/cloud/storage/StorageManagerImpl.java | 68 +- .../cloud/storage/StoragePoolAutomation.java | 26 + .../storage/StoragePoolAutomationImpl.java | 456 +++++++++ setup/db/db/schema-410to420.sql | 6 +- tools/apidoc/gen_toc.py | 1 + 52 files changed, 1931 insertions(+), 1269 deletions(-) create mode 100644 api/src/com/cloud/storage/DataStoreProviderApiService.java create mode 100644 api/src/org/apache/cloudstack/api/command/admin/storage/ListStorageProvidersCmd.java create mode 100644 api/src/org/apache/cloudstack/api/response/StorageProviderResponse.java rename engine/{storage/src/org/apache/cloudstack/storage/datastore/provider => 
api/src/org/apache/cloudstack/engine/subsystem/api/storage}/ImageDataStoreProvider.java (86%) create mode 100644 engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java delete mode 100644 engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java delete mode 100644 engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java create mode 100644 plugins/storage/volume/default/pom.xml rename engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java => plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java (98%) create mode 100644 plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java rename engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java => plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/provider/CloudStackPrimaryDataStoreProviderImpl.java (58%) create mode 100644 server/src/com/cloud/storage/StoragePoolAutomation.java create mode 100644 server/src/com/cloud/storage/StoragePoolAutomationImpl.java diff --git a/api/src/com/cloud/storage/DataStoreProviderApiService.java b/api/src/com/cloud/storage/DataStoreProviderApiService.java new file mode 100644 index 00000000000..f81a9960be1 --- /dev/null +++ b/api/src/com/cloud/storage/DataStoreProviderApiService.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package com.cloud.storage; + +import java.util.List; + +import org.apache.cloudstack.api.response.StorageProviderResponse; + +public interface DataStoreProviderApiService { + public List getDataStoreProviders(String type); + +} diff --git a/api/src/com/cloud/storage/StoragePool.java b/api/src/com/cloud/storage/StoragePool.java index 091eef182cc..8b95383c537 100644 --- a/api/src/com/cloud/storage/StoragePool.java +++ b/api/src/com/cloud/storage/StoragePool.java @@ -99,7 +99,7 @@ public interface StoragePool extends Identity, InternalIdentity { /** * @return */ - Long getStorageProviderId(); + String getStorageProviderName(); boolean isInMaintenance(); } diff --git a/api/src/org/apache/cloudstack/api/BaseCmd.java b/api/src/org/apache/cloudstack/api/BaseCmd.java index 816b6deed77..8b7f43fd104 100644 --- a/api/src/org/apache/cloudstack/api/BaseCmd.java +++ b/api/src/org/apache/cloudstack/api/BaseCmd.java @@ -61,6 +61,7 @@ import com.cloud.projects.ProjectService; import com.cloud.resource.ResourceService; import com.cloud.server.ManagementService; import com.cloud.server.TaggedResourceService; +import com.cloud.storage.DataStoreProviderApiService; import com.cloud.storage.StorageService; import com.cloud.storage.VolumeApiService; import com.cloud.storage.snapshot.SnapshotService; @@ -131,6 +132,7 @@ public abstract class BaseCmd { @Inject public UsageService _usageService; @Inject public NetworkUsageService _networkUsageService; @Inject public VMSnapshotService _vmSnapshotService; + @Inject public DataStoreProviderApiService dataStoreProviderApiService; public abstract void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException; diff --git a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java index b86784ed0b0..5178d685889 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java @@ -73,8 +73,8 @@ public class CreateStoragePoolCmd extends BaseCmd { private Long zoneId; @Parameter(name=ApiConstants.PROVIDER, type=CommandType.STRING, - required=false, description="the storage provider uuid") - private String storageProviderUuid; + required=false, description="the storage provider name") + private String storageProviderName; @Parameter(name=ApiConstants.SCOPE, type=CommandType.STRING, required=false, description="the scope of the storage: cluster or zone") @@ -112,8 +112,8 @@ public class CreateStoragePoolCmd extends BaseCmd { return zoneId; } - public String getStorageProviderUuid() { - return this.storageProviderUuid; + public String getStorageProviderName() { + return this.storageProviderName; } public String getScope() { diff --git a/api/src/org/apache/cloudstack/api/command/admin/storage/ListStorageProvidersCmd.java b/api/src/org/apache/cloudstack/api/command/admin/storage/ListStorageProvidersCmd.java new file mode 100644 index 00000000000..0dfc6633c6f --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/admin/storage/ListStorageProvidersCmd.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.api.command.admin.storage; + +import java.util.List; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.StorageProviderResponse; +import org.apache.log4j.Logger; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; + +@APICommand(name = "listStorageProviders", description="Lists storage providers.", responseObject=StorageProviderResponse.class) +public class ListStorageProvidersCmd extends BaseListCmd { + public static final Logger s_logger = Logger.getLogger(ListStorageProvidersCmd.class.getName()); + private static final String s_name = "liststorageprovidersresponse"; + + @Parameter(name=ApiConstants.TYPE, type=CommandType.STRING, description="the type of storage provider: either primary or image", required = true) + private String type; + + @Override + public String getCommandName() { + return s_name; + } + + public String getType() { + return this.type; + } + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, + NetworkRuleConflictException { + if (getType() == null) { + throw new ServerApiException(ApiErrorCode.MALFORMED_PARAMETER_ERROR, "need to specify type: either primary or image"); + } + + List providers = this.dataStoreProviderApiService.getDataStoreProviders(getType()); + ListResponse responses = new ListResponse(); + for (StorageProviderResponse provider : providers) { + provider.setObjectName("dataStoreProvider"); + } + responses.setResponses(providers); + responses.setResponseName(this.getCommandName()); + this.setResponseObject(responses); + } +} diff --git a/api/src/org/apache/cloudstack/api/response/StorageProviderResponse.java b/api/src/org/apache/cloudstack/api/response/StorageProviderResponse.java new file mode 100644 index 00000000000..4baf48629ff --- /dev/null +++ b/api/src/org/apache/cloudstack/api/response/StorageProviderResponse.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.BaseResponse; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +public class StorageProviderResponse extends BaseResponse { + @SerializedName("name") @Param(description="the name of the storage provider") + private String name; + + @SerializedName("type") @Param(description="the type of the storage provider: primary or image provider") + private String type; + + /** + * @return the type + */ + public String getType() { + return type; + } + + /** + * @param type the type to set + */ + public void setType(String type) { + this.type = type; + } + + /** + * @return the name + */ + public String getName() { + return name; + } + + /** + * @param name the name to set + */ + public void setName(String name) { + this.name = name; + } + + +} diff --git a/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java b/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java index 06697c4384f..44d53aaa175 100644 --- a/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java +++ b/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java @@ -133,7 +133,7 @@ public class BackupSnapshotCommandTest { } @Override - public Long getStorageProviderId() { + public String getStorageProviderName() { // TODO Auto-generated method stub return null; } diff --git a/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java b/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java index 767d7c37c5e..c2d69c0b0fd 100644 --- a/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java +++ b/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java @@ -115,7 +115,7 @@ public class SnapshotCommandTest { } @Override - public Long getStorageProviderId() { + public String getStorageProviderName() { // TODO Auto-generated method stub return null; } diff --git a/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java b/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java index 852e52b1b86..02085f577b6 100644 --- a/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java +++ b/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java @@ -134,7 +134,7 @@ public class ResizeVolumeCommandTest { } @Override - public Long getStorageProviderId() { + public String getStorageProviderName() { // TODO Auto-generated method stub return null; } diff --git a/client/pom.xml b/client/pom.xml index 302fe6bacbe..7ad2eff4cd9 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -224,6 +224,11 @@ cloud-plugin-hypervisor-simulator ${project.version} + + org.apache.cloudstack + cloud-plugin-storage-volume-default + ${project.version} + install diff --git a/client/tomcatconf/commands.properties.in b/client/tomcatconf/commands.properties.in index 382573b864c..492f7f909f6 100644 --- a/client/tomcatconf/commands.properties.in +++ b/client/tomcatconf/commands.properties.in @@ -278,6 +278,7 @@ listAsyncJobs=15 #### storage pools commands 
listStoragePools=3 +listStorageProviders=3 createStoragePool=1 updateStoragePool=1 deleteStoragePool=1 diff --git a/client/tomcatconf/componentContext.xml.in b/client/tomcatconf/componentContext.xml.in index 2535c779368..a98a41ffd5a 100644 --- a/client/tomcatconf/componentContext.xml.in +++ b/client/tomcatconf/componentContext.xml.in @@ -329,5 +329,8 @@ + + + diff --git a/engine/api/src/org/apache/cloudstack/engine/datacenter/entity/api/StorageEntity.java b/engine/api/src/org/apache/cloudstack/engine/datacenter/entity/api/StorageEntity.java index 2c7f443e567..872931b1c8b 100755 --- a/engine/api/src/org/apache/cloudstack/engine/datacenter/entity/api/StorageEntity.java +++ b/engine/api/src/org/apache/cloudstack/engine/datacenter/entity/api/StorageEntity.java @@ -21,4 +21,5 @@ package org.apache.cloudstack.engine.datacenter.entity.api; import com.cloud.storage.StoragePool; public interface StorageEntity extends DataCenterResourceEntity, StoragePool { + } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java index 95e3d0b2ef8..280e02e2a32 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java @@ -34,9 +34,9 @@ public interface DataStoreLifeCycle { public boolean unmanaged(); - public boolean maintain(long storeId); + public boolean maintain(DataStore store); - public boolean cancelMaintain(long storeId); + public boolean cancelMaintain(DataStore store); - public boolean deleteDataStore(long storeId); + public boolean deleteDataStore(DataStore store); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProvider.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProvider.java index d29c4828713..115a52f92ac 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProvider.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProvider.java @@ -19,12 +19,19 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import java.util.Map; +import java.util.Set; public interface DataStoreProvider { - public DataStoreLifeCycle getLifeCycle(); + public static enum DataStoreProviderType { + PRIMARY, + IMAGE + } + public DataStoreLifeCycle getDataStoreLifeCycle(); + public DataStoreDriver getDataStoreDriver(); + public HypervisorHostListener getHostListener(); public String getName(); - public String getUuid(); - public long getId(); public boolean configure(Map params); + public Set getTypes(); + } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProviderManager.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProviderManager.java index 94998133196..906720a1f41 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProviderManager.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProviderManager.java @@ -20,12 +20,12 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import java.util.List; +import com.cloud.storage.DataStoreProviderApiService; import com.cloud.utils.component.Manager; -public interface DataStoreProviderManager extends Manager { - public DataStoreProvider getDataStoreProviderByUuid(String uuid); - public DataStoreProvider 
getDataStoreProviderById(long id); +public interface DataStoreProviderManager extends Manager, DataStoreProviderApiService { public DataStoreProvider getDataStoreProvider(String name); public DataStoreProvider getDefaultPrimaryDataStoreProvider(); public List getDataStoreProviders(); + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ImageDataStoreProvider.java similarity index 86% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java rename to engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ImageDataStoreProvider.java index d44a40e971f..1fb987e81cd 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ImageDataStoreProvider.java @@ -16,9 +16,8 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.cloudstack.storage.datastore.provider; +package org.apache.cloudstack.engine.subsystem.api.storage; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; public interface ImageDataStoreProvider extends DataStoreProvider { diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java new file mode 100644 index 00000000000..b2b787cc133 --- /dev/null +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java @@ -0,0 +1,220 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.apache.cloudstack.engine.subsystem.api.storage;
+
+import java.util.Map;
+
+import com.cloud.storage.Storage.StoragePoolType;
+
+public class PrimaryDataStoreParameters {
+    private Long zoneId;
+    private Long podId;
+    private Long clusterId;
+    private String providerName;
+    private Map details;
+    private String tags;
+    private StoragePoolType type;
+    private String host;
+    private String path;
+    private int port;
+    private String uuid;
+    private String name;
+    private String userInfo;
+    /**
+     * @return the userInfo
+     */
+    public String getUserInfo() {
+        return userInfo;
+    }
+
+    /**
+     * @param userInfo the userInfo to set
+     */
+    public void setUserInfo(String userInfo) {
+        this.userInfo = userInfo;
+    }
+
+    /**
+     * @return the name
+     */
+    public String getName() {
+        return name;
+    }
+
+    /**
+     * @param name the name to set
+     */
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    /**
+     * @return the uuid
+     */
+    public String getUuid() {
+        return uuid;
+    }
+
+    /**
+     * @param uuid the uuid to set
+     */
+    public void setUuid(String uuid) {
+        this.uuid = uuid;
+    }
+
+    /**
+     * @return the port
+     */
+    public int getPort() {
+        return port;
+    }
+
+    /**
+     * @param port the port to set
+     */
+    public void setPort(int port) {
+        this.port = port;
+    }
+
+    /**
+     * @return the path
+     */
+    public String getPath() {
+        return path;
+    }
+
+    /**
+     * @param path the path to set
+     */
+    public void setPath(String path) {
+        this.path = path;
+    }
+
+    /**
+     * @return the host
+     */
+    public String getHost() {
+        return host;
+    }
+
+    /**
+     * @param host the host to set
+     */
+    public void setHost(String host) {
+        this.host = host;
+    }
+
+    /**
+     * @return the type
+     */
+    public StoragePoolType getType() {
+        return type;
+    }
+
+    /**
+     * @param type the type to set
+     */
+    public void setType(StoragePoolType type) {
+        this.type = type;
+    }
+
+    /**
+     * @return the tags
+     */
+    public String getTags() {
+        return tags;
+    }
+
+    /**
+     * @param tags the tags to set
+     */
+    public void setTags(String tags) {
+        this.tags = tags;
+    }
+
+    /**
+     * @return the details
+     */
+    public Map getDetails() {
+        return details;
+    }
+
+    /**
+     * @param details the details to set
+     */
+    public void setDetails(Map details) {
+        this.details = details;
+    }
+
+    /**
+     * @return the providerName
+     */
+    public String getProviderName() {
+        return providerName;
+    }
+
+    /**
+     * @param providerName the providerName to set
+     */
+    public void setProviderName(String providerName) {
+        this.providerName = providerName;
+    }
+
+    /**
+     * @return the clusterId
+     */
+    public Long getClusterId() {
+        return clusterId;
+    }
+
+    /**
+     * @param clusterId the clusterId to set
+     */
+    public void setClusterId(Long clusterId) {
+        this.clusterId = clusterId;
+    }
+
+    /**
+     * @return the podId
+     */
+    public Long getPodId() {
+        return podId;
+    }
+
+    /**
+     * @param podId the podId to set
+     */
+    public void setPodId(Long podId) {
+        this.podId = podId;
+    }
+
+    /**
+     * @return the zoneId
+     */
+    public Long getZoneId() {
+        return zoneId;
+    }
+
+    /**
+     * @param zoneId the zoneId to set
+     */
+    public void setZoneId(Long zoneId) {
+        this.zoneId = zoneId;
+    }
+}
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreProvider.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreProvider.java
index b248758bc12..b349ac9ad71 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreProvider.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreProvider.java
@@ -14,3 +14,7 @@ // KIND, either express or implied.
 // specific language governing permissions and limitations
 // under the License.
+package org.apache.cloudstack.engine.subsystem.api.storage;
+
+public interface PrimaryDataStoreProvider extends DataStoreProvider {
+}
diff --git a/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java
index 579eaefe329..55b2314f0fd 100644
--- a/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java
+++ b/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java
@@ -80,8 +80,8 @@ public class StoragePoolVO implements StoragePool{
     @Enumerated(value = EnumType.STRING)
     private StoragePoolStatus status;
 
-    @Column(name = "storage_provider_id", updatable = true, nullable = false)
-    private Long storageProviderId;
+    @Column(name = "storage_provider_name", updatable = true, nullable = false)
+    private String storageProviderName;
 
     @Column(name = "host_address")
     private String hostAddress;
@@ -180,12 +180,12 @@ public class StoragePoolVO implements StoragePool{
         return availableBytes;
     }
 
-    public Long getStorageProviderId() {
-        return storageProviderId;
+    public String getStorageProviderName() {
+        return storageProviderName;
     }
 
-    public void setStorageProviderId(Long provider) {
-        storageProviderId = provider;
+    public void setStorageProviderName(String providerName) {
+        storageProviderName = providerName;
     }
 
     public long getCapacityBytes() {
diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java
index 2771f78e381..bc546f8d0c1 100644
--- a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java
+++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java
@@ -28,7 +28,7 @@
 import javax.inject.Inject;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
-import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider;
+import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider;
 import org.apache.cloudstack.storage.image.ImageDataStoreDriver;
 import org.apache.cloudstack.storage.image.datastore.ImageDataStore;
 import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager;
@@ -57,21 +57,21 @@ public class ImageDataStoreManagerImpl implements ImageDataStoreManager {
     @Override
     public ImageDataStore getImageDataStore(long dataStoreId) {
         ImageDataStoreVO dataStore = dataStoreDao.findById(dataStoreId);
-        long providerId = dataStore.getProvider();
-        ImageDataStoreProvider provider = (ImageDataStoreProvider)providerManager.getDataStoreProviderById(providerId);
+        String providerName = dataStore.getProviderName();
+        ImageDataStoreProvider provider = (ImageDataStoreProvider)providerManager.getDataStoreProvider(providerName);
         ImageDataStore imgStore = DefaultImageDataStoreImpl.getDataStore(dataStore,
-                driverMaps.get(provider.getUuid()), provider
+                driverMaps.get(provider.getName()), provider
                 );
         // TODO Auto-generated method stub
         return imgStore;
     }
 
     @Override
-    public boolean registerDriver(String uuid, ImageDataStoreDriver driver) {
-
if (driverMaps.containsKey(uuid)) { + public boolean registerDriver(String providerName, ImageDataStoreDriver driver) { + if (driverMaps.containsKey(providerName)) { return false; } - driverMaps.put(uuid, driver); + driverMaps.put(providerName, driver); return true; } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/AncientImageDataStoreProvider.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/AncientImageDataStoreProvider.java index b2ee9ab853d..2715dc7e0e9 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/AncientImageDataStoreProvider.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/AncientImageDataStoreProvider.java @@ -19,14 +19,18 @@ package org.apache.cloudstack.storage.image.store; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import java.util.UUID; import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; -import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider; import org.apache.cloudstack.storage.image.ImageDataStoreDriver; import org.apache.cloudstack.storage.image.datastore.ImageDataStoreHelper; import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager; @@ -47,10 +51,9 @@ public class AncientImageDataStoreProvider implements ImageDataStoreProvider { ImageDataStoreManager storeMgr; @Inject ImageDataStoreHelper helper; - long id; - String uuid; + @Override - public DataStoreLifeCycle getLifeCycle() { + public DataStoreLifeCycle getDataStoreLifeCycle() { return lifeCycle; } @@ -59,23 +62,12 @@ public class AncientImageDataStoreProvider implements ImageDataStoreProvider { return this.name; } - @Override - public String getUuid() { - return this.uuid; - } - - @Override - public long getId() { - return this.id; - } - @Override public boolean configure(Map params) { lifeCycle = ComponentContext.inject(DefaultImageDataStoreLifeCycle.class); driver = ComponentContext.inject(AncientImageDataStoreDriverImpl.class); - uuid = (String)params.get("uuid"); - id = (Long)params.get("id"); - storeMgr.registerDriver(uuid, driver); + + storeMgr.registerDriver(this.getName(), driver); Map infos = new HashMap(); String dataStoreName = UUID.nameUUIDFromBytes(this.name.getBytes()).toString(); @@ -83,10 +75,27 @@ public class AncientImageDataStoreProvider implements ImageDataStoreProvider { infos.put("uuid", dataStoreName); infos.put("protocol", "http"); infos.put("scope", ScopeType.GLOBAL); - infos.put("provider", this.getId()); - DataStoreLifeCycle lifeCycle = this.getLifeCycle(); + infos.put("providerName", this.getName()); + DataStoreLifeCycle lifeCycle = this.getDataStoreLifeCycle(); lifeCycle.initialize(infos); return true; } + @Override + public DataStoreDriver getDataStoreDriver() { + return this.driver; + } + + @Override + public HypervisorHostListener getHostListener() { + return null; + } + + @Override + public Set getTypes() { + Set types = new HashSet(); + types.add(DataStoreProviderType.IMAGE); + return types; + } + } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreImpl.java 
b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreImpl.java index a6e961a0a83..6eefc6f43f8 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreImpl.java @@ -25,13 +25,13 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider; import org.apache.cloudstack.storage.image.ImageDataStoreDriver; import org.apache.cloudstack.storage.image.datastore.ImageDataStore; import org.apache.cloudstack.storage.image.db.ImageDataStoreVO; diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreProvider.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreProvider.java index efbb999bdcf..0b5de858819 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreProvider.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreProvider.java @@ -18,12 +18,16 @@ */ package org.apache.cloudstack.storage.image.store; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; -import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider; import org.apache.cloudstack.storage.image.ImageDataStoreDriver; import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager; import org.apache.cloudstack.storage.image.driver.DefaultImageDataStoreDriverImpl; @@ -41,7 +45,7 @@ public class DefaultImageDataStoreProvider implements ImageDataStoreProvider { long id; String uuid; @Override - public DataStoreLifeCycle getLifeCycle() { + public DataStoreLifeCycle getDataStoreLifeCycle() { return lifeCycle; } @@ -50,24 +54,29 @@ public class DefaultImageDataStoreProvider implements ImageDataStoreProvider { return this.name; } - @Override - public String getUuid() { - return this.uuid; - } - - @Override - public long getId() { - return this.id; - } - @Override public boolean configure(Map params) { lifeCycle = ComponentContext.inject(DefaultImageDataStoreLifeCycle.class); driver = ComponentContext.inject(DefaultImageDataStoreDriverImpl.class); - uuid = (String)params.get("uuid"); - id = (Long)params.get("id"); - storeMgr.registerDriver(uuid, driver); + + storeMgr.registerDriver(this.getName(), driver); return true; } + @Override + public 
Set getTypes() { + Set types = new HashSet(); + types.add(DataStoreProviderType.IMAGE); + return types; + } + + @Override + public DataStoreDriver getDataStoreDriver() { + return this.driver; + } + + @Override + public HypervisorHostListener getHostListener() { + return null; + } } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java index 17aabca3921..ba29c1a14b0 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java @@ -86,27 +86,22 @@ public class DefaultImageDataStoreLifeCycle implements ImageDataStoreLifeCycle { @Override - public boolean maintain(long storeId) { + public boolean maintain(DataStore store) { // TODO Auto-generated method stub return false; } @Override - public boolean cancelMaintain(long storeId) { + public boolean cancelMaintain(DataStore store) { // TODO Auto-generated method stub return false; } @Override - public boolean deleteDataStore(long storeId) { + public boolean deleteDataStore(DataStore store) { // TODO Auto-generated method stub return false; } - - - - - } diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java index 414e2319465..9444fa5246e 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java @@ -133,7 +133,7 @@ public class StorageAllocatorTest { storage.setCapacityBytes(20000); storage.setHostAddress(UUID.randomUUID().toString()); storage.setPath(UUID.randomUUID().toString()); - storage.setStorageProviderId(provider.getId()); + storage.setStorageProviderName(provider.getName()); storage = storagePoolDao.persist(storage); storagePoolId = storage.getId(); @@ -176,7 +176,7 @@ public class StorageAllocatorTest { storage.setCapacityBytes(20000); storage.setHostAddress(UUID.randomUUID().toString()); storage.setPath(UUID.randomUUID().toString()); - storage.setStorageProviderId(provider.getId()); + storage.setStorageProviderName(provider.getName()); StoragePoolVO newStorage = storagePoolDao.persist(storage); newStorageId = newStorage.getId(); diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java index d8d187c0ce5..35a1790a0a9 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java @@ -281,9 +281,9 @@ public class volumeServiceTest extends CloudStackTestNGBase { params.put("port", "1"); params.put("roles", DataStoreRole.Primary.toString()); params.put("uuid", UUID.nameUUIDFromBytes(this.getPrimaryStorageUrl().getBytes()).toString()); - params.put("providerId", String.valueOf(provider.getId())); + params.put("providerName", String.valueOf(provider.getName())); - DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); + DataStoreLifeCycle lifeCycle = 
provider.getDataStoreLifeCycle(); this.primaryStore = lifeCycle.initialize(params); ClusterScope scope = new ClusterScope(clusterId, podId, dcId); lifeCycle.attachCluster(this.primaryStore, scope); @@ -297,8 +297,8 @@ public class volumeServiceTest extends CloudStackTestNGBase { params.put("uuid", name); params.put("protocol", "http"); params.put("scope", ScopeType.GLOBAL.toString()); - params.put("provider", Long.toString(provider.getId())); - DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); + params.put("providerName", name); + DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); DataStore store = lifeCycle.initialize(params); return store; } @@ -323,9 +323,9 @@ public class volumeServiceTest extends CloudStackTestNGBase { params.put("port", "1"); params.put("roles", DataStoreRole.Primary.toString()); params.put("uuid", UUID.nameUUIDFromBytes(this.getPrimaryStorageUrl().getBytes()).toString()); - params.put("providerId", String.valueOf(provider.getId())); + params.put("providerName", String.valueOf(provider.getName())); - DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); + DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); DataStore store = lifeCycle.initialize(params); ClusterScope scope = new ClusterScope(clusterId, podId, dcId); lifeCycle.attachCluster(store, scope); diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java index e70f803ee81..2dc3e255b38 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java @@ -242,16 +242,16 @@ public class PrimaryDataStoreEntityImpl implements StorageEntity { } - @Override - public Long getStorageProviderId() { - // TODO Auto-generated method stub - return null; - } - @Override public boolean isInMaintenance() { // TODO Auto-generated method stub return false; } + @Override + public String getStorageProviderName() { + // TODO Auto-generated method stub + return null; + } + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java index d1c26e1a272..b3ed0aaab68 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java @@ -26,6 +26,6 @@ public interface PrimaryDataStoreProviderManager { public PrimaryDataStore getPrimaryDataStore(long dataStoreId); public PrimaryDataStore getPrimaryDataStore(String uuid); - boolean registerDriver(String uuid, PrimaryDataStoreDriver driver); - boolean registerHostListener(String uuid, HypervisorHostListener listener); + boolean registerDriver(String providerName, PrimaryDataStoreDriver driver); + boolean registerHostListener(String providerName, HypervisorHostListener listener); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java index 96d2da357f5..91b6c6329bb 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java +++ 
b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java @@ -18,21 +18,28 @@ */ package org.apache.cloudstack.storage.datastore.provider; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.UUID; +import java.util.Set; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.api.response.StorageProviderResponse; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider.DataStoreProviderType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; import org.apache.cloudstack.storage.datastore.db.DataStoreProviderDao; -import org.apache.cloudstack.storage.datastore.db.DataStoreProviderVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.utils.component.ManagerBase; @Component @@ -44,15 +51,11 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto @Inject DataStoreProviderDao providerDao; protected Map providerMap = new HashMap(); - @Override - public DataStoreProvider getDataStoreProviderByUuid(String uuid) { - return providerMap.get(uuid); - } - + @Inject + PrimaryDataStoreProviderManager primaryDataStoreProviderMgr; @Override public DataStoreProvider getDataStoreProvider(String name) { - DataStoreProviderVO dspv = providerDao.findByName(name); - return providerMap.get(dspv.getUuid()); + return providerMap.get(name); } @Override @@ -60,59 +63,86 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto // TODO Auto-generated method stub return null; } + + public List getPrimayrDataStoreProviders() { + List providers = new ArrayList(); + for (DataStoreProvider provider : providerMap.values()) { + if (provider instanceof PrimaryDataStoreProvider) { + StorageProviderResponse response = new StorageProviderResponse(); + response.setName(provider.getName()); + response.setType(DataStoreProvider.DataStoreProviderType.PRIMARY.toString()); + providers.add(response); + } + } + return providers; + } + + public List getImageDataStoreProviders() { + List providers = new ArrayList(); + for (DataStoreProvider provider : providerMap.values()) { + if (provider instanceof ImageDataStoreProvider) { + StorageProviderResponse response = new StorageProviderResponse(); + response.setName(provider.getName()); + response.setType(DataStoreProvider.DataStoreProviderType.IMAGE.toString()); + providers.add(response); + } + } + return providers; + } @Override public boolean configure(String name, Map params) throws ConfigurationException { Map copyParams = new HashMap(params); - //TODO: hold global lock - List providerVos = providerDao.listAll(); for (DataStoreProvider provider : providers) { - boolean existingProvider = false; - DataStoreProviderVO providerVO = null; - for (DataStoreProviderVO prov : providerVos) { - if (prov.getName().equalsIgnoreCase(provider.getName())) { - existingProvider = true; - providerVO = prov; - break; - } + String 
providerName = provider.getName(); + if (providerMap.get(providerName) != null) { + s_logger.debug("Failed to register data store provider, provider name: " + providerName + " is not unique"); + return false; } - String uuid = null; - if (!existingProvider) { - uuid = UUID.nameUUIDFromBytes(provider.getName().getBytes()).toString(); - providerVO = new DataStoreProviderVO(); - providerVO.setName(provider.getName()); - providerVO.setUuid(uuid); - providerVO = providerDao.persist(providerVO); - } else { - uuid = providerVO.getUuid(); - } - copyParams.put("uuid", uuid); - copyParams.put("id", providerVO.getId()); - providerMap.put(uuid, provider); + + s_logger.debug("registering data store provider:" + provider.getName()); + + providerMap.put(providerName, provider); try { boolean registrationResult = provider.configure(copyParams); if (!registrationResult) { - providerMap.remove(uuid); + providerMap.remove(providerName); + s_logger.debug("Failed to register data store provider: " + providerName); + return false; + } + + Set types = provider.getTypes(); + if (types.contains(DataStoreProviderType.PRIMARY)) { + primaryDataStoreProviderMgr.registerDriver(provider.getName(), (PrimaryDataStoreDriver)provider.getDataStoreDriver()); + primaryDataStoreProviderMgr.registerHostListener(provider.getName(), provider.getHostListener()); } } catch(Exception e) { s_logger.debug("configure provider failed", e); - providerMap.remove(uuid); + providerMap.remove(providerName); } } return true; } - @Override - public DataStoreProvider getDataStoreProviderById(long id) { - DataStoreProviderVO provider = providerDao.findById(id); - return providerMap.get(provider.getUuid()); - } - @Override public DataStoreProvider getDefaultPrimaryDataStoreProvider() { return this.getDataStoreProvider("ancient primary data store provider"); } + + @Override + public List getDataStoreProviders(String type) { + if (type == null) { + throw new InvalidParameterValueException("Invalid parameter, need to specify type: either primary or image"); + } + if (type.equalsIgnoreCase(DataStoreProvider.DataStoreProviderType.PRIMARY.toString())) { + return this.getPrimayrDataStoreProviders(); + } else if (type.equalsIgnoreCase(DataStoreProvider.DataStoreProviderType.IMAGE.toString())) { + return this.getImageDataStoreProviders(); + } else { + throw new InvalidParameterValueException("Invalid parameter: " + type); + } + } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java deleted file mode 100644 index fdf5958f1ab..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java +++ /dev/null @@ -1,23 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package org.apache.cloudstack.storage.datastore.provider;
-
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
-
-
-public interface PrimaryDataStoreProvider extends DataStoreProvider {
-}
diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreHelper.java b/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreHelper.java
index ba267af6984..3f1632cf13c 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreHelper.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreHelper.java
@@ -34,14 +34,14 @@ public class ImageDataStoreHelper {
     @Inject
     ImageDataStoreDao imageStoreDao;
     public ImageDataStoreVO createImageDataStore(Map params) {
-        ImageDataStoreVO store = imageStoreDao.findByUuid((String)params.get("uuid"));
+        ImageDataStoreVO store = imageStoreDao.findByName((String)params.get("name"));
         if (store != null) {
             return store;
         }
         store = new ImageDataStoreVO();
         store.setName((String)params.get("name"));
         store.setProtocol((String)params.get("protocol"));
-        store.setProvider((Long)params.get("provider"));
+        store.setProviderName((String)params.get("providerName"));
         store.setScope((ScopeType)params.get("scope"));
         store.setUuid((String)params.get("uuid"));
         store = imageStoreDao.persist(store);
diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataStoreVO.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataStoreVO.java
index c7b8e2d1228..4cb402a1271 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataStoreVO.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataStoreVO.java
@@ -45,8 +45,8 @@ public class ImageDataStoreVO {
     @Column(name = "protocol", nullable = false)
     private String protocol;
 
-    @Column(name = "image_provider_id", nullable = false)
-    private long provider;
+    @Column(name = "image_provider_name", nullable = false)
+    private String providerName;
 
     @Column(name = "data_center_id")
     private long dcId;
@@ -64,16 +64,16 @@
         return this.name;
     }
 
-    public long getProvider() {
-        return this.provider;
+    public String getProviderName() {
+        return this.providerName;
     }
 
     public void setName(String name) {
         this.name = name;
     }
 
-    public void setProvider(long provider) {
-        this.provider = provider;
+    public void setProviderName(String provider) {
+        this.providerName = provider;
     }
 
     public void setProtocol(String protocol) {
diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java
index c6ca90d1641..5f8daf42bb3 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java
@@ -18,57 +18,181 @@
  */
 package org.apache.cloudstack.storage.volume.datastore;
 
+import java.util.List;
 import java.util.Map;
 
 import javax.inject.Inject;
 
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; +import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.alert.AlertManager; +import com.cloud.capacity.Capacity; +import com.cloud.capacity.CapacityVO; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.StoragePoolStatus; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; @Component public class PrimaryDataStoreHelper { + private static final Logger s_logger = Logger + .getLogger(PrimaryDataStoreHelper.class); @Inject private PrimaryDataStoreDao dataStoreDao; - public StoragePoolVO createPrimaryDataStore(Map params) { - StoragePoolVO dataStoreVO = dataStoreDao.findPoolByUUID((String)params.get("uuid")); + @Inject + DataStoreManager dataStoreMgr; + @Inject + StorageManager storageMgr; + @Inject + protected CapacityDao _capacityDao; + @Inject + protected StoragePoolHostDao storagePoolHostDao; + public DataStore createPrimaryDataStore(PrimaryDataStoreParameters params) { + StoragePoolVO dataStoreVO = dataStoreDao.findPoolByUUID(params.getUuid()); if (dataStoreVO != null) { - throw new CloudRuntimeException("duplicate uuid: " + params.get("uuid")); + throw new CloudRuntimeException("duplicate uuid: " + params.getUuid()); } dataStoreVO = new StoragePoolVO(); - dataStoreVO.setStorageProviderId(Long.parseLong((String)params.get("providerId"))); - dataStoreVO.setHostAddress((String)params.get("server")); - dataStoreVO.setPath((String)params.get("path")); - dataStoreVO.setPoolType((StoragePoolType)params.get("protocol")); - dataStoreVO.setPort(Integer.parseInt((String)params.get("port"))); - dataStoreVO.setName((String)params.get("name")); - dataStoreVO.setUuid((String)params.get("uuid")); - dataStoreVO = dataStoreDao.persist(dataStoreVO); - return dataStoreVO; + dataStoreVO.setStorageProviderName(params.getProviderName()); + dataStoreVO.setHostAddress(params.getHost()); + dataStoreVO.setPath(params.getPath()); + dataStoreVO.setPoolType(params.getType()); + dataStoreVO.setPort(params.getPort()); + dataStoreVO.setName(params.getName()); + dataStoreVO.setUuid(params.getUuid()); + dataStoreVO.setDataCenterId(params.getZoneId()); + dataStoreVO.setPodId(params.getPodId()); + dataStoreVO.setClusterId(params.getClusterId()); + dataStoreVO.setStatus(StoragePoolStatus.Initialized); + dataStoreVO.setUserInfo(params.getUserInfo()); + + Map details = params.getDetails(); + String tags = params.getTags(); + if (tags != null) { + String[] tokens = tags.split(","); + + for (String tag : tokens) { + tag = tag.trim(); + if (tag.length() == 0) { + continue; + } + details.put(tag, "true"); + } + } + + dataStoreVO = dataStoreDao.persist(dataStoreVO, details); + + return dataStoreMgr.getDataStore(dataStoreVO.getId(), DataStoreRole.Primary); } - public boolean 
deletePrimaryDataStore(long id) { - StoragePoolVO dataStoreVO = dataStoreDao.findById(id); - if (dataStoreVO == null) { - throw new CloudRuntimeException("can't find store: " + id); + public DataStore attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { + StoragePoolHostVO poolHost = storagePoolHostDao.findByPoolHost(store.getId(), scope.getScopeId()); + if (poolHost == null) { + poolHost = new StoragePoolHostVO(store.getId(), scope.getScopeId(), existingInfo.getLocalPath()); + storagePoolHostDao.persist(poolHost); } - dataStoreDao.remove(id); + + StoragePoolVO pool = this.dataStoreDao.findById(store.getId()); + pool.setScope(scope.getScopeType()); + pool.setAvailableBytes(existingInfo.getAvailableBytes()); + pool.setCapacityBytes(existingInfo.getCapacityBytes()); + pool.setStatus(StoragePoolStatus.Up); + this.dataStoreDao.update(pool.getId(), pool); + this.storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getCapacityBytes() - pool.getAvailableBytes()); + return dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); + } + + public DataStore attachCluster(DataStore store) { + StoragePoolVO pool = this.dataStoreDao.findById(store.getId()); + + storageMgr.createCapacityEntry(pool.getId()); + + pool.setScope(ScopeType.CLUSTER); + pool.setStatus(StoragePoolStatus.Up); + this.dataStoreDao.update(pool.getId(), pool); + return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary); + } + + public DataStore attachZone(DataStore store) { + StoragePoolVO pool = this.dataStoreDao.findById(store.getId()); + pool.setScope(ScopeType.ZONE); + pool.setStatus(StoragePoolStatus.Up); + this.dataStoreDao.update(pool.getId(), pool); + return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary); + } + + public boolean maintain(DataStore store) { + StoragePoolVO pool = this.dataStoreDao.findById(store.getId()); + pool.setStatus(StoragePoolStatus.Maintenance); + this.dataStoreDao.update(pool.getId(), pool); return true; } - public void attachCluster(DataStore dataStore) { - //send down AttachPrimaryDataStoreCmd command to all the hosts in the cluster - AttachPrimaryDataStoreCmd cmd = new AttachPrimaryDataStoreCmd(dataStore.getUri()); - /*for (EndPoint ep : dataStore.getEndPoints()) { - ep.sendMessage(cmd); - } */ + public boolean cancelMaintain(DataStore store) { + StoragePoolVO pool = this.dataStoreDao.findById(store.getId()); + pool.setStatus(StoragePoolStatus.Up); + dataStoreDao.update(store.getId(), pool); + return true; } + + protected boolean deletePoolStats(Long poolId) { + CapacityVO capacity1 = _capacityDao.findByHostIdType(poolId, + CapacityVO.CAPACITY_TYPE_STORAGE); + CapacityVO capacity2 = _capacityDao.findByHostIdType(poolId, + CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED); + if (capacity1 != null) { + _capacityDao.remove(capacity1.getId()); + } + + if (capacity2 != null) { + _capacityDao.remove(capacity2.getId()); + } + + return true; + } + public boolean deletePrimaryDataStore(DataStore store) { + List hostPoolRecords = this.storagePoolHostDao + .listByPoolId(store.getId()); + StoragePoolVO poolVO = this.dataStoreDao.findById(store.getId()); + Transaction txn = Transaction.currentTxn(); + txn.start(); + for (StoragePoolHostVO host : hostPoolRecords) { + storagePoolHostDao.deleteStoragePoolHostDetails( + host.getHostId(), host.getPoolId()); + } + poolVO.setUuid(null); + this.dataStoreDao.update(poolVO.getId(), poolVO); + dataStoreDao.remove(poolVO.getId()); + deletePoolStats(poolVO.getId()); + // Delete op_host_capacity 
entries + this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, + null, null, null, poolVO.getId()); + txn.commit(); + + s_logger.debug("Storage pool id=" + poolVO.getId() + + " is removed successfully"); + return true; + } + } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java index 7b8741c87c2..31e6908e28f 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java @@ -331,13 +331,13 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { return this.pdsv.getPodId(); } - @Override - public Long getStorageProviderId() { - return this.pdsv.getStorageProviderId(); - } - @Override public boolean isInMaintenance() { return this.getStatus() == StoragePoolStatus.Maintenance ? true : false; } + + @Override + public String getStorageProviderName() { + return this.pdsv.getStorageProviderName(); + } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java deleted file mode 100644 index 6154a666b24..00000000000 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java +++ /dev/null @@ -1,963 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.storage.datastore.lifecycle; - -import java.net.URI; -import java.net.URISyntaxException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import javax.inject.Inject; - -import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreStatus; -import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; -import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; -import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; - -import com.cloud.agent.AgentManager; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.CreateStoragePoolCommand; -import com.cloud.agent.api.DeleteStoragePoolCommand; -import com.cloud.agent.api.ModifyStoragePoolCommand; -import com.cloud.agent.api.StoragePoolInfo; -import com.cloud.alert.AlertManager; -import com.cloud.capacity.Capacity; -import com.cloud.capacity.CapacityVO; -import com.cloud.capacity.dao.CapacityDao; -import com.cloud.exception.DiscoveryException; -import com.cloud.exception.InvalidParameterValueException; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.Status; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.resource.ResourceManager; -import com.cloud.server.ManagementServer; -import com.cloud.storage.OCFS2Manager; -import com.cloud.storage.Storage.StoragePoolType; -import com.cloud.storage.StorageManager; -import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolDiscoverer; -import com.cloud.storage.StoragePoolHostVO; -import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.StoragePoolWorkVO; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.StoragePoolHostDao; -import com.cloud.storage.dao.StoragePoolWorkDao; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.user.Account; -import com.cloud.user.User; -import com.cloud.user.UserContext; -import com.cloud.user.dao.UserDao; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.UriUtils; -import com.cloud.utils.db.DB; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.exception.ExecutionException; -import com.cloud.vm.ConsoleProxyVO; -import com.cloud.vm.DomainRouterVO; -import com.cloud.vm.SecondaryStorageVmVO; -import com.cloud.vm.UserVmVO; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachine.State; -import com.cloud.vm.VirtualMachineManager; -import com.cloud.vm.dao.ConsoleProxyDao; -import com.cloud.vm.dao.DomainRouterDao; -import com.cloud.vm.dao.SecondaryStorageVmDao; -import com.cloud.vm.dao.UserVmDao; -import com.cloud.vm.dao.VMInstanceDao; - -public class AncientPrimaryDataStoreLifeCycleImpl implements - PrimaryDataStoreLifeCycle { - private static final Logger s_logger = Logger - 
.getLogger(AncientPrimaryDataStoreLifeCycleImpl.class); - @Inject - protected ResourceManager _resourceMgr; - protected List _discoverers; - @Inject - PrimaryDataStoreDao primaryDataStoreDao; - @Inject - protected OCFS2Manager _ocfs2Mgr; - @Inject - DataStoreManager dataStoreMgr; - @Inject - AgentManager agentMgr; - @Inject - StorageManager storageMgr; - @Inject - protected CapacityDao _capacityDao; - - @Inject - VolumeDao volumeDao; - @Inject - VMInstanceDao vmDao; - @Inject - ManagementServer server; - @Inject - protected VirtualMachineManager vmMgr; - @Inject - protected SecondaryStorageVmDao _secStrgDao; - @Inject - UserVmDao userVmDao; - @Inject - protected UserDao _userDao; - @Inject - protected DomainRouterDao _domrDao; - @Inject - protected StoragePoolHostDao _storagePoolHostDao; - @Inject - protected AlertManager _alertMgr; - @Inject - protected ConsoleProxyDao _consoleProxyDao; - - @Inject - protected StoragePoolWorkDao _storagePoolWorkDao; - - @Override - public DataStore initialize(Map dsInfos) { - Long clusterId = (Long) dsInfos.get("clusterId"); - Long podId = (Long) dsInfos.get("podId"); - Long zoneId = (Long) dsInfos.get("zoneId"); - String url = (String) dsInfos.get("url"); - Long providerId = (Long)dsInfos.get("providerId"); - if (clusterId != null && podId == null) { - throw new InvalidParameterValueException( - "Cluster id requires pod id"); - } - - URI uri = null; - try { - uri = new URI(UriUtils.encodeURIComponent(url)); - if (uri.getScheme() == null) { - throw new InvalidParameterValueException("scheme is null " - + url + ", add nfs:// as a prefix"); - } else if (uri.getScheme().equalsIgnoreCase("nfs")) { - String uriHost = uri.getHost(); - String uriPath = uri.getPath(); - if (uriHost == null || uriPath == null - || uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) { - throw new InvalidParameterValueException( - "host or path is null, should be nfs://hostname/path"); - } - } else if (uri.getScheme().equalsIgnoreCase("sharedMountPoint")) { - String uriPath = uri.getPath(); - if (uriPath == null) { - throw new InvalidParameterValueException( - "host or path is null, should be sharedmountpoint://localhost/path"); - } - } else if (uri.getScheme().equalsIgnoreCase("rbd")) { - String uriPath = uri.getPath(); - if (uriPath == null) { - throw new InvalidParameterValueException( - "host or path is null, should be rbd://hostname/pool"); - } - } - } catch (URISyntaxException e) { - throw new InvalidParameterValueException(url - + " is not a valid uri"); - } - - String tags = (String) dsInfos.get("tags"); - Map details = (Map) dsInfos - .get("details"); - if (tags != null) { - String[] tokens = tags.split(","); - - for (String tag : tokens) { - tag = tag.trim(); - if (tag.length() == 0) { - continue; - } - details.put(tag, "true"); - } - } - - String scheme = uri.getScheme(); - String storageHost = uri.getHost(); - String hostPath = uri.getPath(); - Object localStorage = dsInfos.get("localStorage"); - if (localStorage != null) { - hostPath = hostPath.replace("/", ""); - } - String userInfo = uri.getUserInfo(); - int port = uri.getPort(); - StoragePoolVO pool = null; - if (s_logger.isDebugEnabled()) { - s_logger.debug("createPool Params @ scheme - " + scheme - + " storageHost - " + storageHost + " hostPath - " - + hostPath + " port - " + port); - } - if (scheme.equalsIgnoreCase("nfs")) { - if (port == -1) { - port = 2049; - } - pool = new StoragePoolVO(StoragePoolType.NetworkFilesystem, - storageHost, port, hostPath); - } else if (scheme.equalsIgnoreCase("file")) { - if 
(port == -1) { - port = 0; - } - pool = new StoragePoolVO(StoragePoolType.Filesystem, - "localhost", 0, hostPath); - } else if (scheme.equalsIgnoreCase("sharedMountPoint")) { - pool = new StoragePoolVO(StoragePoolType.SharedMountPoint, - storageHost, 0, hostPath); - } else if (scheme.equalsIgnoreCase("clvm")) { - pool = new StoragePoolVO(StoragePoolType.CLVM, storageHost, 0, - hostPath.replaceFirst("/", "")); - } else if (scheme.equalsIgnoreCase("rbd")) { - if (port == -1) { - port = 6789; - } - pool = new StoragePoolVO(StoragePoolType.RBD, storageHost, - port, hostPath.replaceFirst("/", "")); - pool.setUserInfo(userInfo); - } else if (scheme.equalsIgnoreCase("PreSetup")) { - pool = new StoragePoolVO(StoragePoolType.PreSetup, - storageHost, 0, hostPath); - } else if (scheme.equalsIgnoreCase("iscsi")) { - String[] tokens = hostPath.split("/"); - int lun = NumbersUtil.parseInt(tokens[tokens.length - 1], -1); - if (port == -1) { - port = 3260; - } - if (lun != -1) { - if (clusterId == null) { - throw new IllegalArgumentException( - "IscsiLUN need to have clusters specified"); - } - hostPath.replaceFirst("/", ""); - pool = new StoragePoolVO(StoragePoolType.IscsiLUN, - storageHost, port, hostPath); - } else { - for (StoragePoolDiscoverer discoverer : _discoverers) { - Map> pools; - try { - pools = discoverer.find(zoneId, podId, uri, details); - } catch (DiscoveryException e) { - throw new IllegalArgumentException( - "Not enough information for discovery " + uri, - e); - } - if (pools != null) { - Map.Entry> entry = pools - .entrySet().iterator().next(); - pool = entry.getKey(); - details = entry.getValue(); - break; - } - } - } - } else if (scheme.equalsIgnoreCase("iso")) { - if (port == -1) { - port = 2049; - } - pool = new StoragePoolVO(StoragePoolType.ISO, storageHost, - port, hostPath); - } else if (scheme.equalsIgnoreCase("vmfs")) { - pool = new StoragePoolVO(StoragePoolType.VMFS, - "VMFS datastore: " + hostPath, 0, hostPath); - } else if (scheme.equalsIgnoreCase("ocfs2")) { - port = 7777; - pool = new StoragePoolVO(StoragePoolType.OCFS2, "clustered", - port, hostPath); - } else { - StoragePoolType type = Enum.valueOf(StoragePoolType.class, scheme); - - if (type != null) { - pool = new StoragePoolVO(type, storageHost, - 0, hostPath); - } else { - s_logger.warn("Unable to figure out the scheme for URI: " + uri); - throw new IllegalArgumentException( - "Unable to figure out the scheme for URI: " + uri); - } - } - - if (pool == null) { - s_logger.warn("Unable to figure out the scheme for URI: " + uri); - throw new IllegalArgumentException( - "Unable to figure out the scheme for URI: " + uri); - } - - if (localStorage == null) { - List pools = primaryDataStoreDao - .listPoolByHostPath(storageHost, hostPath); - if (!pools.isEmpty() && !scheme.equalsIgnoreCase("sharedmountpoint")) { - Long oldPodId = pools.get(0).getPodId(); - throw new CloudRuntimeException("Storage pool " + uri - + " already in use by another pod (id=" + oldPodId + ")"); - } - } - - long poolId = primaryDataStoreDao.getNextInSequence(Long.class, "id"); - Object existingUuid = dsInfos.get("uuid"); - String uuid = null; - - if (existingUuid != null) { - uuid = (String)existingUuid; - } else if (scheme.equalsIgnoreCase("sharedmountpoint") - || scheme.equalsIgnoreCase("clvm")) { - uuid = UUID.randomUUID().toString(); - } else if (scheme.equalsIgnoreCase("PreSetup")) { - uuid = hostPath.replace("/", ""); - } else { - uuid = UUID.nameUUIDFromBytes( - new String(storageHost + hostPath).getBytes()).toString(); - } - - List spHandles = 
primaryDataStoreDao - .findIfDuplicatePoolsExistByUUID(uuid); - if ((spHandles != null) && (spHandles.size() > 0)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Another active pool with the same uuid already exists"); - } - throw new CloudRuntimeException( - "Another active pool with the same uuid already exists"); - } - - String poolName = (String) dsInfos.get("name"); - if (s_logger.isDebugEnabled()) { - s_logger.debug("In createPool Setting poolId - " + poolId - + " uuid - " + uuid + " zoneId - " + zoneId + " podId - " - + podId + " poolName - " + poolName); - } - - pool.setId(poolId); - pool.setUuid(uuid); - pool.setDataCenterId(zoneId); - pool.setPodId(podId); - pool.setName(poolName); - pool.setClusterId(clusterId); - pool.setStorageProviderId(providerId); - pool.setStatus(StoragePoolStatus.Initialized); - pool = primaryDataStoreDao.persist(pool, details); - - return dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); - } - - protected boolean createStoragePool(long hostId, StoragePool pool) { - s_logger.debug("creating pool " + pool.getName() + " on host " - + hostId); - if (pool.getPoolType() != StoragePoolType.NetworkFilesystem - && pool.getPoolType() != StoragePoolType.Filesystem - && pool.getPoolType() != StoragePoolType.IscsiLUN - && pool.getPoolType() != StoragePoolType.Iscsi - && pool.getPoolType() != StoragePoolType.VMFS - && pool.getPoolType() != StoragePoolType.SharedMountPoint - && pool.getPoolType() != StoragePoolType.PreSetup - && pool.getPoolType() != StoragePoolType.OCFS2 - && pool.getPoolType() != StoragePoolType.RBD - && pool.getPoolType() != StoragePoolType.CLVM) { - s_logger.warn(" Doesn't support storage pool type " - + pool.getPoolType()); - return false; - } - CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); - final Answer answer = agentMgr.easySend(hostId, cmd); - if (answer != null && answer.getResult()) { - return true; - } else { - primaryDataStoreDao.expunge(pool.getId()); - String msg = ""; - if (answer != null) { - msg = "Can not create storage pool through host " + hostId - + " due to " + answer.getDetails(); - s_logger.warn(msg); - } else { - msg = "Can not create storage pool through host " + hostId - + " due to CreateStoragePoolCommand returns null"; - s_logger.warn(msg); - } - throw new CloudRuntimeException(msg); - } - } - - @Override - public boolean attachCluster(DataStore store, ClusterScope scope) { - PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store; - // Check if there is host up in this cluster - List allHosts = _resourceMgr.listAllUpAndEnabledHosts( - Host.Type.Routing, primarystore.getClusterId(), - primarystore.getPodId(), primarystore.getDataCenterId()); - if (allHosts.isEmpty()) { - throw new CloudRuntimeException( - "No host up to associate a storage pool with in cluster " - + primarystore.getClusterId()); - } - - if (primarystore.getPoolType() == StoragePoolType.OCFS2 - && !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) { - s_logger.warn("Can not create storage pool " + primarystore - + " on cluster " + primarystore.getClusterId()); - primaryDataStoreDao.expunge(primarystore.getId()); - return false; - } - - boolean success = false; - for (HostVO h : allHosts) { - success = createStoragePool(h.getId(), primarystore); - if (success) { - break; - } - } - - s_logger.debug("In createPool Adding the pool to each of the hosts"); - List poolHosts = new ArrayList(); - for (HostVO h : allHosts) { - try { - this.storageMgr.connectHostToSharedPool(h.getId(), - primarystore.getId()); - 
poolHosts.add(h); - } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + h - + " and " + primarystore, e); - } - } - - if (poolHosts.isEmpty()) { - s_logger.warn("No host can access storage pool " + primarystore - + " on cluster " + primarystore.getClusterId()); - primaryDataStoreDao.expunge(primarystore.getId()); - return false; - } else { - storageMgr.createCapacityEntry(primarystore.getId()); - } - StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId()); - pool.setScope(ScopeType.CLUSTER); - pool.setStatus(StoragePoolStatus.Up); - this.primaryDataStoreDao.update(pool.getId(), pool); - return true; - } - - @Override - public boolean attachZone(DataStore dataStore, ZoneScope scope) { - List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId()); - for (HostVO host : hosts) { - try { - this.storageMgr.connectHostToSharedPool(host.getId(), - dataStore.getId()); - } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + host - + " and " + dataStore, e); - } - } - StoragePoolVO pool = this.primaryDataStoreDao.findById(dataStore.getId()); - - pool.setScope(ScopeType.ZONE); - pool.setStatus(StoragePoolStatus.Up); - this.primaryDataStoreDao.update(pool.getId(), pool); - return true; - } - - @Override - public boolean dettach() { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean unmanaged() { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean maintain(long storeId) { - Long userId = UserContext.current().getCallerUserId(); - User user = _userDao.findById(userId); - Account account = UserContext.current().getCaller(); - StoragePoolVO pool = this.primaryDataStoreDao.findById(storeId); - try { - StoragePool storagePool = (StoragePool) this.dataStoreMgr - .getDataStore(storeId, DataStoreRole.Primary); - List hosts = _resourceMgr.listHostsInClusterByStatus( - pool.getClusterId(), Status.Up); - if (hosts == null || hosts.size() == 0) { - pool.setStatus(StoragePoolStatus.Maintenance); - primaryDataStoreDao.update(pool.getId(), pool); - return true; - } else { - // set the pool state to prepare for maintenance - pool.setStatus(StoragePoolStatus.PrepareForMaintenance); - primaryDataStoreDao.update(pool.getId(), pool); - } - // remove heartbeat - for (HostVO host : hosts) { - ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand( - false, storagePool); - final Answer answer = agentMgr.easySend(host.getId(), cmd); - if (answer == null || !answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool false failed due to " - + ((answer == null) ? "answer null" : answer - .getDetails())); - } - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool false secceeded"); - } - } - } - // check to see if other ps exist - // if they do, then we can migrate over the system vms to them - // if they dont, then just stop all vms on this one - List upPools = primaryDataStoreDao - .listByStatusInZone(pool.getDataCenterId(), - StoragePoolStatus.Up); - boolean restart = true; - if (upPools == null || upPools.size() == 0) { - restart = false; - } - - // 2. Get a list of all the ROOT volumes within this storage pool - List allVolumes = this.volumeDao.findByPoolId(pool - .getId()); - - // 3. 
Enqueue to the work queue - for (VolumeVO volume : allVolumes) { - VMInstanceVO vmInstance = vmDao - .findById(volume.getInstanceId()); - - if (vmInstance == null) { - continue; - } - - // enqueue sp work - if (vmInstance.getState().equals(State.Running) - || vmInstance.getState().equals(State.Starting) - || vmInstance.getState().equals(State.Stopping)) { - - try { - StoragePoolWorkVO work = new StoragePoolWorkVO( - vmInstance.getId(), pool.getId(), false, false, - server.getId()); - _storagePoolWorkDao.persist(work); - } catch (Exception e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Work record already exists, re-using by re-setting values"); - } - StoragePoolWorkVO work = _storagePoolWorkDao - .findByPoolIdAndVmId(pool.getId(), - vmInstance.getId()); - work.setStartedAfterMaintenance(false); - work.setStoppedForMaintenance(false); - work.setManagementServerId(server.getId()); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } - - // 4. Process the queue - List pendingWork = _storagePoolWorkDao - .listPendingWorkForPrepareForMaintenanceByPoolId(pool - .getId()); - - for (StoragePoolWorkVO work : pendingWork) { - // shut down the running vms - VMInstanceVO vmInstance = vmDao.findById(work.getVmId()); - - if (vmInstance == null) { - continue; - } - - // if the instance is of type consoleproxy, call the console - // proxy - if (vmInstance.getType().equals( - VirtualMachine.Type.ConsoleProxy)) { - // call the consoleproxymanager - ConsoleProxyVO consoleProxy = _consoleProxyDao - .findById(vmInstance.getId()); - if (!vmMgr.advanceStop(consoleProxy, true, user, account)) { - String errorMsg = "There was an error stopping the console proxy id: " - + vmInstance.getId() - + " ,cannot enable storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - if (restart) { - - if (this.vmMgr.advanceStart(consoleProxy, null, user, - account) == null) { - String errorMsg = "There was an error starting the console proxy id: " - + vmInstance.getId() - + " on another storage pool, cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - } else { - // update work status - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } - - // if the instance is of type uservm, call the user vm manager - if (vmInstance.getType().equals(VirtualMachine.Type.User)) { - UserVmVO userVm = userVmDao.findById(vmInstance.getId()); - if (!vmMgr.advanceStop(userVm, true, user, account)) { - String errorMsg = "There was an error stopping the user vm id: " - + vmInstance.getId() - + " ,cannot enable storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type secondary storage vm, call the - // secondary storage vm manager - if (vmInstance.getType().equals( - VirtualMachine.Type.SecondaryStorageVm)) { - SecondaryStorageVmVO secStrgVm = _secStrgDao - .findById(vmInstance.getId()); - if (!vmMgr.advanceStop(secStrgVm, true, user, account)) { - String errorMsg = "There was an error stopping the ssvm id: " - + vmInstance.getId() - + " ,cannot enable storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - 
work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - if (restart) { - if (vmMgr.advanceStart(secStrgVm, null, user, account) == null) { - String errorMsg = "There was an error starting the ssvm id: " - + vmInstance.getId() - + " on another storage pool, cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - } else { - // update work status - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } - - // if the instance is of type domain router vm, call the network - // manager - if (vmInstance.getType().equals( - VirtualMachine.Type.DomainRouter)) { - DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); - if (!vmMgr.advanceStop(domR, true, user, account)) { - String errorMsg = "There was an error stopping the domain router id: " - + vmInstance.getId() - + " ,cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - if (restart) { - if (vmMgr.advanceStart(domR, null, user, account) == null) { - String errorMsg = "There was an error starting the domain router id: " - + vmInstance.getId() - + " on another storage pool, cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - } else { - // update work status - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } - } - - // 5. Update the status - pool.setStatus(StoragePoolStatus.Maintenance); - this.primaryDataStoreDao.update(pool.getId(), pool); - - return true; - } catch (Exception e) { - s_logger.error( - "Exception in enabling primary storage maintenance:", e); - setPoolStateToError(pool); - throw new CloudRuntimeException(e.getMessage()); - } - } - - private void setPoolStateToError(StoragePoolVO primaryStorage) { - primaryStorage.setStatus(StoragePoolStatus.ErrorInMaintenance); - this.primaryDataStoreDao.update(primaryStorage.getId(), primaryStorage); - } - - @Override - public boolean cancelMaintain(long storageId) { - // Change the storage state back to up - Long userId = UserContext.current().getCallerUserId(); - User user = _userDao.findById(userId); - Account account = UserContext.current().getCaller(); - StoragePoolVO poolVO = this.primaryDataStoreDao - .findById(storageId); - StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore( - storageId, DataStoreRole.Primary); - poolVO.setStatus(StoragePoolStatus.Up); - primaryDataStoreDao.update(storageId, poolVO); - - List hosts = _resourceMgr.listHostsInClusterByStatus( - pool.getClusterId(), Status.Up); - if (hosts == null || hosts.size() == 0) { - return true; - } - // add heartbeat - for (HostVO host : hosts) { - ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand( - true, pool); - final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd); - if (answer == null || !answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool add failed due to " - + ((answer == null) ? "answer null" : answer - .getDetails())); - } - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool add secceeded"); - } - } - } - - // 2. Get a list of pending work for this queue - List pendingWork = _storagePoolWorkDao - .listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId()); - - // 3. 
work through the queue - for (StoragePoolWorkVO work : pendingWork) { - try { - VMInstanceVO vmInstance = vmDao.findById(work.getVmId()); - - if (vmInstance == null) { - continue; - } - - // if the instance is of type consoleproxy, call the console - // proxy - if (vmInstance.getType().equals( - VirtualMachine.Type.ConsoleProxy)) { - - ConsoleProxyVO consoleProxy = _consoleProxyDao - .findById(vmInstance.getId()); - if (vmMgr.advanceStart(consoleProxy, null, user, account) == null) { - String msg = "There was an error starting the console proxy id: " - + vmInstance.getId() - + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type ssvm, call the ssvm manager - if (vmInstance.getType().equals( - VirtualMachine.Type.SecondaryStorageVm)) { - SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance - .getId()); - if (vmMgr.advanceStart(ssVm, null, user, account) == null) { - String msg = "There was an error starting the ssvm id: " - + vmInstance.getId() - + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type ssvm, call the ssvm manager - if (vmInstance.getType().equals( - VirtualMachine.Type.DomainRouter)) { - DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); - if (vmMgr.advanceStart(domR, null, user, account) == null) { - String msg = "There was an error starting the domR id: " - + vmInstance.getId() - + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type user vm, call the user vm manager - if (vmInstance.getType().equals(VirtualMachine.Type.User)) { - UserVmVO userVm = userVmDao.findById(vmInstance.getId()); - - if (vmMgr.advanceStart(userVm, null, user, account) == null) { - - String msg = "There was an error starting the user vm id: " - + vmInstance.getId() - + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } catch (Exception e) { - s_logger.debug("Failed start vm", e); - throw new CloudRuntimeException(e.toString()); - } - } - return true; - } - - @DB - @Override - public boolean deleteDataStore(long storeId) { - // for the given pool id, find all records in the storage_pool_host_ref - List hostPoolRecords = this._storagePoolHostDao - .listByPoolId(storeId); - StoragePoolVO poolVO = this.primaryDataStoreDao.findById(storeId); - StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(storeId, DataStoreRole.Primary); - boolean deleteFlag = false; - Transaction txn = Transaction.currentTxn(); - try { - // if not records exist, delete the given pool (base case) - if (hostPoolRecords.size() == 0) { - - txn.start(); - poolVO.setUuid(null); - this.primaryDataStoreDao.update(poolVO.getId(), poolVO); - primaryDataStoreDao.remove(poolVO.getId()); - 
deletePoolStats(poolVO.getId()); - txn.commit(); - - deleteFlag = true; - return true; - } else { - // Remove the SR associated with the Xenserver - for (StoragePoolHostVO host : hostPoolRecords) { - DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand( - pool); - final Answer answer = agentMgr.easySend(host.getHostId(), - deleteCmd); - - if (answer != null && answer.getResult()) { - deleteFlag = true; - break; - } - } - } - } finally { - if (deleteFlag) { - // now delete the storage_pool_host_ref and storage_pool records - txn.start(); - for (StoragePoolHostVO host : hostPoolRecords) { - _storagePoolHostDao.deleteStoragePoolHostDetails( - host.getHostId(), host.getPoolId()); - } - poolVO.setUuid(null); - this.primaryDataStoreDao.update(poolVO.getId(), poolVO); - primaryDataStoreDao.remove(poolVO.getId()); - deletePoolStats(poolVO.getId()); - // Delete op_host_capacity entries - this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, - null, null, null, poolVO.getId()); - txn.commit(); - - s_logger.debug("Storage pool id=" + poolVO.getId() - + " is removed successfully"); - return true; - } else { - // alert that the storage cleanup is required - s_logger.warn("Failed to Delete storage pool id: " + poolVO.getId()); - _alertMgr - .sendAlert(AlertManager.ALERT_TYPE_STORAGE_DELETE, - poolVO.getDataCenterId(), poolVO.getPodId(), - "Unable to delete storage pool id= " + poolVO.getId(), - "Delete storage pool command failed. Please check logs."); - } - } - return false; - } - - @DB - private boolean deletePoolStats(Long poolId) { - CapacityVO capacity1 = _capacityDao.findByHostIdType(poolId, - CapacityVO.CAPACITY_TYPE_STORAGE); - CapacityVO capacity2 = _capacityDao.findByHostIdType(poolId, - CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED); - Transaction txn = Transaction.currentTxn(); - txn.start(); - if (capacity1 != null) { - _capacityDao.remove(capacity1.getId()); - } - - if (capacity2 != null) { - _capacityDao.remove(capacity2.getId()); - } - - txn.commit(); - return true; - } - - @Override - public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { - StoragePoolHostVO poolHost = _storagePoolHostDao.findByPoolHost(store.getId(), scope.getScopeId()); - if (poolHost == null) { - poolHost = new StoragePoolHostVO(store.getId(), scope.getScopeId(), existingInfo.getLocalPath()); - _storagePoolHostDao.persist(poolHost); - } - - StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId()); - pool.setScope(scope.getScopeType()); - pool.setAvailableBytes(existingInfo.getAvailableBytes()); - pool.setCapacityBytes(existingInfo.getCapacityBytes()); - pool.setStatus(StoragePoolStatus.Up); - this.primaryDataStoreDao.update(pool.getId(), pool); - this.storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getCapacityBytes() - pool.getAvailableBytes()); - - return true; - } - -} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java index 5e8727a316a..fea02e8d1ed 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java @@ -60,8 +60,8 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif @Override public 
DataStore initialize(Map dsInfos) { - StoragePoolVO storeVO = primaryStoreHelper.createPrimaryDataStore(dsInfos); - return providerMgr.getPrimaryDataStore(storeVO.getId()); + DataStore store = primaryStoreHelper.createPrimaryDataStore(null); + return providerMgr.getPrimaryDataStore(store.getId()); } protected void attachCluster(DataStore store) { @@ -113,26 +113,6 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif return false; } - @Override - public boolean maintain(long storeId) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean cancelMaintain(long storeId) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean deleteDataStore(long storeId) { - // TODO Auto-generated method stub - return false; - } - - - @Override public boolean attachZone(DataStore dataStore, ZoneScope scope) { // TODO Auto-generated method stub @@ -146,4 +126,22 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif return false; } + @Override + public boolean maintain(DataStore store) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean cancelMaintain(DataStore store) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean deleteDataStore(DataStore store) { + // TODO Auto-generated method stub + return false; + } + } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java index e181adabb5b..e38c3b306fa 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java @@ -57,18 +57,18 @@ public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataSt @Override public PrimaryDataStore getPrimaryDataStore(long dataStoreId) { StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStoreId); - long providerId = dataStoreVO.getStorageProviderId(); - DataStoreProvider provider = providerManager.getDataStoreProviderById(providerId); - DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO, driverMaps.get(provider.getUuid()), provider); + String providerName = dataStoreVO.getStorageProviderName(); + DataStoreProvider provider = providerManager.getDataStoreProvider(providerName); + DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO, driverMaps.get(provider.getName()), provider); return dataStore; } @Override - public boolean registerDriver(String uuid, PrimaryDataStoreDriver driver) { - if (driverMaps.get(uuid) != null) { + public boolean registerDriver(String providerName, PrimaryDataStoreDriver driver) { + if (driverMaps.get(providerName) != null) { return false; } - driverMaps.put(uuid, driver); + driverMaps.put(providerName, driver); return true; } @@ -79,7 +79,7 @@ public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataSt } @Override - public boolean registerHostListener(String uuid, HypervisorHostListener listener) { - return storageMgr.registerHostListener(uuid, listener); + public boolean registerHostListener(String providerName, HypervisorHostListener listener) { + return storageMgr.registerHostListener(providerName, 
listener); } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java index a1402c13b3d..46fa738e294 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java @@ -16,24 +16,29 @@ // under the License. package org.apache.cloudstack.storage.datastore.provider; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider.DataStoreProviderType; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; import org.apache.cloudstack.storage.datastore.driver.DefaultPrimaryDataStoreDriverImpl; import org.apache.cloudstack.storage.datastore.lifecycle.DefaultPrimaryDataStoreLifeCycleImpl; -import org.springframework.stereotype.Component; import com.cloud.utils.component.ComponentContext; -@Component + public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProvider { private final String providerName = "default primary data store provider"; protected PrimaryDataStoreDriver driver; + protected HypervisorHostListener listener; @Inject PrimaryDataStoreProviderManager storeMgr; @@ -46,7 +51,7 @@ public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProv } @Override - public DataStoreLifeCycle getLifeCycle() { + public DataStoreLifeCycle getDataStoreLifeCycle() { return this.lifecyle; } @@ -54,22 +59,25 @@ public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProv public boolean configure(Map params) { lifecyle = ComponentContext.inject(DefaultPrimaryDataStoreLifeCycleImpl.class); driver = ComponentContext.inject(DefaultPrimaryDataStoreDriverImpl.class); - HypervisorHostListener listener = ComponentContext.inject(DefaultHostListener.class); - uuid = (String)params.get("uuid"); - id = (Long)params.get("id"); - storeMgr.registerDriver(uuid, this.driver); - storeMgr.registerHostListener(uuid, listener); + listener = ComponentContext.inject(DefaultHostListener.class); return true; } @Override - public String getUuid() { - return this.uuid; + public PrimaryDataStoreDriver getDataStoreDriver() { + return this.driver; } @Override - public long getId() { - return this.id; + public HypervisorHostListener getHostListener() { + return this.listener; + } + + @Override + public Set getTypes() { + Set types = new HashSet(); + types.add(DataStoreProviderType.PRIMARY); + return types; } } diff --git a/engine/storage/volume/test/org/apache/cloudstack/storage/volume/test/ConfiguratorTest.java b/engine/storage/volume/test/org/apache/cloudstack/storage/volume/test/ConfiguratorTest.java index 829694bd753..122c3532a09 100644 --- a/engine/storage/volume/test/org/apache/cloudstack/storage/volume/test/ConfiguratorTest.java +++ 
b/engine/storage/volume/test/org/apache/cloudstack/storage/volume/test/ConfiguratorTest.java @@ -28,7 +28,7 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; diff --git a/plugins/pom.xml b/plugins/pom.xml index 5d31a72ee91..39d99073f09 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -57,6 +57,7 @@ network-elements/dns-notifier storage/image/s3 storage/volume/solidfire + storage/volume/default alert-handlers/snmp-alerts diff --git a/plugins/storage/volume/default/pom.xml b/plugins/storage/volume/default/pom.xml new file mode 100644 index 00000000000..1eb2e12a816 --- /dev/null +++ b/plugins/storage/volume/default/pom.xml @@ -0,0 +1,56 @@ + + + 4.0.0 + cloud-plugin-storage-volume-default + Apache CloudStack Plugin - Storage Volume default provider + + org.apache.cloudstack + cloudstack-plugins + 4.2.0-SNAPSHOT + ../../../pom.xml + + + + org.apache.cloudstack + cloud-engine-storage-volume + ${project.version} + + + mysql + mysql-connector-java + ${cs.mysql.version} + provided + + + + install + src + test + + + maven-surefire-plugin + + true + + + + integration-test + + test + + + + + + + diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java similarity index 98% rename from engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java rename to plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java index 440cb8c5ea0..04869020468 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java @@ -71,9 +71,9 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.DiskProfile; import com.cloud.vm.dao.VMInstanceDao; -public class AncientPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { +public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { private static final Logger s_logger = Logger - .getLogger(AncientPrimaryDataStoreDriverImpl.class); + .getLogger(CloudStackPrimaryDataStoreDriverImpl.class); @Inject DiskOfferingDao diskOfferingDao; @Inject VMTemplateDao templateDao; @Inject VolumeDao volumeDao; diff --git a/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java new file mode 100644 index 00000000000..2991574d021 --- /dev/null +++ b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java @@ -0,0 +1,542 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.datastore.lifecycle; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; +import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.apache.log4j.Logger; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CreateStoragePoolCommand; +import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.alert.AlertManager; +import com.cloud.capacity.Capacity; +import com.cloud.capacity.CapacityVO; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.exception.DiscoveryException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.resource.ResourceManager; +import com.cloud.server.ManagementServer; +import com.cloud.storage.OCFS2Manager; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolAutomation; +import com.cloud.storage.StoragePoolDiscoverer; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.StoragePoolStatus; +import com.cloud.storage.StoragePoolWorkVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.StoragePoolWorkDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.Account; +import com.cloud.user.User; +import com.cloud.user.UserContext; +import com.cloud.user.dao.UserDao; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.UriUtils; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; 
+import com.cloud.utils.exception.ExecutionException; +import com.cloud.vm.ConsoleProxyVO; +import com.cloud.vm.DomainRouterVO; +import com.cloud.vm.SecondaryStorageVmVO; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.dao.ConsoleProxyDao; +import com.cloud.vm.dao.DomainRouterDao; +import com.cloud.vm.dao.SecondaryStorageVmDao; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; + +public class CloudStackPrimaryDataStoreLifeCycleImpl implements + PrimaryDataStoreLifeCycle { + private static final Logger s_logger = Logger + .getLogger(CloudStackPrimaryDataStoreLifeCycleImpl.class); + @Inject + protected ResourceManager _resourceMgr; + protected List _discoverers; + @Inject + PrimaryDataStoreDao primaryDataStoreDao; + @Inject + protected OCFS2Manager _ocfs2Mgr; + @Inject + DataStoreManager dataStoreMgr; + @Inject + AgentManager agentMgr; + @Inject + StorageManager storageMgr; + + + @Inject + VolumeDao volumeDao; + @Inject + VMInstanceDao vmDao; + @Inject + ManagementServer server; + @Inject + protected VirtualMachineManager vmMgr; + @Inject + protected SecondaryStorageVmDao _secStrgDao; + @Inject + UserVmDao userVmDao; + @Inject + protected UserDao _userDao; + @Inject + protected DomainRouterDao _domrDao; + @Inject + protected StoragePoolHostDao _storagePoolHostDao; + @Inject + protected AlertManager _alertMgr; + @Inject + protected ConsoleProxyDao _consoleProxyDao; + + @Inject + protected StoragePoolWorkDao _storagePoolWorkDao; + @Inject + PrimaryDataStoreHelper dataStoreHelper; + @Inject + StoragePoolAutomation storagePoolAutmation; + + @Override + public DataStore initialize(Map dsInfos) { + Long clusterId = (Long) dsInfos.get("clusterId"); + Long podId = (Long) dsInfos.get("podId"); + Long zoneId = (Long) dsInfos.get("zoneId"); + String url = (String) dsInfos.get("url"); + String providerName = (String)dsInfos.get("providerName"); + if (clusterId != null && podId == null) { + throw new InvalidParameterValueException( + "Cluster id requires pod id"); + } + + PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters(); + + URI uri = null; + try { + uri = new URI(UriUtils.encodeURIComponent(url)); + if (uri.getScheme() == null) { + throw new InvalidParameterValueException("scheme is null " + + url + ", add nfs:// as a prefix"); + } else if (uri.getScheme().equalsIgnoreCase("nfs")) { + String uriHost = uri.getHost(); + String uriPath = uri.getPath(); + if (uriHost == null || uriPath == null + || uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) { + throw new InvalidParameterValueException( + "host or path is null, should be nfs://hostname/path"); + } + } else if (uri.getScheme().equalsIgnoreCase("sharedMountPoint")) { + String uriPath = uri.getPath(); + if (uriPath == null) { + throw new InvalidParameterValueException( + "host or path is null, should be sharedmountpoint://localhost/path"); + } + } else if (uri.getScheme().equalsIgnoreCase("rbd")) { + String uriPath = uri.getPath(); + if (uriPath == null) { + throw new InvalidParameterValueException( + "host or path is null, should be rbd://hostname/pool"); + } + } + } catch (URISyntaxException e) { + throw new InvalidParameterValueException(url + + " is not a valid uri"); + } + + String tags = (String) dsInfos.get("tags"); + Map details = (Map) dsInfos + .get("details"); + + parameters.setTags(tags); + parameters.setDetails(details); + + 
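+        // The URI scheme below selects the StoragePoolType and supplies protocol
+        // defaults (NFS 2049, RBD 6789, iSCSI 3260, OCFS2 7777), while host and
+        // path come from the URI itself. Illustrative example with a hypothetical
+        // address: nfs://10.1.1.2/export/primary is expected to map to
+        // type=NetworkFilesystem, host=10.1.1.2, port=2049, path=/export/primary.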
String scheme = uri.getScheme(); + String storageHost = uri.getHost(); + String hostPath = uri.getPath(); + Object localStorage = dsInfos.get("localStorage"); + if (localStorage != null) { + hostPath = hostPath.replace("/", ""); + } + String userInfo = uri.getUserInfo(); + int port = uri.getPort(); + StoragePoolVO pool = null; + if (s_logger.isDebugEnabled()) { + s_logger.debug("createPool Params @ scheme - " + scheme + + " storageHost - " + storageHost + " hostPath - " + + hostPath + " port - " + port); + } + if (scheme.equalsIgnoreCase("nfs")) { + if (port == -1) { + port = 2049; + } + parameters.setType(StoragePoolType.NetworkFilesystem); + parameters.setHost(storageHost); + parameters.setPort(port); + parameters.setPath(hostPath); + } else if (scheme.equalsIgnoreCase("file")) { + if (port == -1) { + port = 0; + } + parameters.setType(StoragePoolType.Filesystem); + parameters.setHost("localhost"); + parameters.setPort(0); + parameters.setPath(hostPath); + } else if (scheme.equalsIgnoreCase("sharedMountPoint")) { + parameters.setType(StoragePoolType.SharedMountPoint); + parameters.setHost(storageHost); + parameters.setPort(0); + parameters.setPath(hostPath); + } else if (scheme.equalsIgnoreCase("clvm")) { + parameters.setType(StoragePoolType.CLVM); + parameters.setHost(storageHost); + parameters.setPort(0); + parameters.setPath(hostPath.replaceFirst("/", "")); + } else if (scheme.equalsIgnoreCase("rbd")) { + if (port == -1) { + port = 6789; + } + parameters.setType(StoragePoolType.RBD); + parameters.setHost(storageHost); + parameters.setPort(port); + parameters.setPath(hostPath.replaceFirst("/", "")); + parameters.setUserInfo(userInfo); + } else if (scheme.equalsIgnoreCase("PreSetup")) { + parameters.setType(StoragePoolType.PreSetup); + parameters.setHost(storageHost); + parameters.setPort(0); + parameters.setPath(hostPath); + } else if (scheme.equalsIgnoreCase("iscsi")) { + String[] tokens = hostPath.split("/"); + int lun = NumbersUtil.parseInt(tokens[tokens.length - 1], -1); + if (port == -1) { + port = 3260; + } + if (lun != -1) { + if (clusterId == null) { + throw new IllegalArgumentException( + "IscsiLUN need to have clusters specified"); + } + hostPath.replaceFirst("/", ""); + parameters.setType(StoragePoolType.IscsiLUN); + parameters.setHost(storageHost); + parameters.setPort(port); + parameters.setPath(hostPath); + } else { + for (StoragePoolDiscoverer discoverer : _discoverers) { + Map> pools; + try { + pools = discoverer.find(zoneId, podId, uri, details); + } catch (DiscoveryException e) { + throw new IllegalArgumentException( + "Not enough information for discovery " + uri, + e); + } + if (pools != null) { + Map.Entry> entry = pools + .entrySet().iterator().next(); + pool = entry.getKey(); + details = entry.getValue(); + break; + } + } + } + } else if (scheme.equalsIgnoreCase("iso")) { + if (port == -1) { + port = 2049; + } + parameters.setType(StoragePoolType.ISO); + parameters.setHost(storageHost); + parameters.setPort(port); + parameters.setPath(hostPath); + } else if (scheme.equalsIgnoreCase("vmfs")) { + parameters.setType(StoragePoolType.VMFS); + parameters.setHost("VMFS datastore: " + hostPath); + parameters.setPort(0); + parameters.setPath(hostPath); + } else if (scheme.equalsIgnoreCase("ocfs2")) { + port = 7777; + parameters.setType(StoragePoolType.OCFS2); + parameters.setHost("clustered"); + parameters.setPort(port); + parameters.setPath(hostPath); + } else { + StoragePoolType type = Enum.valueOf(StoragePoolType.class, scheme); + + if (type != null) { + 
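+                // Fallback: any other scheme is resolved directly against the
+                // StoragePoolType enum. Note that Enum.valueOf() throws
+                // IllegalArgumentException for an unknown name rather than
+                // returning null, so the null check here is purely defensive.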
parameters.setType(type); + parameters.setHost(storageHost); + parameters.setPort(0); + parameters.setPath(hostPath); + } else { + s_logger.warn("Unable to figure out the scheme for URI: " + uri); + throw new IllegalArgumentException( + "Unable to figure out the scheme for URI: " + uri); + } + } + + if (localStorage == null) { + List pools = primaryDataStoreDao + .listPoolByHostPath(storageHost, hostPath); + if (!pools.isEmpty() && !scheme.equalsIgnoreCase("sharedmountpoint")) { + Long oldPodId = pools.get(0).getPodId(); + throw new CloudRuntimeException("Storage pool " + uri + + " already in use by another pod (id=" + oldPodId + ")"); + } + } + + Object existingUuid = dsInfos.get("uuid"); + String uuid = null; + + if (existingUuid != null) { + uuid = (String)existingUuid; + } else if (scheme.equalsIgnoreCase("sharedmountpoint") + || scheme.equalsIgnoreCase("clvm")) { + uuid = UUID.randomUUID().toString(); + } else if (scheme.equalsIgnoreCase("PreSetup")) { + uuid = hostPath.replace("/", ""); + } else { + uuid = UUID.nameUUIDFromBytes( + new String(storageHost + hostPath).getBytes()).toString(); + } + + List spHandles = primaryDataStoreDao + .findIfDuplicatePoolsExistByUUID(uuid); + if ((spHandles != null) && (spHandles.size() > 0)) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Another active pool with the same uuid already exists"); + } + throw new CloudRuntimeException( + "Another active pool with the same uuid already exists"); + } + + String poolName = (String) dsInfos.get("name"); + + parameters.setUuid(uuid); + parameters.setZoneId(zoneId); + parameters.setPodId(podId); + parameters.setName(poolName); + parameters.setClusterId(clusterId); + parameters.setProviderName(providerName); + + return dataStoreHelper.createPrimaryDataStore(parameters); + } + + protected boolean createStoragePool(long hostId, StoragePool pool) { + s_logger.debug("creating pool " + pool.getName() + " on host " + + hostId); + if (pool.getPoolType() != StoragePoolType.NetworkFilesystem + && pool.getPoolType() != StoragePoolType.Filesystem + && pool.getPoolType() != StoragePoolType.IscsiLUN + && pool.getPoolType() != StoragePoolType.Iscsi + && pool.getPoolType() != StoragePoolType.VMFS + && pool.getPoolType() != StoragePoolType.SharedMountPoint + && pool.getPoolType() != StoragePoolType.PreSetup + && pool.getPoolType() != StoragePoolType.OCFS2 + && pool.getPoolType() != StoragePoolType.RBD + && pool.getPoolType() != StoragePoolType.CLVM) { + s_logger.warn(" Doesn't support storage pool type " + + pool.getPoolType()); + return false; + } + CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); + final Answer answer = agentMgr.easySend(hostId, cmd); + if (answer != null && answer.getResult()) { + return true; + } else { + primaryDataStoreDao.expunge(pool.getId()); + String msg = ""; + if (answer != null) { + msg = "Can not create storage pool through host " + hostId + + " due to " + answer.getDetails(); + s_logger.warn(msg); + } else { + msg = "Can not create storage pool through host " + hostId + + " due to CreateStoragePoolCommand returns null"; + s_logger.warn(msg); + } + throw new CloudRuntimeException(msg); + } + } + + @Override + public boolean attachCluster(DataStore store, ClusterScope scope) { + PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store; + // Check if there is host up in this cluster + List allHosts = _resourceMgr.listAllUpAndEnabledHosts( + Host.Type.Routing, primarystore.getClusterId(), + primarystore.getPodId(), primarystore.getDataCenterId()); + if 
(allHosts.isEmpty()) { + throw new CloudRuntimeException( + "No host up to associate a storage pool with in cluster " + + primarystore.getClusterId()); + } + + if (primarystore.getPoolType() == StoragePoolType.OCFS2 + && !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) { + s_logger.warn("Can not create storage pool " + primarystore + + " on cluster " + primarystore.getClusterId()); + primaryDataStoreDao.expunge(primarystore.getId()); + return false; + } + + boolean success = false; + for (HostVO h : allHosts) { + success = createStoragePool(h.getId(), primarystore); + if (success) { + break; + } + } + + s_logger.debug("In createPool Adding the pool to each of the hosts"); + List poolHosts = new ArrayList(); + for (HostVO h : allHosts) { + try { + this.storageMgr.connectHostToSharedPool(h.getId(), + primarystore.getId()); + poolHosts.add(h); + } catch (Exception e) { + s_logger.warn("Unable to establish a connection between " + h + + " and " + primarystore, e); + } + } + + if (poolHosts.isEmpty()) { + s_logger.warn("No host can access storage pool " + primarystore + + " on cluster " + primarystore.getClusterId()); + primaryDataStoreDao.expunge(primarystore.getId()); + return false; + } + + this.dataStoreHelper.attachCluster(store); + return true; + } + + @Override + public boolean attachZone(DataStore dataStore, ZoneScope scope) { + List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId()); + for (HostVO host : hosts) { + try { + this.storageMgr.connectHostToSharedPool(host.getId(), + dataStore.getId()); + } catch (Exception e) { + s_logger.warn("Unable to establish a connection between " + host + + " and " + dataStore, e); + } + } + this.dataStoreHelper.attachZone(dataStore); + return true; + } + + @Override + public boolean dettach() { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean unmanaged() { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean maintain(DataStore dataStore) { + storagePoolAutmation.maintain(dataStore); + this.dataStoreHelper.maintain(dataStore); + return true; + } + + @Override + public boolean cancelMaintain(DataStore store) { + this.dataStoreHelper.cancelMaintain(store); + storagePoolAutmation.cancelMaintain(store); + return true; + } + + @DB + @Override + public boolean deleteDataStore(DataStore store) { + List hostPoolRecords = this._storagePoolHostDao + .listByPoolId(store.getId()); + StoragePool pool = (StoragePool)store; + boolean deleteFlag = false; + // Remove the SR associated with the Xenserver + for (StoragePoolHostVO host : hostPoolRecords) { + DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand( + pool); + final Answer answer = agentMgr.easySend(host.getHostId(), + deleteCmd); + + if (answer != null && answer.getResult()) { + deleteFlag = true; + break; + } else { + if (answer != null) { + s_logger.debug("Failed to delete storage pool: " + answer.getResult()); + } + } + } + + if (!deleteFlag) { + throw new CloudRuntimeException("Failed to delete storage pool on host"); + } + + this.dataStoreHelper.deletePrimaryDataStore(store); + return false; + } + + @Override + public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { + this.dataStoreHelper.attachHost(store, scope, existingInfo); + return true; + } +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java 
b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/provider/CloudStackPrimaryDataStoreProviderImpl.java similarity index 58% rename from engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java rename to plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/provider/CloudStackPrimaryDataStoreProviderImpl.java index 09e78e45659..4d46d99fab3 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java +++ b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/provider/CloudStackPrimaryDataStoreProviderImpl.java @@ -18,61 +18,63 @@ */ package org.apache.cloudstack.storage.datastore.provider; +import java.util.HashSet; import java.util.Map; - -import javax.inject.Inject; +import java.util.Set; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; -import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; -import org.apache.cloudstack.storage.datastore.driver.AncientPrimaryDataStoreDriverImpl; -import org.apache.cloudstack.storage.datastore.lifecycle.AncientPrimaryDataStoreLifeCycleImpl; -import org.springframework.stereotype.Component; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.cloudstack.storage.datastore.driver.CloudStackPrimaryDataStoreDriverImpl; +import org.apache.cloudstack.storage.datastore.lifecycle.CloudStackPrimaryDataStoreLifeCycleImpl; import com.cloud.utils.component.ComponentContext; -@Component -public class AncientPrimaryDataStoreProviderImpl implements +public class CloudStackPrimaryDataStoreProviderImpl implements PrimaryDataStoreProvider { private final String providerName = "ancient primary data store provider"; protected PrimaryDataStoreDriver driver; - @Inject - PrimaryDataStoreProviderManager storeMgr; + protected HypervisorHostListener listener; protected DataStoreLifeCycle lifecyle; - protected String uuid; - protected long id; + + CloudStackPrimaryDataStoreProviderImpl() { + + } + @Override public String getName() { return providerName; } @Override - public DataStoreLifeCycle getLifeCycle() { + public DataStoreLifeCycle getDataStoreLifeCycle() { return this.lifecyle; } @Override public boolean configure(Map params) { - lifecyle = ComponentContext.inject(AncientPrimaryDataStoreLifeCycleImpl.class); - driver = ComponentContext.inject(AncientPrimaryDataStoreDriverImpl.class); - uuid = (String)params.get("uuid"); - id = (Long)params.get("id"); - storeMgr.registerDriver(uuid, this.driver); - HypervisorHostListener listener = ComponentContext.inject(DefaultHostListener.class); - storeMgr.registerHostListener(uuid, listener); + lifecyle = ComponentContext.inject(CloudStackPrimaryDataStoreLifeCycleImpl.class); + driver = ComponentContext.inject(CloudStackPrimaryDataStoreDriverImpl.class); + listener = ComponentContext.inject(DefaultHostListener.class); return true; } @Override - public String getUuid() { - return this.uuid; + public PrimaryDataStoreDriver getDataStoreDriver() { + return this.driver; } @Override - public long getId() { - return this.id; + public HypervisorHostListener getHostListener() { + return this.listener; + } + + @Override + public Set getTypes() { + Set types = new HashSet(); + 
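+        // Advertise this plugin as a primary storage provider only (the provider
+        // manager presumably keys off this set when deciding how to register it).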
types.add(DataStoreProviderType.PRIMARY); + return types; } - } diff --git a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java index 91c446fe5ae..dc29fb835d2 100644 --- a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java +++ b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java @@ -26,10 +26,10 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; import org.apache.cloudstack.storage.command.CreateObjectAnswer; import org.apache.cloudstack.storage.command.CreateVolumeFromBaseImageCommand; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProvider; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index 191157a4db8..efdee207f4d 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -2242,6 +2242,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(DeleteAlertsCmd.class); cmdList.add(ArchiveEventsCmd.class); cmdList.add(DeleteEventsCmd.class); + cmdList.add(ListStorageProvidersCmd.class); return cmdList; } diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index b0a1da14eb8..f37654bb317 100755 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -712,7 +712,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } DataStoreProvider provider = this.dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider(); - DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); + DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); if (pool == null) { Map params = new HashMap(); String name = (host.getName() + " Local Storage"); @@ -724,7 +724,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C params.put("localStorage", true); params.put("details", pInfo.getDetails()); params.put("uuid", pInfo.getUuid()); - params.put("providerId", provider.getId()); + params.put("providerName", provider.getName()); store = lifeCycle.initialize(params); } else { @@ -748,15 +748,15 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException, ResourceUnavailableException { - String providerUuid = cmd.getStorageProviderUuid(); + String providerName = cmd.getStorageProviderName(); DataStoreProvider storeProvider = dataStoreProviderMgr - .getDataStoreProviderByUuid(providerUuid); + .getDataStoreProvider(providerName); if (storeProvider == null) { storeProvider = dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider(); if (storeProvider == null) { throw new InvalidParameterValueException( - "can't find storage provider: " + providerUuid); + "can't find storage provider: " + providerName); } } @@ -821,9 
+821,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C params.put("tags", cmd.getTags()); params.put("name", cmd.getStoragePoolName()); params.put("details", details); - params.put("providerId", storeProvider.getId()); + params.put("providerName", storeProvider.getName()); - DataStoreLifeCycle lifeCycle = storeProvider.getLifeCycle(); + DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle(); DataStore store = null; try { store = lifeCycle.initialize(params); @@ -948,9 +948,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C s_logger.trace("Released lock for storage pool " + id); DataStoreProvider storeProvider = dataStoreProviderMgr - .getDataStoreProviderById(sPool.getStorageProviderId()); - DataStoreLifeCycle lifeCycle = storeProvider.getLifeCycle(); - lifeCycle.deleteDataStore(id); + .getDataStoreProvider(sPool.getStorageProviderName()); + DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle(); + DataStore store = dataStoreMgr.getDataStore( + sPool.getId(), DataStoreRole.Primary); + lifeCycle.deleteDataStore(store); return false; } @@ -963,8 +965,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C s_logger.debug("Adding pool " + pool.getName() + " to host " + hostId); DataStoreProvider provider = dataStoreProviderMgr - .getDataStoreProviderById(pool.getStorageProviderId()); - HypervisorHostListener listener = hostListeners.get(provider.getUuid()); + .getDataStoreProvider(pool.getStorageProviderName()); + HypervisorHostListener listener = hostListeners.get(provider.getName()); listener.hostConnect(hostId, pool.getId()); } @@ -1415,19 +1417,16 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } DataStoreProvider provider = dataStoreProviderMgr - .getDataStoreProviderById(primaryStorage.getStorageProviderId()); - DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); - lifeCycle.maintain(primaryStorage.getId()); + .getDataStoreProvider(primaryStorage.getStorageProviderName()); + DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); + DataStore store = dataStoreMgr.getDataStore( + primaryStorage.getId(), DataStoreRole.Primary); + lifeCycle.maintain(store); return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore( primaryStorage.getId(), DataStoreRole.Primary); } - private void setPoolStateToError(StoragePoolVO primaryStorage) { - primaryStorage.setStatus(StoragePoolStatus.ErrorInMaintenance); - _storagePoolDao.update(primaryStorage.getId(), primaryStorage); - } - @Override @DB public PrimaryDataStoreInfo cancelPrimaryStorageForMaintenance( @@ -1457,29 +1456,16 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } DataStoreProvider provider = dataStoreProviderMgr - .getDataStoreProviderById(primaryStorage.getStorageProviderId()); - DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); - lifeCycle.cancelMaintain(primaryStorage.getId()); + .getDataStoreProvider(primaryStorage.getStorageProviderName()); + DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); + DataStore store = dataStoreMgr.getDataStore( + primaryStorage.getId(), DataStoreRole.Primary); + lifeCycle.cancelMaintain(store); + return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore( primaryStorage.getId(), DataStoreRole.Primary); } - private boolean sendToVmResidesOn(StoragePoolVO PrimaryDataStoreVO, - Command cmd) { - ClusterVO cluster = _clusterDao.findById(PrimaryDataStoreVO - .getClusterId()); - if 
((cluster.getHypervisorType() == HypervisorType.KVM || cluster - .getHypervisorType() == HypervisorType.VMware) - && ((cmd instanceof ManageSnapshotCommand) || (cmd instanceof BackupSnapshotCommand))) { - return true; - } else { - return false; - } - } - - - - protected class StorageGarbageCollector implements Runnable { public StorageGarbageCollector() { @@ -1845,9 +1831,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override - public synchronized boolean registerHostListener(String providerUuid, + public synchronized boolean registerHostListener(String providerName, HypervisorHostListener listener) { - hostListeners.put(providerUuid, listener); + hostListeners.put(providerName, listener); return true; } diff --git a/server/src/com/cloud/storage/StoragePoolAutomation.java b/server/src/com/cloud/storage/StoragePoolAutomation.java new file mode 100644 index 00000000000..e8eb9b79cd5 --- /dev/null +++ b/server/src/com/cloud/storage/StoragePoolAutomation.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.storage; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; + +public interface StoragePoolAutomation { + public boolean maintain(DataStore store); + public boolean cancelMaintain(DataStore store); +} diff --git a/server/src/com/cloud/storage/StoragePoolAutomationImpl.java b/server/src/com/cloud/storage/StoragePoolAutomationImpl.java new file mode 100644 index 00000000000..9bba979b9c0 --- /dev/null +++ b/server/src/com/cloud/storage/StoragePoolAutomationImpl.java @@ -0,0 +1,456 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package com.cloud.storage; + +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.alert.AlertManager; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.resource.ResourceManager; +import com.cloud.server.ManagementServer; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.StoragePoolWorkDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.Account; +import com.cloud.user.User; +import com.cloud.user.UserContext; +import com.cloud.user.dao.UserDao; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.exception.ExecutionException; +import com.cloud.vm.ConsoleProxyVO; +import com.cloud.vm.DomainRouterVO; +import com.cloud.vm.SecondaryStorageVmVO; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.dao.ConsoleProxyDao; +import com.cloud.vm.dao.DomainRouterDao; +import com.cloud.vm.dao.SecondaryStorageVmDao; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; + +@Component +public class StoragePoolAutomationImpl implements StoragePoolAutomation { + private static final Logger s_logger = Logger + .getLogger(StoragePoolAutomationImpl.class); + @Inject + protected VirtualMachineManager vmMgr; + @Inject + protected SecondaryStorageVmDao _secStrgDao; + @Inject + UserVmDao userVmDao; + @Inject + protected UserDao _userDao; + @Inject + protected DomainRouterDao _domrDao; + @Inject + protected StoragePoolHostDao _storagePoolHostDao; + @Inject + protected AlertManager _alertMgr; + @Inject + protected ConsoleProxyDao _consoleProxyDao; + + @Inject + protected StoragePoolWorkDao _storagePoolWorkDao; + @Inject + PrimaryDataStoreDao primaryDataStoreDao; + @Inject + DataStoreManager dataStoreMgr; + @Inject + protected ResourceManager _resourceMgr; + @Inject + AgentManager agentMgr; + @Inject + VolumeDao volumeDao; + @Inject + VMInstanceDao vmDao; + @Inject + ManagementServer server; + @Inject DataStoreProviderManager providerMgr; + + @Override + public boolean maintain(DataStore store) { + Long userId = UserContext.current().getCallerUserId(); + User user = _userDao.findById(userId); + Account account = UserContext.current().getCaller(); + StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId()); + try { + StoragePool storagePool = (StoragePool) store; + List hosts = _resourceMgr.listHostsInClusterByStatus( + pool.getClusterId(), Status.Up); + if (hosts == null || hosts.size() == 0) { + pool.setStatus(StoragePoolStatus.Maintenance); + primaryDataStoreDao.update(pool.getId(), pool); + return 
true; + } else { + // set the pool state to prepare for maintenance + pool.setStatus(StoragePoolStatus.PrepareForMaintenance); + primaryDataStoreDao.update(pool.getId(), pool); + } + // remove heartbeat + for (HostVO host : hosts) { + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand( + false, storagePool); + final Answer answer = agentMgr.easySend(host.getId(), cmd); + if (answer == null || !answer.getResult()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("ModifyStoragePool false failed due to " + + ((answer == null) ? "answer null" : answer + .getDetails())); + } + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("ModifyStoragePool false secceeded"); + } + } + } + // check to see if other ps exist + // if they do, then we can migrate over the system vms to them + // if they dont, then just stop all vms on this one + List upPools = primaryDataStoreDao + .listByStatusInZone(pool.getDataCenterId(), + StoragePoolStatus.Up); + boolean restart = true; + if (upPools == null || upPools.size() == 0) { + restart = false; + } + + // 2. Get a list of all the ROOT volumes within this storage pool + List allVolumes = this.volumeDao.findByPoolId(pool + .getId()); + + // 3. Enqueue to the work queue + for (VolumeVO volume : allVolumes) { + VMInstanceVO vmInstance = vmDao + .findById(volume.getInstanceId()); + + if (vmInstance == null) { + continue; + } + + // enqueue sp work + if (vmInstance.getState().equals(State.Running) + || vmInstance.getState().equals(State.Starting) + || vmInstance.getState().equals(State.Stopping)) { + + try { + StoragePoolWorkVO work = new StoragePoolWorkVO( + vmInstance.getId(), pool.getId(), false, false, + server.getId()); + _storagePoolWorkDao.persist(work); + } catch (Exception e) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Work record already exists, re-using by re-setting values"); + } + StoragePoolWorkVO work = _storagePoolWorkDao + .findByPoolIdAndVmId(pool.getId(), + vmInstance.getId()); + work.setStartedAfterMaintenance(false); + work.setStoppedForMaintenance(false); + work.setManagementServerId(server.getId()); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } + + // 4. 
Process the queue + List pendingWork = _storagePoolWorkDao + .listPendingWorkForPrepareForMaintenanceByPoolId(pool + .getId()); + + for (StoragePoolWorkVO work : pendingWork) { + // shut down the running vms + VMInstanceVO vmInstance = vmDao.findById(work.getVmId()); + + if (vmInstance == null) { + continue; + } + + // if the instance is of type consoleproxy, call the console + // proxy + if (vmInstance.getType().equals( + VirtualMachine.Type.ConsoleProxy)) { + // call the consoleproxymanager + ConsoleProxyVO consoleProxy = _consoleProxyDao + .findById(vmInstance.getId()); + if (!vmMgr.advanceStop(consoleProxy, true, user, account)) { + String errorMsg = "There was an error stopping the console proxy id: " + + vmInstance.getId() + + " ,cannot enable storage maintenance"; + s_logger.warn(errorMsg); + throw new CloudRuntimeException(errorMsg); + } else { + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + + if (restart) { + + if (this.vmMgr.advanceStart(consoleProxy, null, user, + account) == null) { + String errorMsg = "There was an error starting the console proxy id: " + + vmInstance.getId() + + " on another storage pool, cannot enable primary storage maintenance"; + s_logger.warn(errorMsg); + } else { + // update work status + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } + + // if the instance is of type uservm, call the user vm manager + if (vmInstance.getType().equals(VirtualMachine.Type.User)) { + UserVmVO userVm = userVmDao.findById(vmInstance.getId()); + if (!vmMgr.advanceStop(userVm, true, user, account)) { + String errorMsg = "There was an error stopping the user vm id: " + + vmInstance.getId() + + " ,cannot enable storage maintenance"; + s_logger.warn(errorMsg); + throw new CloudRuntimeException(errorMsg); + } else { + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + + // if the instance is of type secondary storage vm, call the + // secondary storage vm manager + if (vmInstance.getType().equals( + VirtualMachine.Type.SecondaryStorageVm)) { + SecondaryStorageVmVO secStrgVm = _secStrgDao + .findById(vmInstance.getId()); + if (!vmMgr.advanceStop(secStrgVm, true, user, account)) { + String errorMsg = "There was an error stopping the ssvm id: " + + vmInstance.getId() + + " ,cannot enable storage maintenance"; + s_logger.warn(errorMsg); + throw new CloudRuntimeException(errorMsg); + } else { + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + + if (restart) { + if (vmMgr.advanceStart(secStrgVm, null, user, account) == null) { + String errorMsg = "There was an error starting the ssvm id: " + + vmInstance.getId() + + " on another storage pool, cannot enable primary storage maintenance"; + s_logger.warn(errorMsg); + } else { + // update work status + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } + + // if the instance is of type domain router vm, call the network + // manager + if (vmInstance.getType().equals( + VirtualMachine.Type.DomainRouter)) { + DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); + if (!vmMgr.advanceStop(domR, true, user, account)) { + String errorMsg = "There was an error stopping the domain router id: " + + vmInstance.getId() + + " ,cannot enable primary storage maintenance"; + s_logger.warn(errorMsg); + throw new 
CloudRuntimeException(errorMsg);
+                    } else {
+                        // update work status
+                        work.setStoppedForMaintenance(true);
+                        _storagePoolWorkDao.update(work.getId(), work);
+                    }
+
+                    if (restart) {
+                        if (vmMgr.advanceStart(domR, null, user, account) == null) {
+                            String errorMsg = "There was an error starting the domain router id: "
+                                    + vmInstance.getId()
+                                    + " on another storage pool, cannot enable primary storage maintenance";
+                            s_logger.warn(errorMsg);
+                        } else {
+                            // update work status
+                            work.setStartedAfterMaintenance(true);
+                            _storagePoolWorkDao.update(work.getId(), work);
+                        }
+                    }
+                }
+            }
+
+        } catch(Exception e) {
+            s_logger.error(
+                    "Exception in enabling primary storage maintenance:", e);
+            pool.setStatus(StoragePoolStatus.ErrorInMaintenance);
+            this.primaryDataStoreDao.update(pool.getId(), pool);
+            throw new CloudRuntimeException(e.getMessage());
+        }
+        return true;
+    }
+
+    @Override
+    public boolean cancelMaintain(DataStore store) {
+        // Change the storage state back to up
+        Long userId = UserContext.current().getCallerUserId();
+        User user = _userDao.findById(userId);
+        Account account = UserContext.current().getCaller();
+        StoragePoolVO poolVO = this.primaryDataStoreDao
+                .findById(store.getId());
+        StoragePool pool = (StoragePool)store;
+
+        List<HostVO> hosts = _resourceMgr.listHostsInClusterByStatus(
+                pool.getClusterId(), Status.Up);
+        if (hosts == null || hosts.size() == 0) {
+            return true;
+        }
+        // add heartbeat
+        for (HostVO host : hosts) {
+            ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(
+                    true, pool);
+            final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd);
+            if (answer == null || !answer.getResult()) {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("ModifyStoragePool add failed due to "
+                            + ((answer == null) ? "answer null" : answer
+                                    .getDetails()));
+                }
+            } else {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("ModifyStoragePool add succeeded");
+                }
+            }
+        }
+
+        // 2. Get a list of pending work for this queue
+        List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao
+                .listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId());
+
+        // 3. 
work through the queue + for (StoragePoolWorkVO work : pendingWork) { + try { + VMInstanceVO vmInstance = vmDao.findById(work.getVmId()); + + if (vmInstance == null) { + continue; + } + + // if the instance is of type consoleproxy, call the console + // proxy + if (vmInstance.getType().equals( + VirtualMachine.Type.ConsoleProxy)) { + + ConsoleProxyVO consoleProxy = _consoleProxyDao + .findById(vmInstance.getId()); + if (vmMgr.advanceStart(consoleProxy, null, user, account) == null) { + String msg = "There was an error starting the console proxy id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + + // if the instance is of type ssvm, call the ssvm manager + if (vmInstance.getType().equals( + VirtualMachine.Type.SecondaryStorageVm)) { + SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance + .getId()); + if (vmMgr.advanceStart(ssVm, null, user, account) == null) { + String msg = "There was an error starting the ssvm id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + + // if the instance is of type ssvm, call the ssvm manager + if (vmInstance.getType().equals( + VirtualMachine.Type.DomainRouter)) { + DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); + if (vmMgr.advanceStart(domR, null, user, account) == null) { + String msg = "There was an error starting the domR id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + + // if the instance is of type user vm, call the user vm manager + if (vmInstance.getType().equals(VirtualMachine.Type.User)) { + UserVmVO userVm = userVmDao.findById(vmInstance.getId()); + + if (vmMgr.advanceStart(userVm, null, user, account) == null) { + + String msg = "There was an error starting the user vm id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + return true; + } catch (Exception e) { + s_logger.debug("Failed start vm", e); + throw new CloudRuntimeException(e.toString()); + } + } + return false; + } + +} diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql index 4e39a71ef9d..eb650cc9da1 100644 --- a/setup/db/db/schema-410to420.sql +++ b/setup/db/db/schema-410to420.sql @@ -29,6 +29,7 @@ DELETE FROM `cloud`.`configuration` where name='vmware.percluster.host.max'; INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'AgentManager', 'xen.nics.max', '7', 'Maximum allowed nics for Vms created on Xen'); ALTER TABLE `cloud`.`load_balancer_vm_map` ADD state VARCHAR(40) NULL COMMENT 'service status updated by LB healthcheck manager'; +alter table storage_pool change storage_provider_id storage_provider_name varchar(255); alter table template_host_ref add state 
varchar(255); alter table template_host_ref add update_count bigint unsigned; alter table template_host_ref add updated datetime; @@ -70,13 +71,12 @@ CREATE TABLE `cloud`.`data_store_provider` ( CREATE TABLE `cloud`.`image_data_store` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', `name` varchar(255) NOT NULL COMMENT 'name of data store', - `image_provider_id` bigint unsigned NOT NULL COMMENT 'id of image_data_store_provider', + `image_provider_name` varchar(255) NOT NULL COMMENT 'id of image_data_store_provider', `protocol` varchar(255) NOT NULL COMMENT 'protocol of data store', `data_center_id` bigint unsigned COMMENT 'datacenter id of data store', `scope` varchar(255) COMMENT 'scope of data store', `uuid` varchar(255) COMMENT 'uuid of data store', - PRIMARY KEY(`id`), - CONSTRAINT `fk_tags__image_data_store_provider_id` FOREIGN KEY(`image_provider_id`) REFERENCES `data_store_provider`(`id`) + PRIMARY KEY(`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; ALTER TABLE `cloud`.`vm_template` ADD COLUMN `image_data_store_id` bigint unsigned; diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index 1fe5e1641f4..ab2456dd3eb 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -95,6 +95,7 @@ known_categories = { 'InstanceGroup': 'VM Group', 'StorageMaintenance': 'Storage Pool', 'StoragePool': 'Storage Pool', + 'StorageProvider': 'Storage Pool', 'SecurityGroup': 'Security Group', 'SSH': 'SSH', 'register': 'Registration', From 0d1cd121c2e6306e02fdd0c4290dc9c33102170f Mon Sep 17 00:00:00 2001 From: Jessica Tomechak Date: Wed, 20 Mar 2013 22:41:34 -0700 Subject: [PATCH 063/123] Docs. Remove "new" from a feature description. The feature has been in the software awhile. --- docs/en-US/about-password-encryption.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en-US/about-password-encryption.xml b/docs/en-US/about-password-encryption.xml index 3cd84d19508..6c11c579ed2 100644 --- a/docs/en-US/about-password-encryption.xml +++ b/docs/en-US/about-password-encryption.xml @@ -52,7 +52,7 @@ Of course, the database secret key itself can not be stored in the open – it must be encrypted. How then does &PRODUCT; read it? A second secret key must be provided from an external source during Management Server startup. This key can be provided in one of two ways: - loaded from a file or provided by the &PRODUCT; administrator. The &PRODUCT; database has a new + loaded from a file or provided by the &PRODUCT; administrator. The &PRODUCT; database has a configuration setting that lets it know which of these methods will be used. If the encryption type is set to "file," the key must be in a file in a known location. If the encryption type is set to "web," the administrator runs the utility From 61d8dde033317fe7559bebe0d050444ffc1fabfb Mon Sep 17 00:00:00 2001 From: Vijay venkatachalam Date: Tue, 19 Mar 2013 21:04:35 +0530 Subject: [PATCH 064/123] CLOUDSTACK-1537. 
Fixing Network Restart case for AutoScale --- .../cloud/network/lb/LoadBalancingRule.java | 8 +++ .../network/resource/NetscalerResource.java | 27 ++++----- .../com/cloud/network/NetworkManagerImpl.java | 21 +------ .../network/lb/LoadBalancingRulesManager.java | 1 + .../lb/LoadBalancingRulesManagerImpl.java | 59 +++++++++++++------ 5 files changed, 61 insertions(+), 55 deletions(-) diff --git a/api/src/com/cloud/network/lb/LoadBalancingRule.java b/api/src/com/cloud/network/lb/LoadBalancingRule.java index 84526c5ea45..3e11e8c7c2c 100644 --- a/api/src/com/cloud/network/lb/LoadBalancingRule.java +++ b/api/src/com/cloud/network/lb/LoadBalancingRule.java @@ -131,6 +131,10 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { return lb; } + public void setDestinations(List destinations) { + this.destinations = destinations; + } + public List getDestinations() { return destinations; } @@ -139,6 +143,10 @@ public class LoadBalancingRule implements FirewallRule, LoadBalancer { return stickinessPolicies; } + public void setHealthCheckPolicies(List healthCheckPolicies) { + this.healthCheckPolicies = healthCheckPolicies; + } + public List getHealthCheckPolicies() { return healthCheckPolicies; } diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java b/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java index 4eb0ce2065d..c09869b996a 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java +++ b/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java @@ -1756,23 +1756,11 @@ public class NetscalerResource implements ServerResource { if(!isAutoScaleSupportedInNetScaler()) { throw new ExecutionException("AutoScale not supported in this version of NetScaler"); } - if(vmGroupTO.getState().equals("new")) { - assert !loadBalancer.isRevoked(); - createAutoScaleConfig(loadBalancer); - } - else if(loadBalancer.isRevoked() || vmGroupTO.getState().equals("revoke")) { + if(loadBalancer.isRevoked() || vmGroupTO.getState().equals("revoke")) { removeAutoScaleConfig(loadBalancer); } - else if(vmGroupTO.getState().equals("enabled")) { - assert !loadBalancer.isRevoked(); - enableAutoScaleConfig(loadBalancer, false); - } - else if(vmGroupTO.getState().equals("disabled")) { - assert !loadBalancer.isRevoked(); - disableAutoScaleConfig(loadBalancer, false); - } else { - ///// This should never happen - throw new ExecutionException("Unknown AutoScale Vm Group State :" + vmGroupTO.getState()); + else { + createAutoScaleConfig(loadBalancer); } // AutoScale APIs are successful executed, now save the configuration. 
saveConfiguration(); @@ -1827,7 +1815,14 @@ public class NetscalerResource implements ServerResource { } // Create the autoscale config - enableAutoScaleConfig(loadBalancerTO, false); + if(!loadBalancerTO.getAutoScaleVmGroupTO().getState().equals("disabled")) { + // on restart of network, there might be vmgrps in disabled state, no need to create autoscale config for them + enableAutoScaleConfig(loadBalancerTO, false); + } + else if(loadBalancerTO.getAutoScaleVmGroupTO().getState().equals("disabled")) { + disableAutoScaleConfig(loadBalancerTO, false); + } + return true; } diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java index 591910b13c6..5136572182b 100755 --- a/server/src/com/cloud/network/NetworkManagerImpl.java +++ b/server/src/com/cloud/network/NetworkManagerImpl.java @@ -3091,27 +3091,8 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L success = false; } - // remove all LB rules for the network - List lbs = _lbDao.listByNetworkId(networkId); - List lbRules = new ArrayList(); - for (LoadBalancerVO lb : lbs) { - s_logger.trace("Marking lb rule " + lb + " with Revoke state"); - lb.setState(FirewallRule.State.Revoke); - List dstList = _lbMgr.getExistingDestinations(lb.getId()); - List policyList = _lbMgr.getStickinessPolicies(lb.getId()); - List hcPolicyList = _lbMgr.getHealthCheckPolicies (lb.getId()); - // mark all destination with revoke state - for (LbDestination dst : dstList) { - s_logger.trace("Marking lb destination " + dst + " with Revoke state"); - dst.setRevoked(true); - } - - LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList, hcPolicyList); - lbRules.add(loadBalancing); - } - try { - if (!_lbMgr.applyRules(network, Purpose.LoadBalancing, lbRules)) { + if (!_lbMgr.revokeLoadBalancersForNetwork(networkId)) { s_logger.warn("Failed to cleanup lb rules as a part of shutdownNetworkRules"); success = false; } diff --git a/server/src/com/cloud/network/lb/LoadBalancingRulesManager.java b/server/src/com/cloud/network/lb/LoadBalancingRulesManager.java index da19f86c21f..d98872a0906 100644 --- a/server/src/com/cloud/network/lb/LoadBalancingRulesManager.java +++ b/server/src/com/cloud/network/lb/LoadBalancingRulesManager.java @@ -51,4 +51,5 @@ public interface LoadBalancingRulesManager extends LoadBalancingRulesService { boolean applyLoadBalancersForNetwork(long networkId) throws ResourceUnavailableException; String getLBCapability(long networkid, String capabilityName); boolean configureLbAutoScaleVmGroup(long vmGroupid, String currentState) throws ResourceUnavailableException; + boolean revokeLoadBalancersForNetwork(long networkId) throws ResourceUnavailableException; } diff --git a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java index a06cbc5ca99..80e75cd3d66 100755 --- a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java +++ b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java @@ -338,7 +338,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements * Regular config like destinations need not be packed for applying * autoscale config as of today. 
*/ - LoadBalancingRule rule = new LoadBalancingRule(lb, null, null, null); + List policyList = getStickinessPolicies(lb.getId()); + LoadBalancingRule rule = new LoadBalancingRule(lb, null, policyList, null); rule.setAutoScaleVmGroup(lbAutoScaleVmGroup); if (!isRollBackAllowedForProvider(lb)) { @@ -1199,18 +1200,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (apply) { try { - if (_autoScaleVmGroupDao.isAutoScaleLoadBalancer(loadBalancerId)) { - // Get the associated VmGroup - AutoScaleVmGroupVO vmGroup = _autoScaleVmGroupDao.listByAll(loadBalancerId, null).get(0); - if (!applyAutoScaleConfig(lb, vmGroup, vmGroup.getState())) { - s_logger.warn("Unable to apply the autoscale config"); - return false; - } - } else { - if (!applyLoadBalancerConfig(loadBalancerId)) { - s_logger.warn("Unable to apply the load balancer config"); - return false; - } + if (!applyLoadBalancerConfig(loadBalancerId)) { + s_logger.warn("Unable to apply the load balancer config"); + return false; } } catch (ResourceUnavailableException e) { if (rollBack && isRollBackAllowedForProvider(lb)) { @@ -1470,6 +1462,20 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return applyLoadBalancerRules(lbs, true); } + @Override + public boolean revokeLoadBalancersForNetwork(long networkId) throws ResourceUnavailableException { + List lbs = _lbDao.listByNetworkId(networkId); + if (lbs != null) { + for(LoadBalancerVO lb : lbs) { // called during restart, not persisting state in db + lb.setState(FirewallRule.State.Revoke); + } + return applyLoadBalancerRules(lbs, false); // called during restart, not persisting state in db + } else { + s_logger.info("Network id=" + networkId + " doesn't have load balancer rules, nothing to revoke"); + return true; + } + } + @Override public boolean applyLoadBalancersForNetwork(long networkId) throws ResourceUnavailableException { List lbs = _lbDao.listByNetworkId(networkId); @@ -1500,18 +1506,33 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements return handled; } + private LoadBalancingRule getLoadBalancerRuleToApply(LoadBalancerVO lb) { + + List policyList = getStickinessPolicies(lb.getId()); + LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, null, policyList, null); + + if (_autoScaleVmGroupDao.isAutoScaleLoadBalancer(lb.getId())) { + // Get the associated VmGroup + AutoScaleVmGroupVO vmGroup = _autoScaleVmGroupDao.listByAll(lb.getId(), null).get(0); + LbAutoScaleVmGroup lbAutoScaleVmGroup = getLbAutoScaleVmGroup(vmGroup, vmGroup.getState(), lb); + loadBalancing.setAutoScaleVmGroup(lbAutoScaleVmGroup); + } else { + List dstList = getExistingDestinations(lb.getId()); + loadBalancing.setDestinations(dstList); + List hcPolicyList = getHealthCheckPolicies(lb.getId()); + loadBalancing.setHealthCheckPolicies(hcPolicyList); + } + + return loadBalancing; + } + @DB protected boolean applyLoadBalancerRules(List lbs, boolean updateRulesInDB) throws ResourceUnavailableException { Transaction txn = Transaction.currentTxn(); List rules = new ArrayList(); for (LoadBalancerVO lb : lbs) { - List dstList = getExistingDestinations(lb.getId()); - List policyList = getStickinessPolicies(lb.getId()); - List hcPolicyList = getHealthCheckPolicies(lb.getId()); - - LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList, policyList, hcPolicyList); - rules.add(loadBalancing); + rules.add(getLoadBalancerRuleToApply(lb)); } if (!_networkMgr.applyRules(rules, FirewallRule.Purpose.LoadBalancing, this, false)) { From 
62fd9115c2fd0a742aff15f43b401b34553ffe04 Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Thu, 21 Mar 2013 13:24:22 -0700 Subject: [PATCH 065/123] UI Dialog: Don't show dialog with empty message --- ui/scripts/ui/dialog.js | 46 ++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/ui/scripts/ui/dialog.js b/ui/scripts/ui/dialog.js index 88dba3fa498..bb372fbf3d6 100644 --- a/ui/scripts/ui/dialog.js +++ b/ui/scripts/ui/dialog.js @@ -585,27 +585,31 @@ * Notice dialog */ notice: function(args) { - return $( - $('').addClass('message').html( - _l(args.message) - ) - ).dialog({ - title: _l('label.status'), - dialogClass: 'notice', - closeOnEscape: false, - zIndex: 5000, - buttons: [ - { - text: _l('Close'), - 'class': 'close', - click: function() { - $(this).dialog('destroy'); - if (args.clickAction) args.clickAction(); - $('.hovered-elem').hide(); - } - } - ] - }); + if (args.message) { + return $( + $('').addClass('message').html( + _l(args.message) + ) + ).dialog({ + title: _l('label.status'), + dialogClass: 'notice', + closeOnEscape: false, + zIndex: 5000, + buttons: [ + { + text: _l('Close'), + 'class': 'close', + click: function() { + $(this).dialog('destroy'); + if (args.clickAction) args.clickAction(); + $('.hovered-elem').hide(); + } + } + ] + }); + } + + return false; } }; })(window.jQuery, window.cloudStack); From 85ef51170299533cce8c6793c68be69258da9fa7 Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Thu, 21 Mar 2013 13:25:49 -0700 Subject: [PATCH 066/123] Instances UI: Better start instance notification -Don't show dialog if no password enabled on start -- inhibits 'Start instance' dialog from appearing for every VM. -Use standard dialog (not alert) for VM password notification --- ui/scripts/instances.js | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ui/scripts/instances.js b/ui/scripts/instances.js index 787239d9528..1e3ce45ce22 100644 --- a/ui/scripts/instances.js +++ b/ui/scripts/instances.js @@ -338,11 +338,12 @@ notification: function(args) { return 'label.action.start.instance'; }, - complete: function(args) { + complete: function(args) { if(args.password != null) { - alert('Password of the VM is ' + args.password); + return 'Password of the VM is ' + args.password; } - return 'label.action.start.instance'; + + return false; } }, notification: { From 0fbf1b91b057036d9015d679866d7f938686375e Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Thu, 21 Mar 2013 15:27:44 -0700 Subject: [PATCH 067/123] Regions single-sign-on: Hide login form on redirect If login URL is passed to management server on load, show loading screen in place of login form. 
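In practice the whole change is a short-circuit taken before the login form is wired up. The sketch below illustrates that flow in isolation; it is not the patch itself, and it assumes only jQuery plus the $.cookie and $.urlParam helpers the UI already bundles. The '.login' selector and overlay markup are simplified stand-ins for what the diff that follows actually touches.

(function($) {
    // Mirrors the check added to cloudStack.js below: a loginUrl cookie or
    // URL parameter signals a single-sign-on redirect is in progress.
    function hasSsoLoginUrl() {
        return $.cookie('loginUrl') != null || $.urlParam('loginUrl') != 0;
    }

    $(function() {
        var $login = $('.login');

        if (hasSsoLoginUrl()) {
            // SSO: the management server will redirect shortly, so never show
            // the username/password form -- just a loading overlay.
            $login.children().hide();
            $login.append($('<div>').addClass('loading-overlay'));
        }
    });
})(jQuery);
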
--- ui/scripts/cloudStack.js | 6 ++++++ ui/scripts/ui-custom/login.js | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/ui/scripts/cloudStack.js b/ui/scripts/cloudStack.js index 00b06ab0e61..5b6d5195e1d 100644 --- a/ui/scripts/cloudStack.js +++ b/ui/scripts/cloudStack.js @@ -460,6 +460,12 @@ }; document.title = 'CloudStack'; + + if ($.cookie('loginUrl') != null || $.urlParam('loginUrl') != 0) { + // SSO + loginArgs.hideLoginScreen = true; + } + cloudStack.uiCustom.login(loginArgs); // Localization diff --git a/ui/scripts/ui-custom/login.js b/ui/scripts/ui-custom/login.js index c092b82ec0e..8769609e2a5 100644 --- a/ui/scripts/ui-custom/login.js +++ b/ui/scripts/ui-custom/login.js @@ -128,5 +128,11 @@ }); $languageSelect.val($.cookie('lang')); + + // Hide login screen, mainly for SSO + if (args.hideLoginScreen) { + $login.children().hide(); + $login.append($('
    ').addClass('loading-overlay')); + } }; })(jQuery, cloudStack); From 6ad7060b40015229f022d1cd323de266434d9279 Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Thu, 21 Mar 2013 15:56:34 -0700 Subject: [PATCH 068/123] Support labels in loading overlay Correctly style if placed in a loading overlay, to be positioned under loading icon --- ui/css/cloudstack3.css | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ui/css/cloudstack3.css b/ui/css/cloudstack3.css index e1dd02d4126..9b77b9f3a4f 100644 --- a/ui/css/cloudstack3.css +++ b/ui/css/cloudstack3.css @@ -467,6 +467,13 @@ div.list-view div.toolbar div.section-switcher div.section-select label { opacity: 0.7; } +.loading-overlay span { + display: block; + text-align: center; + margin: 155px 0 0 5px; + color: #4B4B4B; +} + .detail-view .ui-tabs-panel .loading-overlay { background-position: 50% 250px; } From c9f4e5130060fd7b8a2fa15febb070eba1f01519 Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Thu, 21 Mar 2013 15:57:05 -0700 Subject: [PATCH 069/123] Regions UI: Add 'redirecting to region' text during login loading --- client/WEB-INF/classes/resources/messages.properties | 1 + ui/dictionary.jsp | 1 + ui/scripts/ui-custom/login.js | 8 +++++++- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/client/WEB-INF/classes/resources/messages.properties b/client/WEB-INF/classes/resources/messages.properties index f0b6b363bc9..66b32acfc6c 100644 --- a/client/WEB-INF/classes/resources/messages.properties +++ b/client/WEB-INF/classes/resources/messages.properties @@ -17,6 +17,7 @@ #new labels (begin) ********************************************************************************************** +message.redirecting.region=Redirecting to region... label.use.vm.ip=Use VM IP: label.menu.regions=Regions label.region=Region diff --git a/ui/dictionary.jsp b/ui/dictionary.jsp index 6c06a10ed98..dcc7898e089 100644 --- a/ui/dictionary.jsp +++ b/ui/dictionary.jsp @@ -25,6 +25,7 @@ under the License. <% long now = System.currentTimeMillis(); %>
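
The two pieces above are meant to be used together: the new .loading-overlay span rule positions a text label under the loading icon, and message.redirecting.region supplies the localized string shown while the region redirect is in flight. The following is a rough, illustrative combination only -- not necessarily the actual login.js change -- assuming the _l() dictionary helper already used throughout the UI:

(function($) {
    // Build a loading overlay that carries a localized label beneath the icon,
    // styled by the ".loading-overlay span" rule added above.
    function makeRedirectOverlay() {
        return $('<div>').addClass('loading-overlay').append(
            $('<span>').html(_l('message.redirecting.region'))
        );
    }

    // Example usage while the login form is hidden for single-sign-on:
    // $('.login').append(makeRedirectOverlay());
})(jQuery);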