From 9535dc8f4da88991c0c8c1b23e49fda224c6ec0f Mon Sep 17 00:00:00 2001 From: abhishek Date: Wed, 1 Sep 2010 10:38:22 -0700 Subject: [PATCH 001/145] bug 6023: Implemented the search by group status 6023: resolved fixed --- .../xen/resource/CitrixResourceBase.java | 2 +- core/src/com/cloud/server/Criteria.java | 3 ++- core/src/com/cloud/vm/dao/UserVmDaoImpl.java | 2 ++ .../com/cloud/api/commands/ListVMsCmd.java | 10 ++++++++ .../cloud/server/ManagementServerImpl.java | 23 ++++++++++++++++--- 5 files changed, 35 insertions(+), 5 deletions(-) diff --git a/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java index 069e31b26c9..f04aa4dd702 100644 --- a/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java +++ b/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java @@ -1,5 +1,5 @@ /** -: * Copyright (C) 2010 Cloud.com, Inc. All rights reserved. + * Copyright (C) 2010 Cloud.com, Inc. All rights reserved. * * This software is licensed under the GNU General Public License v3 or later. 
* diff --git a/core/src/com/cloud/server/Criteria.java b/core/src/com/cloud/server/Criteria.java index f2d7090341c..6d41b967be9 100644 --- a/core/src/com/cloud/server/Criteria.java +++ b/core/src/com/cloud/server/Criteria.java @@ -76,7 +76,8 @@ public class Criteria { public static final String TARGET_IQN = "targetiqn"; public static final String SCOPE = "scope"; public static final String NETWORKGROUP = "networkGroup"; - + public static final String GROUP = "group"; + public static final String EMPTY_GROUP = "emptyGroup"; public Criteria(String orderBy, Boolean ascending, Long offset, Long limit) { this.offset = offset; diff --git a/core/src/com/cloud/vm/dao/UserVmDaoImpl.java b/core/src/com/cloud/vm/dao/UserVmDaoImpl.java index 6ab439486af..4e909b7b9f3 100755 --- a/core/src/com/cloud/vm/dao/UserVmDaoImpl.java +++ b/core/src/com/cloud/vm/dao/UserVmDaoImpl.java @@ -116,6 +116,8 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use DestroySearch.and("updateTime", DestroySearch.entity().getUpdateTime(), SearchCriteria.Op.LT); DestroySearch.done(); + + _updateTimeAttr = _allAttributes.get("updateTime"); assert _updateTimeAttr != null : "Couldn't get this updateTime attribute"; } diff --git a/server/src/com/cloud/api/commands/ListVMsCmd.java b/server/src/com/cloud/api/commands/ListVMsCmd.java index 0a61217a95c..5989d982630 100644 --- a/server/src/com/cloud/api/commands/ListVMsCmd.java +++ b/server/src/com/cloud/api/commands/ListVMsCmd.java @@ -53,6 +53,7 @@ public class ListVMsCmd extends BaseCmd { s_properties.add(new Pair(BaseCmd.Properties.STATE, Boolean.FALSE)); s_properties.add(new Pair(BaseCmd.Properties.ZONE_ID, Boolean.FALSE)); s_properties.add(new Pair(BaseCmd.Properties.POD_ID, Boolean.FALSE)); + s_properties.add(new Pair(BaseCmd.Properties.GROUP, Boolean.FALSE)); s_properties.add(new Pair(BaseCmd.Properties.HOST_ID, Boolean.FALSE)); s_properties.add(new Pair(BaseCmd.Properties.KEYWORD, Boolean.FALSE)); s_properties.add(new 
Pair(BaseCmd.Properties.ACCOUNT, Boolean.FALSE)); @@ -82,6 +83,7 @@ public class ListVMsCmd extends BaseCmd { Long zoneId = (Long)params.get(BaseCmd.Properties.ZONE_ID.getName()); Long podId = (Long)params.get(BaseCmd.Properties.POD_ID.getName()); Long hostId = (Long)params.get(BaseCmd.Properties.HOST_ID.getName()); + String group = (String)params.get(BaseCmd.Properties.GROUP.getName()); String keyword = (String)params.get(BaseCmd.Properties.KEYWORD.getName()); Integer page = (Integer)params.get(BaseCmd.Properties.PAGE.getName()); Integer pageSize = (Integer)params.get(BaseCmd.Properties.PAGESIZE.getName()); @@ -140,6 +142,14 @@ public class ListVMsCmd extends BaseCmd { if(zoneId != null) c.addCriteria(Criteria.DATACENTERID, zoneId); + if(group != null) + { + if(group.equals("")) + c.addCriteria(Criteria.EMPTY_GROUP, group); + else + c.addCriteria(Criteria.GROUP, group); + } + // ignore these search requests if it's not an admin if (isAdmin == true) { c.addCriteria(Criteria.DOMAINID, domainId); diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index 3c65b3033e3..a1ce58b1599 100644 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -4989,7 +4989,6 @@ public class ManagementServerImpl implements ManagementServer { public List searchForUserVMs(Criteria c) { Filter searchFilter = new Filter(UserVmVO.class, c.getOrderBy(), c.getAscending(), c.getOffset(), c.getLimit()); SearchBuilder sb = _userVmDao.createSearchBuilder(); - // some criteria matter for generating the join condition Object[] accountIds = (Object[]) c.getCriteria(Criteria.ACCOUNTID); Object domainId = c.getCriteria(Criteria.DOMAINID); @@ -5006,7 +5005,8 @@ public class ManagementServerImpl implements ManagementServer { Object keyword = c.getCriteria(Criteria.KEYWORD); Object isAdmin = c.getCriteria(Criteria.ISADMIN); Object ipAddress = 
c.getCriteria(Criteria.IPADDRESS); - + Object vmGroup = c.getCriteria(Criteria.GROUP); + Object emptyGroup = c.getCriteria(Criteria.EMPTY_GROUP); sb.and("displayName", sb.entity().getDisplayName(), SearchCriteria.Op.LIKE); sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); sb.and("accountIdEQ", sb.entity().getAccountId(), SearchCriteria.Op.EQ); @@ -5020,7 +5020,8 @@ public class ManagementServerImpl implements ManagementServer { sb.and("hostIdEQ", sb.entity().getHostId(), SearchCriteria.Op.EQ); sb.and("hostIdIN", sb.entity().getHostId(), SearchCriteria.Op.IN); sb.and("guestIP", sb.entity().getGuestIpAddress(), SearchCriteria.Op.EQ); - + sb.and("groupEQ", sb.entity().getGroup(),SearchCriteria.Op.EQ); + if ((accountIds == null) && (domainId != null)) { // if accountId isn't specified, we can do a domain match for the admin case SearchBuilder domainSearch = _domainDao.createSearchBuilder(); @@ -5109,7 +5110,23 @@ public class ManagementServerImpl implements ManagementServer { if (ipAddress != null) { sc.setParameters("guestIP", ipAddress); } + + if(vmGroup!=null) + sc.setParameters("groupEQ", vmGroup); + + if (emptyGroup!= null) + { + SearchBuilder emptyGroupSearch = _userVmDao.createSearchBuilder(); + emptyGroupSearch.and("group", emptyGroupSearch.entity().getGroup(), SearchCriteria.Op.EQ); + emptyGroupSearch.or("null", emptyGroupSearch.entity().getGroup(), SearchCriteria.Op.NULL); + SearchCriteria sc1 = _userVmDao.createSearchCriteria(); + sc1 = emptyGroupSearch.create(); + sc1.setParameters("group", ""); + + sc.addAnd("group", SearchCriteria.Op.SC, sc1); + } + return _userVmDao.search(sc, searchFilter); } From c761746ff25f7d85431b12ddb1d886972f324f6e Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 1 Sep 2010 11:08:04 -0700 Subject: [PATCH 002/145] New UI VM wizard-add name field, group field --- ui/new/index.jsp | 2 +- ui/new/jsp/tab_instance.jsp | 25 ++++++++++++++-- ui/new/scripts/cloud.core.instance.js | 41 ++++++++++++++------------- 3 files changed, 45 
insertions(+), 23 deletions(-) diff --git a/ui/new/index.jsp b/ui/new/index.jsp index 35b3e71c526..04f8230d2ae 100644 --- a/ui/new/index.jsp +++ b/ui/new/index.jsp @@ -608,7 +608,7 @@

- IP Address: + IP Address:

-
+
@@ -537,7 +537,28 @@ Network:
+
+
+
+
+
+
+ Name (optional): +
+ + +
+
+
+
+
+
+ Group (optional):
+ + +
+
@@ -570,7 +591,7 @@ diff --git a/ui/new/scripts/cloud.core.instance.js b/ui/new/scripts/cloud.core.instance.js index e5d41b4e533..94cc5e51c3f 100755 --- a/ui/new/scripts/cloud.core.instance.js +++ b/ui/new/scripts/cloud.core.instance.js @@ -518,13 +518,13 @@ function clickInstanceGroupHeader($arrowIcon) { //***** data disk offering: "no, thanks", "custom", existing disk offerings in database (begin) **************************************************** //"no, thanks" radio button (default radio button in data disk offering) var $t = $noDiskOfferingTemplate.clone(); - $t.find("input:radio").attr("name","data_disk_offering_radio").val("no"); + $t.find("input:radio").attr("name","data_disk_offering_radio"); $t.find("#name").text("no, thanks"); $dataDiskOfferingContainer.append($t.show()); //"custom" radio button var $t = $customDiskOfferingTemplate.clone(); - $t.find("input:radio").attr("name","data_disk_offering_radio").val("custom").removeAttr("checked"); + $t.find("input:radio").attr("name","data_disk_offering_radio").removeAttr("checked"); $t.find("#name").text("custom:"); $dataDiskOfferingContainer.append($t.show()); @@ -947,25 +947,17 @@ function clickInstanceGroupHeader($arrowIcon) { } - if (currentStepInVmPopup == 5) { //last step + if (currentStepInVmPopup == 5) { //last step + // validate values + var isValid = true; + isValid &= validateString("Name", $thisPopup.find("#wizard_vm_name"), $thisPopup.find("#wizard_vm_name_errormsg"), true); //optional + isValid &= validateString("Group", $thisPopup.find("#wizard_vm_group"), $thisPopup.find("#wizard_vm_group_errormsg"), true); //optional + if (!isValid) return; + // Create a new VM!!!! 
var moreCriteria = []; moreCriteria.push("&zoneId="+$thisPopup.find("#wizard_zone").val()); - - var name = trim($thisPopup.find("#wizard_vm_name").val()); - if (name != null && name.length > 0) - moreCriteria.push("&displayname="+encodeURIComponent(name)); - - var group = trim($thisPopup.find("#wizard_vm_group").val()); - if (group != null && group.length > 0) - moreCriteria.push("&group="+encodeURIComponent(group)); - - /* - if($thisPopup.find("#wizard_network_groups_container").css("display") != "none" && $thisPopup.find("#wizard_network_groups").val() != null) { - var networkGroupList = $thisPopup.find("#wizard_network_groups").val().join(","); - moreCriteria.push("&networkgrouplist="+encodeURIComponent(networkGroupList)); - } - */ + moreCriteria.push("&templateId="+$thisPopup.find("#step1 .rev_wiztemplistbox_selected").attr("id")); moreCriteria.push("&serviceOfferingId="+$thisPopup.find("input:radio[name=service_offering_radio]:checked").val()); @@ -976,14 +968,22 @@ function clickInstanceGroupHeader($arrowIcon) { else //template diskOfferingId = $thisPopup.find("#data_disk_offering_container input[name=data_disk_offering_radio]:checked").val(); - if(diskOfferingId != null && diskOfferingId != "" && diskOfferingId != "none" && diskOfferingId != "custom") + if(diskOfferingId != null && diskOfferingId != "" && diskOfferingId != "no" && diskOfferingId != "custom") moreCriteria.push("&diskOfferingId="+diskOfferingId); + + var name = trim($thisPopup.find("#wizard_vm_name").val()); + if (name != null && name.length > 0) + moreCriteria.push("&displayname="+encodeURIComponent(name)); + + var group = trim($thisPopup.find("#wizard_vm_group").val()); + if (group != null && group.length > 0) + moreCriteria.push("&group="+encodeURIComponent(group)); vmWizardClose(); var $t = $("#midmenu_item_vm").clone(); $t.find("#vm_name").text("Adding...."); - $t.find("#ip_address_container #label").hide(); + $t.find("#ip_address_container #label").html(" "); 
$t.find("#content").addClass("inaction"); $t.find("#spinning_wheel").show(); $("#midmenu_container").append($t.show()); @@ -1014,6 +1014,7 @@ function clickInstanceGroupHeader($arrowIcon) { $t.find("#spinning_wheel").hide(); if (result.jobstatus == 1) { // Succeeded + $t.find("#ip_address_container #label").text("IP Address:"); $t.find("#info_icon").removeClass("error").show(); $t.data("afterActionInfo", ("Adding succeeded.")); if("virtualmachine" in result) From e92af8c72a5aa3c9520b2432a180c3261139e92b Mon Sep 17 00:00:00 2001 From: edison Date: Wed, 1 Sep 2010 11:17:06 -0700 Subject: [PATCH 003/145] Fix the bug: Remove host when the host is in disconnected state, then you will fail to add the same host back. --- server/src/com/cloud/agent/manager/AgentManagerImpl.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/com/cloud/agent/manager/AgentManagerImpl.java b/server/src/com/cloud/agent/manager/AgentManagerImpl.java index 3d0ed144903..991f4348452 100755 --- a/server/src/com/cloud/agent/manager/AgentManagerImpl.java +++ b/server/src/com/cloud/agent/manager/AgentManagerImpl.java @@ -548,11 +548,12 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory { _dcDao.releasePrivateIpAddress(host.getPrivateIpAddress(), host.getDataCenterId(), null); AgentAttache attache = _agents.get(hostId); handleDisconnect(attache, Status.Event.Remove, false); - /* + + /*Disconnected agent needs special handling here*/ host.setGuid(null); host.setClusterId(null); _hostDao.update(host.getId(), host); - */ + _hostDao.remove(hostId); //delete the associated primary storage from db From 1d7e376b1f850b54bf29125b55084e57b8d0f279 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 10:59:31 -0700 Subject: [PATCH 004/145] Move migration/*sql to setup/db/ where migration tool needs them --- setup/db/{migration => }/data-21to22.sql | 0 setup/db/{migration => }/schema-21to22.sql | 0 2 files changed, 0 insertions(+), 0 
deletions(-) rename setup/db/{migration => }/data-21to22.sql (100%) rename setup/db/{migration => }/schema-21to22.sql (100%) diff --git a/setup/db/migration/data-21to22.sql b/setup/db/data-21to22.sql similarity index 100% rename from setup/db/migration/data-21to22.sql rename to setup/db/data-21to22.sql diff --git a/setup/db/migration/schema-21to22.sql b/setup/db/schema-21to22.sql similarity index 100% rename from setup/db/migration/schema-21to22.sql rename to setup/db/schema-21to22.sql From ae942449cf3449df8b21497b38f0216b5eb90e60 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 10:59:56 -0700 Subject: [PATCH 005/145] Add 2.1.3 migration step --- setup/bindir/cloud-migrate-databases.in | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/setup/bindir/cloud-migrate-databases.in b/setup/bindir/cloud-migrate-databases.in index dab1515d50c..5c6abd86698 100644 --- a/setup/bindir/cloud-migrate-databases.in +++ b/setup/bindir/cloud-migrate-databases.in @@ -150,6 +150,11 @@ class From21datamigratedTo21postprocessed(cloud_utils.MigrationStep): to_level = "2.1" def run(self): self.context.run_sql_resource("postprocess-20to21.sql") +class From21To213(cloud_utils.MigrationStep): + def __str__(self): return "Dropping obsolete indexes" + from_level = "2.1" + to_level = "2.1.3" + def run(self): self.context.run_sql_resource("index-212to213.sql") # command line harness functions From 98c00de7ff07b06fba73cffd076a09dbdf6b4bb2 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 11:00:05 -0700 Subject: [PATCH 006/145] Add 2.2 migration steps --- setup/bindir/cloud-migrate-databases.in | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/setup/bindir/cloud-migrate-databases.in b/setup/bindir/cloud-migrate-databases.in index 5c6abd86698..6adffa75d3d 100644 --- a/setup/bindir/cloud-migrate-databases.in +++ b/setup/bindir/cloud-migrate-databases.in @@ -156,6 +156,18 @@ class From21To213(cloud_utils.MigrationStep): 
to_level = "2.1.3" def run(self): self.context.run_sql_resource("index-212to213.sql") +class From213To22data(cloud_utils.MigrationStep): + def __str__(self): return "Migrating data" + from_level = "2.1.3" + to_level = "2.2-01" + def run(self): self.context.run_sql_resource("data-21to22.sql") + +class From22dataTo22(cloud_utils.MigrationStep): + def __str__(self): return "Migrating indexes" + from_level = "2.2-01" + to_level = "2.2" + def run(self): self.context.run_sql_resource("index-21to22.sql") + # command line harness functions def setup_logging(level): From b6322464ca6953dcd42e58c4285913bc79a9b2f9 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 11:30:32 -0700 Subject: [PATCH 007/145] Write the current schema level out to the database when doing deploydb or setup-databases --- setup/bindir/cloud-setup-databases.in | 7 +++++++ setup/db/schema-level.sql | 1 + wscript | 6 ++++++ 3 files changed, 14 insertions(+) create mode 100644 setup/db/schema-level.sql diff --git a/setup/bindir/cloud-setup-databases.in b/setup/bindir/cloud-setup-databases.in index 6c34575fcc0..3fe66d9c111 100755 --- a/setup/bindir/cloud-setup-databases.in +++ b/setup/bindir/cloud-setup-databases.in @@ -352,3 +352,10 @@ if rootuser: print "Applying file %s to the database on server %s:%s"%(p,host,port) try: run_mysql(text,rootuser,rootpassword,host,port) except CalledProcessError: sys.exit(22) + + p = os.path.join(dbfilepath,"schema-level.sql") + if os.path.isfile(p): + text = file(p).read() + print "Applying file %s to the database on server %s:%s"%(p,host,port) + try: run_mysql(text,rootuser,rootpassword,host,port) + except CalledProcessError: sys.exit(22) diff --git a/setup/db/schema-level.sql b/setup/db/schema-level.sql new file mode 100644 index 00000000000..4f2f0645482 --- /dev/null +++ b/setup/db/schema-level.sql @@ -0,0 +1 @@ +INSERT INTO `cloud`.`configuration` (category, instance, component, name, value, description) VALUES ('Hidden', 'DEFAULT', 
'database', 'schema.level', '2.1.3', 'The schema level of this database'); diff --git a/wscript b/wscript index 9286ded759a..e04ee7e8980 100644 --- a/wscript +++ b/wscript @@ -777,6 +777,12 @@ def deploydb(ctx,virttech=None): after = after + file(p).read() Utils.pprint("GREEN","Reading database code from %s"%p) + p = _join("setup","db","schema-level.sql") + if _exists(p): + p = dev_override(p) + after = after + file(p).read() + Utils.pprint("GREEN","Reading database code from %s"%p) + cmd = ["mysql","--user=%s"%dbuser,"-h",dbhost,"--password=%s"%dbpw] Utils.pprint("GREEN","Deploying post-configuration database scripts to %s (user %s)"%(dbhost,dbuser)) Utils.pprint("BLUE"," ".join(cmd)) From 89f6cb81f5fe12262a0bfb007b47c10bc6895a61 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 11:32:21 -0700 Subject: [PATCH 008/145] The database schema level for this branch is 2.2 --- setup/db/schema-level.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/db/schema-level.sql b/setup/db/schema-level.sql index 4f2f0645482..e3b0eea48fb 100644 --- a/setup/db/schema-level.sql +++ b/setup/db/schema-level.sql @@ -1 +1 @@ -INSERT INTO `cloud`.`configuration` (category, instance, component, name, value, description) VALUES ('Hidden', 'DEFAULT', 'database', 'schema.level', '2.1.3', 'The schema level of this database'); +INSERT INTO `cloud`.`configuration` (category, instance, component, name, value, description) VALUES ('Hidden', 'DEFAULT', 'database', 'schema.level', '2.2', 'The schema level of this database'); From ef8e63c22cbcfb055f22663475003819060678c7 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 11:48:42 -0700 Subject: [PATCH 009/145] Added database migration information in the handbook --- README.html | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/README.html b/README.html index 2ece7a070e7..192e2dd79cd 100644 --- a/README.html +++ b/README.html @@ 
-743,6 +743,28 @@ See the files in the {{{debian/}}} folder.
Not done yet!
+
+
To support incremental migration from one version to another without having to redeploy the database, the CloudStack supports an incremental schema migration mechanism for the database.
+!!!How does it work?
+When the database is deployed for the first time with [[waf deploydb]] or the command {{{cloud-setup-databases}}}, a row is written to the {{{configuration}}} table, named {{{schema.level}}} and containing the current schema level.  This schema level row comes from the file {{{setup/db/schema-level.sql}}} in the source (refer to the [[Installation paths]] topic to find out where this file is installed in a running system).
+
+This value is used by the database migrator {{{cloud-migrate-databases}}} (source {{{setup/bindir/cloud-migrate-databases.in}}}) to determine the starting schema level.  The database migrator has a series of classes -- each class represents a step in the migration process and is usually tied to the execution of a SQL file stored in {{{setup/db}}}.  To migrate the database, the database migrator:
+# walks the list of steps it knows about,
+# generates a list of steps sorted by the order they should be executed in,
+# executes each step in order
+# at the end of each step, records the new schema level to the database table {{{configuration}}}
+For more information, refer to the database migrator source -- it is documented.
+!!!What impact does this have on me as a developer?
+Whenever you need to evolve the schema of the database:
+# write a migration SQL script and store it in {{{setup/db}}},
+# include your schema changes in the appropriate SQL file {{{create-*.sql}}} too (as the database is expected to be at its latest evolved schema level right after deploying a fresh database)
+# write a class in {{{setup/bindir/cloud-migrate-databases.in}}}, describing the migration step; in detail:
+## the schema level your migration step expects the database to be in,
+## the schema level your migration step will leave your database in (presumably the latest schema level, which you will have to choose!),
+## and the name / description of the step
+# bump the schema level in {{{setup/db/schema-level.sql}}} to the latest schema level
+Otherwise, ''end-user migration will fail catastrophically''.
+
[[Welcome]]
@@ -752,7 +774,7 @@ See the files in the {{{debian/}}} folder.
Not done yet!
-
+
Start here if you want to learn the essentials to extend, modify and enhance the CloudStack.  This assumes that you've already familiarized yourself with CloudStack concepts, installation and configuration using the [[Getting started|Welcome]] instructions.
 * [[Obtain the source|Obtaining the source]]
 * [[Prepare your environment|Preparing your development environment]]
@@ -764,6 +786,7 @@ Extra developer information:
 * [[How to integrate with Eclipse]]
 * [[Starting over]]
 * [[Making a source release|waf dist]]
+* [[How to write database migration scripts|Database migration infrastructure]]
 
From f3aa8eccfa00516df06e427a5e2057615686028f Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 11:57:25 -0700 Subject: [PATCH 010/145] Add schema-level to the package manifests --- cloud.spec | 1 + debian/cloud-setup.install | 1 + 2 files changed, 2 insertions(+) diff --git a/cloud.spec b/cloud.spec index 690c816876e..addbb87fd63 100644 --- a/cloud.spec +++ b/cloud.spec @@ -582,6 +582,7 @@ fi %{_datadir}/%{name}/setup/index-212to213.sql %{_datadir}/%{name}/setup/postprocess-20to21.sql %{_datadir}/%{name}/setup/schema-20to21.sql +%{_datadir}/%{name}/setup/schema-level.sql %doc README %doc INSTALL %doc HACKING diff --git a/debian/cloud-setup.install b/debian/cloud-setup.install index 7d35dbe9929..4b7929819d1 100644 --- a/debian/cloud-setup.install +++ b/debian/cloud-setup.install @@ -12,3 +12,4 @@ /usr/share/cloud/setup/index-212to213.sql /usr/share/cloud/setup/postprocess-20to21.sql /usr/share/cloud/setup/schema-20to21.sql +/usr/share/cloud/setup/schema-level.sql From e7d4265500fe9dd27863bdac400fa9dfc2db118e Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 11:59:21 -0700 Subject: [PATCH 011/145] Add 2.1 to 2.2 schema migration files to the package manifests --- cloud.spec | 2 ++ debian/cloud-setup.install | 2 ++ 2 files changed, 4 insertions(+) diff --git a/cloud.spec b/cloud.spec index addbb87fd63..8207e103408 100644 --- a/cloud.spec +++ b/cloud.spec @@ -583,6 +583,8 @@ fi %{_datadir}/%{name}/setup/postprocess-20to21.sql %{_datadir}/%{name}/setup/schema-20to21.sql %{_datadir}/%{name}/setup/schema-level.sql +%{_datadir}/%{name}/setup/schema-21to22.sql +%{_datadir}/%{name}/setup/data-21to22.sql %doc README %doc INSTALL %doc HACKING diff --git a/debian/cloud-setup.install b/debian/cloud-setup.install index 4b7929819d1..48969370521 100644 --- a/debian/cloud-setup.install +++ b/debian/cloud-setup.install @@ -13,3 +13,5 @@ /usr/share/cloud/setup/postprocess-20to21.sql /usr/share/cloud/setup/schema-20to21.sql 
/usr/share/cloud/setup/schema-level.sql +/usr/share/cloud/setup/schema-21to22.sql +/usr/share/cloud/setup/data-21to22.sql From 398e5774aefc5d065adab044f209cfa224fa94cc Mon Sep 17 00:00:00 2001 From: abhishek Date: Wed, 1 Sep 2010 13:52:48 -0700 Subject: [PATCH 012/145] Merging 6065 into master --- server/src/com/cloud/server/ManagementServerImpl.java | 3 +++ server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java | 3 +++ 2 files changed, 6 insertions(+) diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index a1ce58b1599..64b42e24412 100644 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -921,6 +921,9 @@ public class ManagementServerImpl implements ManagementServer { // Mark the account's volumes as destroyed List volumes = _volumeDao.findDetachedByAccount(accountId); for (VolumeVO volume : volumes) { + if(volume.getPoolId()==null){ + accountCleanupNeeded = true; + } _storageMgr.destroyVolume(volume); } diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java index c6f82d2f567..d2cd55e2c68 100644 --- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -1006,6 +1006,9 @@ public class SnapshotManagerImpl implements SnapshotManager { // i.e Call them before the VMs for those volumes are destroyed. 
boolean success = true; for (VolumeVO volume : volumes) { + if(volume.getPoolId()==null){ + continue; + } Long volumeId = volume.getId(); Long dcId = volume.getDataCenterId(); String secondaryStoragePoolURL = _storageMgr.getSecondaryStorageURL(dcId); From 6cc9e740e4f4a23e0d27f9ac0e1ca573de808edd Mon Sep 17 00:00:00 2001 From: abhishek Date: Wed, 1 Sep 2010 14:06:35 -0700 Subject: [PATCH 013/145] organizing imports --- .../cloud/server/ManagementServerImpl.java | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index 64b42e24412..053bd0484a4 100644 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -101,15 +101,15 @@ import com.cloud.async.executor.SecurityGroupParam; import com.cloud.async.executor.UpdateLoadBalancerParam; import com.cloud.async.executor.UpgradeVMParam; import com.cloud.async.executor.VMOperationParam; -import com.cloud.async.executor.VMOperationParam.VmOp; import com.cloud.async.executor.VolumeOperationParam; +import com.cloud.async.executor.VMOperationParam.VmOp; import com.cloud.async.executor.VolumeOperationParam.VolumeOp; import com.cloud.capacity.CapacityVO; import com.cloud.capacity.dao.CapacityDao; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.ConfigurationVO; -import com.cloud.configuration.ResourceCount.ResourceType; import com.cloud.configuration.ResourceLimitVO; +import com.cloud.configuration.ResourceCount.ResourceType; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.configuration.dao.ResourceLimitDao; import com.cloud.consoleproxy.ConsoleProxyManager; @@ -119,8 +119,8 @@ import com.cloud.dc.DataCenterIpAddressVO; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; import com.cloud.dc.PodVlanMapVO; -import com.cloud.dc.Vlan.VlanType; import 
com.cloud.dc.VlanVO; +import com.cloud.dc.Vlan.VlanType; import com.cloud.dc.dao.AccountVlanMapDao; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; @@ -175,7 +175,6 @@ import com.cloud.network.security.NetworkGroupRulesVO; import com.cloud.network.security.NetworkGroupVO; import com.cloud.network.security.dao.NetworkGroupDao; import com.cloud.offering.NetworkOffering; -import com.cloud.offering.NetworkOffering.GuestIpType; import com.cloud.offering.ServiceOffering; import com.cloud.serializer.GsonHelper; import com.cloud.server.auth.UserAuthenticator; @@ -188,13 +187,10 @@ import com.cloud.storage.GuestOSCategoryVO; import com.cloud.storage.GuestOSVO; import com.cloud.storage.LaunchPermissionVO; import com.cloud.storage.Snapshot; -import com.cloud.storage.Snapshot.SnapshotType; import com.cloud.storage.SnapshotPolicyVO; import com.cloud.storage.SnapshotScheduleVO; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; -import com.cloud.storage.Storage.FileSystem; -import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolVO; @@ -202,9 +198,12 @@ import com.cloud.storage.StorageStats; import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.Volume.VolumeType; import com.cloud.storage.VolumeStats; import com.cloud.storage.VolumeVO; +import com.cloud.storage.Snapshot.SnapshotType; +import com.cloud.storage.Storage.FileSystem; +import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.Volume.VolumeType; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.DiskTemplateDao; import com.cloud.storage.dao.GuestOSCategoryDao; @@ -215,9 +214,9 @@ import com.cloud.storage.dao.SnapshotPolicyDao; import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.StoragePoolHostDao; 
import com.cloud.storage.dao.VMTemplateDao; -import com.cloud.storage.dao.VMTemplateDao.TemplateFilter; import com.cloud.storage.dao.VMTemplateHostDao; import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VMTemplateDao.TemplateFilter; import com.cloud.storage.preallocatedlun.PreallocatedLunVO; import com.cloud.storage.preallocatedlun.dao.PreallocatedLunDao; import com.cloud.storage.secondary.SecondaryStorageVmManager; @@ -239,12 +238,12 @@ import com.cloud.user.dao.UserDao; import com.cloud.user.dao.UserStatisticsDao; import com.cloud.uservm.UserVm; import com.cloud.utils.DateUtil; -import com.cloud.utils.DateUtil.IntervalType; import com.cloud.utils.EnumUtils; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.PasswordGenerator; import com.cloud.utils.StringUtils; +import com.cloud.utils.DateUtil.IntervalType; import com.cloud.utils.component.Adapters; import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.concurrency.NamedThreadFactory; From 1bbd8943e242372825b621fb28a792c6c92b6dde Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Wed, 1 Sep 2010 15:37:05 -0700 Subject: [PATCH 014/145] encode password before hashing it. 
--- ui/test/scripts/cloud.core.test.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ui/test/scripts/cloud.core.test.js b/ui/test/scripts/cloud.core.test.js index e9c61562647..0aacfd85a66 100644 --- a/ui/test/scripts/cloud.core.test.js +++ b/ui/test/scripts/cloud.core.test.js @@ -290,7 +290,7 @@ $(document).ready(function() { submenuContent.find("#grid_content").prepend(template.fadeIn("slow")); var username = thisDialog.find("#add_user_username").val(); - var password = $.md5(thisDialog.find("#add_user_password").val()); + var password = $.md5(encodeURIComponent(thisDialog.find("#add_user_password").val())); var email = thisDialog.find("#add_user_email").val(); if(email == "") email = username; @@ -318,7 +318,7 @@ $(document).ready(function() { $.ajax({ type: "POST", - data: createURL("command=createUser&username="+encodeURIComponent(username)+"&password="+encodeURIComponent(password)+"&email="+encodeURIComponent(email)+"&firstname="+encodeURIComponent(firstname)+"&lastname="+encodeURIComponent(lastname)+"&account="+account+"&accounttype="+accountType+"&domainid="+domainId+moreCriteria.join("")+"&response=json"), + data: createURL("command=createUser&username="+encodeURIComponent(username)+"&password="+password+"&email="+encodeURIComponent(email)+"&firstname="+encodeURIComponent(firstname)+"&lastname="+encodeURIComponent(lastname)+"&account="+account+"&accounttype="+accountType+"&domainid="+domainId+moreCriteria.join("")+"&response=json"), dataType: "json", async: false, success: function(json) { From d550ab624584b6ee3c64f65aa4b7235ef8463422 Mon Sep 17 00:00:00 2001 From: edison Date: Wed, 1 Sep 2010 15:40:19 -0700 Subject: [PATCH 015/145] fix template permission --- .../computing/LibvirtComputingResource.java | 21 +++++-------------- scripts/storage/qcow2/createtmplt.sh | 4 ++++ 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/agent/src/com/cloud/agent/resource/computing/LibvirtComputingResource.java 
b/agent/src/com/cloud/agent/resource/computing/LibvirtComputingResource.java index ad7c036fe31..c776001560e 100644 --- a/agent/src/com/cloud/agent/resource/computing/LibvirtComputingResource.java +++ b/agent/src/com/cloud/agent/resource/computing/LibvirtComputingResource.java @@ -1428,10 +1428,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv secondaryPool = getNfsSPbyURI(_conn, new URI(cmd.getSecondaryStoragePoolURL())); /*TODO: assuming all the storage pools mounted under _mountPoint, the mount point should be got from pool.dumpxml*/ String templatePath = _mountPoint + File.separator + secondaryPool.getUUIDString() + File.separator + templateInstallFolder; - File f = new File(templatePath); - if (!f.exists()) { - f.mkdirs(); - } + _storage.mkdirs(templatePath); + String tmplPath = templateInstallFolder + File.separator + tmplFileName; Script command = new Script(_createTmplPath, _timeout, s_logger); command.add("-t", templatePath); @@ -1487,10 +1485,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv secondaryStorage = getNfsSPbyURI(_conn, new URI(secondaryStorageURL)); /*TODO: assuming all the storage pools mounted under _mountPoint, the mount point should be got from pool.dumpxml*/ String tmpltPath = _mountPoint + File.separator + secondaryStorage.getUUIDString() + templateInstallFolder; - File mpfile = new File(tmpltPath); - if (!mpfile.exists()) { - mpfile.mkdirs(); - } + _storage.mkdirs(tmpltPath); Script command = new Script(_createTmplPath, _timeout, s_logger); command.add("-f", cmd.getSnapshotPath()); @@ -1589,10 +1584,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv if (sp == null) { try { - File tpFile = new File(targetPath); - if (!tpFile.exists()) { - tpFile.mkdir(); - } + _storage.mkdir(targetPath); LibvirtStoragePoolDef spd = new LibvirtStoragePoolDef(poolType.NFS, uuid, uuid, sourceHost, sourcePath, targetPath); s_logger.debug(spd.toString()); 
@@ -1702,10 +1694,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv String targetPath = _mountPoint + File.separator + pool.getUuid(); LibvirtStoragePoolDef spd = new LibvirtStoragePoolDef(poolType.NFS, pool.getUuid(), pool.getUuid(), pool.getHostAddress(), pool.getPath(), targetPath); - File tpFile = new File(targetPath); - if (!tpFile.exists()) { - tpFile.mkdirs(); - } + _storage.mkdir(targetPath); StoragePool sp = null; try { s_logger.debug(spd.toString()); diff --git a/scripts/storage/qcow2/createtmplt.sh b/scripts/storage/qcow2/createtmplt.sh index ef75c6270e9..84b081ce408 100755 --- a/scripts/storage/qcow2/createtmplt.sh +++ b/scripts/storage/qcow2/createtmplt.sh @@ -78,6 +78,7 @@ create_from_file() { then rm -f $tmpltimg fi + chmod +r /$tmpltfs/$tmpltname } create_from_snapshot() { @@ -92,6 +93,8 @@ create_from_snapshot() { printf "Failed to create template /$tmplfs/$tmpltname from snapshot $snapshotName on disk $tmpltImg " exit 2 fi + + chmod +r /$tmpltfs/$tmpltname } tflag= @@ -165,6 +168,7 @@ else fi touch /$tmpltfs/template.properties +chmod +r /$tmpltfs/template.properties echo -n "" > /$tmpltfs/template.properties today=$(date '+%m_%d_%Y') From df2455a39ed6b611027f85e638d7a93e009609bb Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 14:45:53 -0700 Subject: [PATCH 016/145] no necessity to depend on the dict hash to do the tar up --- tools/waf/tar.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/tools/waf/tar.py b/tools/waf/tar.py index c0293337b90..45c4c05746f 100644 --- a/tools/waf/tar.py +++ b/tools/waf/tar.py @@ -53,16 +53,9 @@ def apply_tar(self): node = self.path.find_resource(x) if not node:raise Utils.WafError('cannot find input file %s for processing'%x) ins.append(node) - if self.dict and not self.env['DICT_HASH']: - self.env=self.env.copy() - keys=list(self.dict.keys()) - keys.sort() - lst=[self.dict[x]for x in keys] - 
self.env['DICT_HASH']=str(Utils.h_list(lst)) tsk=self.create_task('tar',ins,out) tsk.fun=self.fun tsk.dict=self.dict - tsk.dep_vars=['DICT_HASH'] tsk.install_path=self.install_path tsk.chmod=self.chmod if not tsk.env: @@ -71,4 +64,4 @@ def apply_tar(self): Task.task_type_from_func('tar',func=tar_up) feature('tar')(apply_tar) -before('apply_core')(apply_tar) \ No newline at end of file +before('apply_core')(apply_tar) From b670b7f656b874e5bb97db6d74a42b0be94833a5 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 15:39:01 -0500 Subject: [PATCH 017/145] Patch generation (tar waf tool) fixed to work properly on windows and linux. --- patches/wscript_build | 2 +- tools/waf/tar.py | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/patches/wscript_build b/patches/wscript_build index 4351d4e605d..0cdfb7302a3 100644 --- a/patches/wscript_build +++ b/patches/wscript_build @@ -10,7 +10,7 @@ for virttech in Utils.to_list(bld.path.ant_glob("*",dir=True)): source = patchfiles, target = '%s-patch.tgz'%virttech, name = '%s-patch_tgz'%virttech, - root = "patches/%s"%virttech, + root = os.path.join("patches",virttech), rename = lambda x: re.sub(".subst$","",x), after = 'patchsubst', ) diff --git a/tools/waf/tar.py b/tools/waf/tar.py index 45c4c05746f..a6948d3a18c 100644 --- a/tools/waf/tar.py +++ b/tools/waf/tar.py @@ -2,7 +2,7 @@ import Utils import tarfile from TaskGen import feature, before import Task -import os +import os, sys # this is a clever little thing # given a list of nodes, build or source @@ -14,9 +14,9 @@ import os def tar_up(task): tgt = task.outputs[0].bldpath(task.env) if os.path.exists(tgt): os.unlink(tgt) - if tgt.lower().endswith(".bz2"): z = tarfile.open(tgt,"w:bz2") - elif tgt.lower().endswith(".gz"): z = tarfile.open(tgt,"w:gz") - elif tgt.lower().endswith(".tgz"): z = tarfile.open(tgt,"w:gz") + if tgt.lower().endswith(".bz2"): z = tarfile.open(tgt,"w:bz2") + elif tgt.lower().endswith(".gz"): z = 
tarfile.open(tgt,"w:gz") + elif tgt.lower().endswith(".tgz"): z = tarfile.open(tgt,"w:gz") else: z = tarfile.open(tgt,"w") fileset = {} for inp in task.inputs: @@ -25,16 +25,16 @@ def tar_up(task): srcname = Utils.relpath(src,os.path.join("..",".")) # file in source dir else: srcname = Utils.relpath(src,os.path.join(task.env.variant(),".")) # file in artifacts dir + srcname = srcname.split(os.path.sep,len(task.generator.root.split(os.path.sep)))[-1] if task.generator.rename: srcname = task.generator.rename(srcname) - for dummy in task.generator.root.split("/"): - splittedname = srcname.split("/") - srcname = "/".join(splittedname[1:]) fileset[srcname] = src for srcname,src in fileset.items(): ti = tarfile.TarInfo(srcname) ti.mode = 0755 ti.size = os.path.getsize(src) - f = file(src) + openmode = 'r' + if sys.platform == 'win32': openmode = openmode + 'b' + f = file(src,openmode) z.addfile(ti,fileobj=f) f.close() z.close() From 985ff79f18f5868d372d42ab7fd36622a9537153 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 14:38:57 -0700 Subject: [PATCH 018/145] only consider substituted files in construction of patches. 
reverse order of found files for patch, as shared goes first, so that virtspecific patch file overrides shared --- patches/wscript_build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/patches/wscript_build b/patches/wscript_build index 0cdfb7302a3..7cca9b09902 100644 --- a/patches/wscript_build +++ b/patches/wscript_build @@ -4,7 +4,7 @@ bld.substitute("*/**",name="patchsubst") for virttech in Utils.to_list(bld.path.ant_glob("*",dir=True)): if virttech in ["shared","wscript_build"]: continue - patchfiles = bld.path.ant_glob('%s/** shared/**'%virttech,src=True,bld=True,dir=False,flat=True) + patchfiles = bld.path.ant_glob('shared/** %s/**'%virttech,src=False,bld=True,dir=False,flat=True) tgen = bld( features = 'tar',#Utils.tar_up, source = patchfiles, From 073e1b0a29545013de2ef4945849d393d628b740 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 13:17:01 -0700 Subject: [PATCH 019/145] either mkisofs or genisoimage are detected. Graft points (thus rename()) support added to waf create ISO feature. Proper cross-platform support in waf create ISO feature. 
Give option to use genisoimage if mkisofs is not present, fixes compile on windows as long as user has cygwin Fix mkisofs to work on windows well put files in root directory of systemvm ISO put the rename func in the task generator constructor for the isos Add graft point support to tools/mkisofs --- tools/waf/mkisofs.py | 13 ++++++++----- wscript_build | 10 +++++++--- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/tools/waf/mkisofs.py b/tools/waf/mkisofs.py index eb611577208..440073d2de4 100644 --- a/tools/waf/mkisofs.py +++ b/tools/waf/mkisofs.py @@ -1,12 +1,14 @@ import Utils from TaskGen import feature, before +from Configure import ConfigurationError import Task import os # fixme: this seems to hang waf with 100% CPU def detect(conf): - conf.find_program("mkisofs",mandatory=True,var='MKISOFS') + conf.find_program("mkisofs",var='MKISOFS') + if not conf.env.MKISOFS: conf.find_program("genisoimage",mandatory=True,var='MKISOFS') def iso_up(task): tgt = task.outputs[0].bldpath(task.env) @@ -16,21 +18,22 @@ def iso_up(task): if inp.id&3==Node.BUILD: src = inp.bldpath(task.env) srcname = src - srcname = "/".join(srcname.split("/")[1:]) # chop off default/ + srcname = sep.join(srcname.split(sep)[1:]) # chop off default/ else: src = inp.srcpath(task.env) srcname = src - srcname = "/".join(srcname.split("/")[1:]) # chop off ../ - inps.append(src) + srcname = sep.join(srcname.split(sep)[1:]) # chop off ../ + if task.generator.rename: srcname = task.generator.rename(srcname) + inps.append(srcname+'='+src) ret = Utils.exec_command( [ task.generator.env.MKISOFS, "-quiet", "-r", + "-graft-points", "-o",tgt, ] + inps, shell=False) if ret != 0: return ret - if task.chmod: os.chmod(tgt,task.chmod) def apply_iso(self): Utils.def_attrs(self,fun=iso_up) diff --git a/wscript_build b/wscript_build index 8599da6cf4e..a22455d8ffa 100644 --- a/wscript_build +++ b/wscript_build @@ -268,17 +268,19 @@ def iso_up(task): if inp.id&3==Node.BUILD: src = inp.bldpath(task.env) 
srcname = src - srcname = "/".join(srcname.split("/")[1:]) # chop off default/ + srcname = sep.join(srcname.split(sep)[1:]) # chop off default/ else: src = inp.srcpath(task.env) srcname = src - srcname = "/".join(srcname.split("/")[1:]) # chop off ../ - inps.append(src) + srcname = sep.join(srcname.split(sep)[1:]) # chop off ../ + if task.generator.rename: srcname = task.generator.rename(srcname) + inps.append(srcname+'='+src) ret = Utils.exec_command( [ task.generator.env.MKISOFS, "-quiet", "-r", + "-graft-points", "-o",tgt, ] + inps, shell=False) if ret != 0: return ret @@ -291,6 +293,7 @@ if bld.env.DISTRO not in ["Windows","Mac"]: target = 'target/oss/systemvm.iso', name = 'systemvm_iso', after = 'systemvm_zip patch_tgz', + rename = lambda x: x.split(sep)[-1], ) bld.process_after(tgen) bld.install_as("${AGENTLIBDIR}/vms/systemvm.iso", "target/oss/systemvm.iso") @@ -302,6 +305,7 @@ if buildpremium: target = 'target/premium/systemvm.iso', name = 'systemvm-premium_iso', after = 'systemvm-premium_zip patch_tgz', + rename = lambda x: x.split(sep)[-1], ) bld.process_after(tgen) bld.install_as("${AGENTLIBDIR}/vms/systemvm-premium.iso", "target/premium/systemvm.iso") From df0e730faa364d82177eaeb0532a335b2ef1283a Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 14:36:49 -0700 Subject: [PATCH 020/145] Remove unnecessary copy of patch.tgz now that we have the capability to rename xenserver-patch.tgz within the ISO image --- patches/wscript_build | 9 --------- wscript_build | 12 ++++++------ 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/patches/wscript_build b/patches/wscript_build index 7cca9b09902..cc007c5c2ab 100644 --- a/patches/wscript_build +++ b/patches/wscript_build @@ -18,12 +18,3 @@ for virttech in Utils.to_list(bld.path.ant_glob("*",dir=True)): if virttech != "xenserver": # xenserver uses the patch.tgz file later to make an ISO, so we do not need to install it 
bld.install_as("${AGENTLIBDIR}/scripts/vm/hypervisor/%s/patch.tgz"%virttech, "%s-patch.tgz"%virttech) - -tgen = bld( - rule = 'cp ${SRC} ${TGT}', - source = 'xenserver-patch.tgz', - target = 'patch.tgz', - after = 'xenserver-patch_tgz', - name = 'patch_tgz' -) -bld.process_after(tgen) diff --git a/wscript_build b/wscript_build index a22455d8ffa..1c81300dff1 100644 --- a/wscript_build +++ b/wscript_build @@ -289,11 +289,11 @@ if bld.env.DISTRO not in ["Windows","Mac"]: # systemvm.zip cannot be built on Windows or Mac because system deps do not exist there tgen = bld( rule = iso_up, - source = "patches/patch.tgz target/oss/systemvm.zip", + source = "patches/xenserver-patch.tgz target/oss/systemvm.zip", target = 'target/oss/systemvm.iso', name = 'systemvm_iso', - after = 'systemvm_zip patch_tgz', - rename = lambda x: x.split(sep)[-1], + after = 'systemvm_zip xenserver-patch_tgz', + rename = lambda x: x.split(sep)[-1].replace('xenserver-',''), ) bld.process_after(tgen) bld.install_as("${AGENTLIBDIR}/vms/systemvm.iso", "target/oss/systemvm.iso") @@ -301,11 +301,11 @@ if bld.env.DISTRO not in ["Windows","Mac"]: if buildpremium: tgen = bld( rule = iso_up, - source = "patches/patch.tgz target/premium/systemvm.zip", + source = "patches/xenserver-patch.tgz target/premium/systemvm.zip", target = 'target/premium/systemvm.iso', name = 'systemvm-premium_iso', - after = 'systemvm-premium_zip patch_tgz', - rename = lambda x: x.split(sep)[-1], + after = 'systemvm-premium_zip xenserver-patch_tgz', + rename = lambda x: x.split(sep)[-1].replace('xenserver-',''), ) bld.process_after(tgen) bld.install_as("${AGENTLIBDIR}/vms/systemvm-premium.iso", "target/premium/systemvm.iso") From 763cd4481bf317eb458ec88cdc74a8fa7039a9a5 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 15:34:18 -0700 Subject: [PATCH 021/145] Build -- USE THE MAGIC! Dependencies are tracked automatically based on the files produced by each task generator. 
--- patches/wscript_build | 2 -- version-info.in | 1 - wscript_build | 53 ++++++++++++------------------------------- 3 files changed, 15 insertions(+), 41 deletions(-) diff --git a/patches/wscript_build b/patches/wscript_build index cc007c5c2ab..a28272fb8e4 100644 --- a/patches/wscript_build +++ b/patches/wscript_build @@ -12,9 +12,7 @@ for virttech in Utils.to_list(bld.path.ant_glob("*",dir=True)): name = '%s-patch_tgz'%virttech, root = os.path.join("patches",virttech), rename = lambda x: re.sub(".subst$","",x), - after = 'patchsubst', ) - bld.process_after(tgen) if virttech != "xenserver": # xenserver uses the patch.tgz file later to make an ISO, so we do not need to install it bld.install_as("${AGENTLIBDIR}/scripts/vm/hypervisor/%s/patch.tgz"%virttech, "%s-patch.tgz"%virttech) diff --git a/version-info.in b/version-info.in index 19b45b89907..36eaac3b0a4 100644 --- a/version-info.in +++ b/version-info.in @@ -1,6 +1,5 @@ This is the version information for the manifests in the JAR files. -JAVACFLAGS (taken from ant debuglevel property): @JAVACFLAGS@ Implementation_Version: @Implementation_Version@ Ant arguments: @ant_args@ diff --git a/wscript_build b/wscript_build index 1c81300dff1..d6da48c6934 100644 --- a/wscript_build +++ b/wscript_build @@ -47,39 +47,16 @@ bld.install_files("${DOCDIR}","configure-info") # ==================== Java compilation =========================== -sfl = lambda filespec: filelist(filespec,src=True,bld=False,dir=False,flat=True) - # discover ant targets and properties antxmlfiles = _glob(_join("build","*.xml")) + _glob(_join("build","*.properties")) if buildpremium: antxmlfiles += _glob(_join("build","premium","*.xml")) + _glob(_join("cloudstack-proprietary","build","*.xml")) targets,antprops = Utils.discover_ant_targets_and_properties(antxmlfiles) -javac_env = bld.env.copy() -# FIXME trigger recompilation / cache avoidance when debuglevel changes. ATM this does not happen. 
-if antprops.get('debuglevel',None): javac_env.append_value("JAVACFLAGS",["-g:%s"%antprops['debuglevel']]) Implementation_Version = bld.env.VERSION buildnumber = Utils.getbuildnumber() if buildnumber: Implementation_Version += "." + buildnumber -# compile .class files using waf -classpathentries = [ _join(builddir,x[2]) for x in targets ] -javawclasspath = pathsep.join( classpathentries + [ _abspath(x) for x in bld.env.CLASSPATH.split(pathsep) ] ) -javac_tgens = [ - bld( features='javac', name=name, srcdir=srcdir, outdir=classdir, - after=" ".join(deps), classpath=javawclasspath, env=javac_env ) - for name,srcdir,classdir,jarpath,deps in targets ] -[ bld.process_after(tgen) for tgen in javac_tgens ] - -# compile jar files using ant -# ant only needs to be reinvoked if the version with build number changes -# we here trim all the depended targets from the target list: -build_targets = [ x[0] for x in targets ] -nondep_build_targets = list ( set([ x[0] for x in targets ]) - set([ x for dependencies in [ y[4] for y in targets ] for x in dependencies ]) ) -ant_sources = " ".join(antxmlfiles + [ sfl(x[1] + "/**/*.java") for x in targets ]) -ant_jars = [ x[3] for x in targets ] - -# because build-console-viewer does not use compile-java macro, we have to hardcode it here - +# this is to trigger recompilation / cache avoidance if the relevant environment for ant changes ant_args = [ "-Dimpl.version=%s"%Implementation_Version, "-Dtarget.dir=%s"%Utils.relpath(_join(builddir,"target")), @@ -87,17 +64,20 @@ ant_args = [ ] if not buildpremium: ant_args += ["-DOSS=true"] -# this is to trigger recompilation / cache avoidance if the relevant environment for ant changes tgen = bld(features='subst', name='version-info', source="version-info.in", target="version-info") -tgen.dict = { "Implementation_Version":Implementation_Version,"JAVACFLAGS":javac_env.JAVACFLAGS,"ant_args":ant_args } +tgen.dict = { "Implementation_Version":Implementation_Version,"ant_args":ant_args } 
bld.install_files("${DOCDIR}","version-info") -build_targets += ["version-info"] -ant_sources += " version-info" +# compile jar files using ant +# ant only needs to be reinvoked if the version with build number changes +# we here trim all the depended targets from the target list: +build_targets = [ x[0] for x in targets ] +nondep_build_targets = list ( set([ x[0] for x in targets ]) - set([ x for dependencies in [ y[4] for y in targets ] for x in dependencies ]) ) +ant_sources = " ".join(antxmlfiles + [ filelist(x[1] + "/**/*.java",src=True,bld=False,dir=False,flat=True) for x in targets ]) + ' version-info' +ant_jars = [ x[3] for x in targets ] bld.path.ensure_dir_node_from_path("target/jar") -tgen = bld(rule=Utils.runant, name='runant', source=ant_sources, target=ant_jars, anttgts = nondep_build_targets, antargs=ant_args, after=build_targets) -bld.process_after(tgen) +tgen = bld(rule=Utils.runant, name='runant', source=ant_sources, target=ant_jars, anttgts = nondep_build_targets, antargs=ant_args) # install jar files [ bld.install_files('${JAVADIR}',jar) for jar in ant_jars if _basename(jar).startswith("cloud-") ] @@ -238,9 +218,7 @@ if bld.env.DISTRO not in ["Windows","Mac"]: source = " ".join( [sources,artifacts,deps,systems] ), target = 'target/oss/systemvm.zip', name = 'systemvm_zip', - after = 'getsystemjars runant', ) - bld.process_after(tgen) bld.install_files("${AGENTLIBDIR}/vms", "target/oss/systemvm.zip") if buildpremium: @@ -249,9 +227,7 @@ if buildpremium: source = " ".join( [premiumsources,premiumartifacts,thirdparties] ), target = 'target/premium/systemvm.zip', name = 'systemvm-premium_zip', - after = 'runant', ) - bld.process_after(tgen) #no need to install the premium one, we have ISOs below #bld.install_files("${AGENTLIBDIR}/vms", "systemvm-premium.zip") @@ -292,10 +268,8 @@ if bld.env.DISTRO not in ["Windows","Mac"]: source = "patches/xenserver-patch.tgz target/oss/systemvm.zip", target = 'target/oss/systemvm.iso', name = 'systemvm_iso', - 
after = 'systemvm_zip xenserver-patch_tgz', rename = lambda x: x.split(sep)[-1].replace('xenserver-',''), ) - bld.process_after(tgen) bld.install_as("${AGENTLIBDIR}/vms/systemvm.iso", "target/oss/systemvm.iso") if buildpremium: @@ -304,10 +278,8 @@ if buildpremium: source = "patches/xenserver-patch.tgz target/premium/systemvm.zip", target = 'target/premium/systemvm.iso', name = 'systemvm-premium_iso', - after = 'systemvm-premium_zip xenserver-patch_tgz', rename = lambda x: x.split(sep)[-1].replace('xenserver-',''), ) - bld.process_after(tgen) bld.install_as("${AGENTLIBDIR}/vms/systemvm-premium.iso", "target/premium/systemvm.iso") # ================== End systemvm ISO creation ==================== @@ -436,3 +408,8 @@ for vendor in _glob(_join("vendor","*")) + _glob(_join("cloudstack-proprietary", bld.install_files_filtered("${MSCONF}/%s"%vendor,filelist("%s/tomcatconf/*"%vendor)) # ====================== End vendor-specific plugins ==================== + + +# ====================== Magic! ========================================= + +bld.use_the_magic() From b2ca5c7cdf3253d4134d897167c9665465795d52 Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Wed, 1 Sep 2010 16:35:41 -0700 Subject: [PATCH 022/145] new UI - vm wizard - implment close button --- ui/new/jsp/tab_instance.jsp | 10 +++++----- ui/new/scripts/cloud.core.instance.js | 5 ++--- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/ui/new/jsp/tab_instance.jsp b/ui/new/jsp/tab_instance.jsp index 8a19cbe74fa..7d12d1a7542 100644 --- a/ui/new/jsp/tab_instance.jsp +++ b/ui/new/jsp/tab_instance.jsp @@ -157,7 +157,7 @@ Step 5
-
+
@@ -269,7 +269,7 @@ Step 5
-
+
@@ -364,7 +364,7 @@ Step 5
-
+
@@ -405,7 +405,7 @@ Step 5
-
+
@@ -479,7 +479,7 @@ Step 5
-
+
diff --git a/ui/new/scripts/cloud.core.instance.js b/ui/new/scripts/cloud.core.instance.js index 94cc5e51c3f..c4d59a654b0 100755 --- a/ui/new/scripts/cloud.core.instance.js +++ b/ui/new/scripts/cloud.core.instance.js @@ -641,9 +641,8 @@ function clickInstanceGroupHeader($arrowIcon) { $vmPopup.hide(); $("#overlay_black").hide(); } - - - $vmPopup.find("#vm_wizard_close").bind("click", function(event) { + + $vmPopup.find("#close_button").bind("click", function(event) { vmWizardClose(); return false; }); From 25224d9251cf5bd373567915f769c7835d8f85f0 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 16:40:40 -0700 Subject: [PATCH 023/145] Smarter detection of PREFIX --- wscript_configure | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/wscript_configure b/wscript_configure index 829ddc72c15..8de10418871 100644 --- a/wscript_configure +++ b/wscript_configure @@ -7,7 +7,9 @@ import platform import Utils,Node,Options,Logs,Scripting,Environment,Build,Configure from os import unlink as _unlink, makedirs as _makedirs, getcwd as _getcwd, chdir as _chdir -from os.path import abspath as _abspath, basename as _basename, dirname as _dirname, exists as _exists, isdir as _isdir, split as _split, join as _join, sep, pathsep, pardir +try: from os import getuid as _getuid +except ImportError: pass +from os.path import abspath as _abspath, basename as _basename, dirname as _dirname, exists as _exists, isdir as _isdir, split as _split, join as _join, expanduser as _expanduser, sep, pathsep, pardir from glob import glob as _glob @@ -88,14 +90,6 @@ hard_deps = [ ] -conf.check_tool('misc') -conf.check_tool("gnu_dirs") -conf.check_tool('tar') -conf.check_tool('mkisofs') -conf.check_tool('java') -conf.check_tool("python") -conf.check_python_version((2,4,0)) - conf.check_message_1('Detecting distribution') if platform.system() == 'Windows': conf.env.DISTRO = "Windows" elif platform.system() == 'Darwin': conf.env.DISTRO 
= "Mac" @@ -107,6 +101,22 @@ if conf.env.DISTRO == "unknown": c = "YELLOW" else: c = "GREEN" conf.check_message_2(conf.env.DISTRO,c) +conf.check_message_1('Detecting installation prefix') +if conf.env.PREFIX == '/usr/local': + if conf.env.DISTRO == 'Windows': + conf.env.PREFIX = '/CloudStack' + elif _getuid() != 0: # not root + conf.env.PREFIX = _join(_expanduser("~"),"cloudstack") +conf.check_message_2("%s"%conf.env.PREFIX,"GREEN") + +conf.check_tool('misc') +conf.check_tool("gnu_dirs") +conf.check_tool('tar') +conf.check_tool('mkisofs') +conf.check_tool('java') +conf.check_tool("python") +conf.check_python_version((2,4,0)) + if conf.env.DISTRO not in ["Windows","Mac"]: conf.check_tool('compiler_cc') conf.check_cc(lib='pthread') From ce60139666267a85aa3c35bf6ef09e21bfae5487 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 1 Sep 2010 16:44:37 -0700 Subject: [PATCH 024/145] Document new autodetection of cloudstack prefix --- README.html | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/README.html b/README.html index 192e2dd79cd..5a38a434bd9 100644 --- a/README.html +++ b/README.html @@ -808,13 +808,13 @@ Any ant target added to the ant project files will automatically be detected -- The reason we do this rather than use the native waf capabilities for building Java projects is simple: by using ant, we can leverage the support built-in for ant in [[Eclipse|How to integrate with Eclipse]] and many other """IDEs""". Another reason to do this is because Java developers are familiar with ant, so adding a new JAR file or modifying what gets built into the existing JAR files is facilitated for Java developers.
-
+
The CloudStack build system installs files on a variety of paths, each
 one of which is selectable when building from source.
 * {{{$PREFIX}}}:
 ** the default prefix where the entire stack is installed
-** defaults to /usr/local on source builds
-** defaults to /usr on package builds
+** defaults to {{{/usr/local}}} on source builds as root, {{{$HOME/cloudstack}}} on source builds as a regular user, {{{C:\CloudStack}}} on Windows builds
+** defaults to {{{/usr}}} on package builds
 * {{{$SYSCONFDIR/cloud}}}:
 ** the prefix for CloudStack configuration files
 ** defaults to $PREFIX/etc/cloud on source builds
@@ -1172,9 +1172,9 @@ Cloud.com's contact information is:
 !Legal information
 //Unless otherwise specified// by Cloud.com, Inc., or in the sources themselves, [[this software is OSI certified Open Source Software distributed under the GNU General Public License, version 3|License statement]].  OSI Certified is a certification mark of the Open Source Initiative.  The software powering this documentation is """BSD-licensed""" and obtained from [[TiddlyWiki.com|http://tiddlywiki.com/]].
-
+
This is the typical lifecycle that you would follow when hacking on a CloudStack component, assuming that your [[development environment has been set up|Preparing your development environment]]:
-# [[Configure|waf configure]] the source code<br>{{{./waf configure --prefix=/home/youruser/cloudstack}}}
+# [[Configure|waf configure]] the source code<br>{{{./waf configure}}}
 # [[Build|waf build]] and [[install|waf install]] the CloudStack
 ## {{{./waf install}}}
 ## [[How to perform these tasks from Eclipse|How to integrate with Eclipse]]
@@ -1252,7 +1252,7 @@ Makes an inventory of all build products in {{{artifacts/default}}}, and removes
 
 Contrast to [[waf distclean]].
-
+
{{{
 ./waf configure --prefix=/directory/that/you/have/write/permission/to
 }}}
@@ -1261,7 +1261,7 @@ This runs the file {{{wscript_configure}}}, which takes care of setting the  var
 !When / why should I run this?
 You run this command //once//, in preparation to building the stack, or every time you need to change a configure-time variable.  Once you find an acceptable set of configure-time variables, you should not need to run {{{configure}}} again.
 !What happens if I don't run it?
-For convenience reasons, if you forget to configure the source, waf will autoconfigure itself and select some sensible default configuration options.  By default, {{{PREFIX}}} is {{{/usr/local}}}, but you can set it e.g. to  {{{/home/youruser/cloudstack}}} if you plan to do a non-root install.  Be ware that you can later install the stack as a regular user, but most components need to //run// as root.
+For convenience reasons, if you forget to configure the source, waf will autoconfigure itself and select some sensible default configuration options.  By default, {{{PREFIX}}} is {{{/usr/local}}} if you configure as root (do this if you plan to do a non-root install), or {{{/home/youruser/cloudstack}}} if you configure as your regular user name.  Beware that you can later install the stack as a regular user, but most components need to //run// as root.
 !What variables / options exist for configure?
 In general: refer to the output of {{{./waf configure --help}}}.
 

From 6490451eecc49690f14a890feb499ae3bca2a063 Mon Sep 17 00:00:00 2001
From: "Manuel Amador (Rudd-O)" 
Date: Wed, 1 Sep 2010 16:46:23 -0700
Subject: [PATCH 025/145] Smarter detection of PREFIX -- Windows does
 C:\CloudStack

---
 wscript_configure | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/wscript_configure b/wscript_configure
index 8de10418871..a0f40d814ea 100644
--- a/wscript_configure
+++ b/wscript_configure
@@ -104,7 +104,7 @@ conf.check_message_2(conf.env.DISTRO,c)
 conf.check_message_1('Detecting installation prefix')
 if conf.env.PREFIX == '/usr/local':
 	if conf.env.DISTRO == 'Windows':
-		conf.env.PREFIX = '/CloudStack'
+		conf.env.PREFIX = 'C:\\CloudStack'
 	elif _getuid() != 0: # not root
 		conf.env.PREFIX = _join(_expanduser("~"),"cloudstack")
 conf.check_message_2("%s"%conf.env.PREFIX,"GREEN")

From b0b27b1ce024762fe8a15ad7e3d16e212388aafc Mon Sep 17 00:00:00 2001
From: "Manuel Amador (Rudd-O)" 
Date: Wed, 1 Sep 2010 18:59:33 -0500
Subject: [PATCH 026/145] Default prefix on windows fixed

---
 wscript_configure | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/wscript_configure b/wscript_configure
index a0f40d814ea..d76968e573a 100644
--- a/wscript_configure
+++ b/wscript_configure
@@ -102,7 +102,7 @@ else: 				    c = "GREEN"
 conf.check_message_2(conf.env.DISTRO,c)
 
 conf.check_message_1('Detecting installation prefix')
-if conf.env.PREFIX == '/usr/local':
+if Options.options.prefix == Options.default_prefix:
 	if conf.env.DISTRO == 'Windows':
 		conf.env.PREFIX = 'C:\\CloudStack'
 	elif _getuid() != 0: # not root

From 58bad1a936d310bff41e5df21dac0e4d3f9002ca Mon Sep 17 00:00:00 2001
From: "Manuel Amador (Rudd-O)" 
Date: Wed, 1 Sep 2010 17:07:02 -0700
Subject: [PATCH 027/145] If tomcat is not detected, fail immediately rather
 than at compile-time

---
 wscript_configure | 1 +
 1 file changed, 1 insertion(+)

diff --git a/wscript_configure b/wscript_configure
index d76968e573a..c807d210741 100644
--- a/wscript_configure
+++ b/wscript_configure
@@ -161,6 +161,7 @@ else:
 	else:
 		conf.env.TOMCATHOME = _join(conf.env.DATADIR,'tomcat6')
 		conf.check_message_2("%s (assumed presence of Tomcat there)"%conf.env.TOMCATHOME,"GREEN")
+if not _exists(conf.env.TOMCATHOME): conf.fatal("Tomcat directory %s not found.  Cannot proceed."%conf.env.TOMCATHOME)
 
 conf.env.AGENTPATH = _join(conf.env.PACKAGE,"agent")
 conf.env.CPPATH = _join(conf.env.PACKAGE,"console-proxy")

From 398b08457210e30fdcc6ee9c521dd181594eddfe Mon Sep 17 00:00:00 2001
From: "Manuel Amador (Rudd-O)" 
Date: Wed, 1 Sep 2010 17:09:18 -0700
Subject: [PATCH 028/145] Explain how to get Tomcat if it is not installed

---
 wscript_configure | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/wscript_configure b/wscript_configure
index c807d210741..c182cefc9f6 100644
--- a/wscript_configure
+++ b/wscript_configure
@@ -161,7 +161,7 @@ else:
 	else:
 		conf.env.TOMCATHOME = _join(conf.env.DATADIR,'tomcat6')
 		conf.check_message_2("%s (assumed presence of Tomcat there)"%conf.env.TOMCATHOME,"GREEN")
-if not _exists(conf.env.TOMCATHOME): conf.fatal("Tomcat directory %s not found.  Cannot proceed."%conf.env.TOMCATHOME)
+if not _exists(conf.env.TOMCATHOME): conf.fatal("Tomcat directory %s not found.  Either install Tomcat using ./waf installrpmdeps or ./waf installdebdeps, or manually install Tomcat to a directory in your system and set the environment variable TOMCAT_HOME to point to it."%conf.env.TOMCATHOME)
 
 conf.env.AGENTPATH = _join(conf.env.PACKAGE,"agent")
 conf.env.CPPATH = _join(conf.env.PACKAGE,"console-proxy")

From 49f2616fd9971ec1fe1ce7ffc9bb10c3ea71a718 Mon Sep 17 00:00:00 2001
From: "Manuel Amador (Rudd-O)" 
Date: Wed, 1 Sep 2010 17:18:38 -0700
Subject: [PATCH 029/145] Verbose and helpful messages for waf configure if
 mkisofs or mysql.exe are not found

---
 wscript           | 4 ++--
 wscript_configure | 7 ++++++-
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/wscript b/wscript
index e04ee7e8980..7a86b03aa02 100644
--- a/wscript
+++ b/wscript
@@ -758,7 +758,7 @@ def deploydb(ctx,virttech=None):
 		before = before + file(p).read()
 		Utils.pprint("GREEN","Reading database code from %s"%p)
 
-	cmd = ["mysql","--user=%s"%dbuser,"-h",dbhost,"--password=%s"%dbpw]
+	cmd = [ctx.env.MYSQL,"--user=%s"%dbuser,"-h",dbhost,"--password=%s"%dbpw]
 	Utils.pprint("GREEN","Deploying database scripts to %s (user %s)"%(dbhost,dbuser))
 	Utils.pprint("BLUE"," ".join(cmd))
 	p = _Popen(cmd,stdin=PIPE,stdout=None,stderr=None)
@@ -783,7 +783,7 @@ def deploydb(ctx,virttech=None):
 		after = after + file(p).read()
 		Utils.pprint("GREEN","Reading database code from %s"%p)
 
-	cmd = ["mysql","--user=%s"%dbuser,"-h",dbhost,"--password=%s"%dbpw]
+	cmd = [ctx.env.MYSQL,"--user=%s"%dbuser,"-h",dbhost,"--password=%s"%dbpw]
 	Utils.pprint("GREEN","Deploying post-configuration database scripts to %s (user %s)"%(dbhost,dbuser))
 	Utils.pprint("BLUE"," ".join(cmd))
 	p = _Popen(cmd,stdin=PIPE,stdout=None,stderr=None)
diff --git a/wscript_configure b/wscript_configure
index c182cefc9f6..46f95f5cb02 100644
--- a/wscript_configure
+++ b/wscript_configure
@@ -112,7 +112,12 @@ conf.check_message_2("%s"%conf.env.PREFIX,"GREEN")
 conf.check_tool('misc')
 conf.check_tool("gnu_dirs")
 conf.check_tool('tar')
-conf.check_tool('mkisofs')
+try: conf.check_tool('mkisofs')
+except Configure.ConfigurationError,e:
+	raise Configure.ConfigurationError, "The program genisoimage (or mkisofs) could not be found.\nOn Linux: ./waf installrpmdeps or ./waf installdebdeps according to your distro's package format.\nOn Windows: Use cygwin to install the mkisofs package, then ensure that the program is in your PATH."
+try: conf.find_program('mysql',mandatory=True)
+except Configure.ConfigurationError,e:
+	raise Configure.ConfigurationError, "The program mysql (or mysql.exe) could not be found.\nOn Linux: ./waf installrpmdeps or ./waf installdebdeps according to your distro's package format.\nOn Windows: Install the MySQL client package and ensure that the mysql.exe program is in your PATH."
 conf.check_tool('java')
 conf.check_tool("python")
 conf.check_python_version((2,4,0))

From 4289dd79ecdfd3523837352771ad5e46da591b21 Mon Sep 17 00:00:00 2001
From: anthony 
Date: Wed, 1 Sep 2010 20:12:01 -0700
Subject: [PATCH 030/145] Template size,

1. The size in template_pool_ref is now the physical size.
2. Templates in the storage pool are implemented as snapshots; fixes the x3 issue.
3. Fixes the x4 issue.
---
 .../xen/resource/CitrixResourceBase.java      |  13 +-
 .../vm/hypervisor/xenserver/prepsystemvm.sh   | 232 ------------------
 .../cloud/agent/manager/AgentManagerImpl.java |   2 +
 .../com/cloud/network/NetworkManagerImpl.java |   3 +
 .../com/cloud/storage/StorageManagerImpl.java |   2 +-
 .../AbstractStoragePoolAllocator.java         |  29 +--
 6 files changed, 21 insertions(+), 260 deletions(-)
 delete mode 100755 scripts/vm/hypervisor/xenserver/prepsystemvm.sh

diff --git a/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
index f04aa4dd702..d96cc18dc58 100644
--- a/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
+++ b/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
@@ -2078,6 +2078,7 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR
 
             /* Does the template exist in primary storage pool? If yes, no copy */
             VDI vmtmpltvdi = null;
+            VDI snapshotvdi = null;
 
             Set vdis = VDI.getByNameLabel(conn, "Template " + cmd.getName());
 
@@ -2110,19 +2111,21 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR
                     return new DownloadAnswer(null, 0, msg, com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR, "", "", 0);
                 }
                 vmtmpltvdi = cloudVDIcopy(tmpltvdi, poolsr);
-
-                vmtmpltvdi.setNameLabel(conn, "Template " + cmd.getName());
+                snapshotvdi = vmtmpltvdi.snapshot(conn, new HashMap());
+                vmtmpltvdi.destroy(conn);
+                snapshotvdi.setNameLabel(conn, "Template " + cmd.getName());
                 // vmtmpltvdi.setNameDescription(conn, cmd.getDescription());
-                uuid = vmtmpltvdi.getUuid(conn);
+                uuid = snapshotvdi.getUuid(conn);
+                vmtmpltvdi = snapshotvdi;
 
             } else
                 uuid = vmtmpltvdi.getUuid(conn);
 
             // Determine the size of the template
-            long createdSize = vmtmpltvdi.getVirtualSize(conn);
+            long phySize = vmtmpltvdi.getPhysicalUtilisation(conn);
 
             DownloadAnswer answer = new DownloadAnswer(null, 100, cmd, com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOADED, uuid, uuid);
-            answer.setTemplateSize(createdSize);
+            answer.setTemplateSize(phySize);
 
             return answer;
 
diff --git a/scripts/vm/hypervisor/xenserver/prepsystemvm.sh b/scripts/vm/hypervisor/xenserver/prepsystemvm.sh
deleted file mode 100755
index 4a5b7f0e695..00000000000
--- a/scripts/vm/hypervisor/xenserver/prepsystemvm.sh
+++ /dev/null
@@ -1,232 +0,0 @@
-#/bin/bash
-# $Id: prepsystemvm.sh 10800 2010-07-16 13:48:39Z edison $ $HeadURL: svn://svn.lab.vmops.com/repos/vmdev/java/scripts/vm/hypervisor/xenserver/prepsystemvm.sh $
-
-#set -x
-
-mntpath() {
-  local vmname=$1
-  echo "/mnt/$vmname"
-}
-
-mount_local() {
-   local vmname=$1
-   local disk=$2
-   local path=$(mntpath $vmname)
-
-   mkdir -p ${path}
-   mount $disk ${path} 
-
-   return $?
-}
-
-umount_local() {
-   local vmname=$1
-   local path=$(mntpath $vmname)
-
-   umount  $path
-   local ret=$?
-   
-   rm -rf $path
-   return $ret
-}
-
-
-patch_scripts() {
-   local vmname=$1
-   local patchfile=$2
-   local path=$(mntpath $vmname)
-
-   local oldmd5=
-   local md5file=${path}/md5sum
-   [ -f ${md5file} ] && oldmd5=$(cat ${md5file})
-   local newmd5=$(md5sum $patchfile | awk '{print $1}')
-
-   if [ "$oldmd5" != "$newmd5" ]
-   then
-     tar xzf $patchfile -C ${path}
-     echo ${newmd5} > ${md5file}
-   fi
-
-   return 0
-}
-
-#
-# To use existing console proxy .zip-based package file
-#
-patch_console_proxy() {
-   local vmname=$1
-   local patchfile=$2
-   local path=$(mntpath $vmname)
-   local oldmd5=
-   local md5file=${path}/usr/local/cloud/systemvm/md5sum
-
-   [ -f ${md5file} ] && oldmd5=$(cat ${md5file})
-   local newmd5=$(md5sum $patchfile | awk '{print $1}')
-
-   if [ "$oldmd5" != "$newmd5" ]
-   then
-     echo "All" | unzip $patchfile -d ${path}/usr/local/cloud/systemvm >/dev/null 2>&1
-     chmod 555 ${path}/usr/local/cloud/systemvm/run.sh
-     find ${path}/usr/local/cloud/systemvm/ -name \*.sh | xargs chmod 555
-     echo ${newmd5} > ${md5file}
-   fi
-
-   return 0
-}
-
-consoleproxy_svcs() {
-   local vmname=$1
-   local path=$(mntpath $vmname)
-
-   chroot ${path} /sbin/chkconfig cloud on
-   chroot ${path} /sbin/chkconfig postinit on
-   chroot ${path} /sbin/chkconfig domr_webserver off
-   chroot ${path} /sbin/chkconfig haproxy off ;
-   chroot ${path} /sbin/chkconfig dnsmasq off
-   chroot ${path} /sbin/chkconfig sshd on
-   chroot ${path} /sbin/chkconfig httpd off
-   chroot ${path} /sbin/chkconfig nfs off
-   chroot ${path} /sbin/chkconfig nfslock off
-   chroot ${path} /sbin/chkconfig rpcbind off
-   chroot ${path} /sbin/chkconfig rpcidmap off
-
-   cp ${path}/etc/sysconfig/iptables-consoleproxy ${path}/etc/sysconfig/iptables
-}
-
-secstorage_svcs() {
-   local vmname=$1
-   local path=$(mntpath $vmname)
-
-   chroot ${path} /sbin/chkconfig cloud on
-   chroot ${path} /sbin/chkconfig postinit on
-   chroot ${path} /sbin/chkconfig domr_webserver off
-   chroot ${path} /sbin/chkconfig haproxy off ;
-   chroot ${path} /sbin/chkconfig dnsmasq off
-   chroot ${path} /sbin/chkconfig sshd on
-   chroot ${path} /sbin/chkconfig httpd off
-    
-
-   cp ${path}/etc/sysconfig/iptables-secstorage ${path}/etc/sysconfig/iptables
-   mkdir -p ${path}/var/log/cloud
-}
-
-routing_svcs() {
-   local vmname=$1
-   local path=$(mntpath $vmname)
-
-   chroot ${path} /sbin/chkconfig cloud off
-   chroot ${path} /sbin/chkconfig domr_webserver on ; 
-   chroot ${path} /sbin/chkconfig haproxy on ; 
-   chroot ${path} /sbin/chkconfig dnsmasq on
-   chroot ${path} /sbin/chkconfig sshd on
-   chroot ${path} /sbin/chkconfig nfs off
-   chroot ${path} /sbin/chkconfig nfslock off
-   chroot ${path} /sbin/chkconfig rpcbind off
-   chroot ${path} /sbin/chkconfig rpcidmap off
-   cp ${path}/etc/sysconfig/iptables-domr ${path}/etc/sysconfig/iptables
-}
-
-lflag=
-dflag=
-
-while getopts 't:l:d:' OPTION
-do
-  case $OPTION in
-  l)	lflag=1
-	vmname="$OPTARG"
-        ;;
-  t)    tflag=1
-        vmtype="$OPTARG"
-        ;;
-  d)    dflag=1
-        rootdisk="$OPTARG"
-        ;;
-  *)    ;;
-  esac
-done
-
-if [ "$lflag$tflag$dflag" != "111" ]
-then
-  printf "Error: Not enough parameter\n" >&2
-  exit 1
-fi
-
-
-mount_local $vmname $rootdisk
-
-if [ $? -gt 0 ]
-then
-  printf "Failed to mount disk $rootdisk for $vmname\n" >&2
-  exit 1
-fi
-
-if [ -f $(dirname $0)/patch.tgz ]
-then
-  patch_scripts $vmname $(dirname $0)/patch.tgz
-  if [ $? -gt 0 ]
-  then
-    printf "Failed to apply patch patch.zip to $vmname\n" >&2
-    umount_local $vmname
-    exit 4
-  fi
-fi
-
-cpfile=$(dirname $0)/systemvm-premium.zip
-if [ "$vmtype" == "consoleproxy" ] || [ "$vmtype" == "secstorage" ]  && [ -f $cpfile ]
-then
-  patch_console_proxy $vmname $cpfile
-  if [ $? -gt 0 ]
-  then
-    printf "Failed to apply patch $patch $cpfile to $vmname\n" >&2
-    umount_local $vmname
-    exit 5
-  fi
-fi
-
-# domr is 64 bit, need to copy 32bit chkconfig to domr
-# this is workaroud, will use 32 bit domr
-dompath=$(mntpath $vmname)
-cp /sbin/chkconfig $dompath/sbin
-# copy public key to system vm
-cp $(dirname $0)/id_rsa.pub  $dompath/root/.ssh/authorized_keys
-#empty known hosts
-echo "" > $dompath/root/.ssh/known_hosts
-
-if [ "$vmtype" == "router" ]
-then
-  routing_svcs $vmname
-  if [ $? -gt 0 ]
-  then
-    printf "Failed to execute routing_svcs\n" >&2
-    umount_local $vmname
-    exit 6
-  fi
-fi
-
-
-if [ "$vmtype" == "consoleproxy" ]
-then
-  consoleproxy_svcs $vmname
-  if [ $? -gt 0 ]
-  then
-    printf "Failed to execute consoleproxy_svcs\n" >&2
-    umount_local $vmname
-    exit 7
-  fi
-fi
-
-if [ "$vmtype" == "secstorage" ]
-then
-  secstorage_svcs $vmname
-  if [ $? -gt 0 ]
-  then
-    printf "Failed to execute secstorage_svcs\n" >&2
-    umount_local $vmname
-    exit 8
-  fi
-fi
-
-
-umount_local $vmname
-
-exit $?
diff --git a/server/src/com/cloud/agent/manager/AgentManagerImpl.java b/server/src/com/cloud/agent/manager/AgentManagerImpl.java
index 991f4348452..3de6fdf5809 100755
--- a/server/src/com/cloud/agent/manager/AgentManagerImpl.java
+++ b/server/src/com/cloud/agent/manager/AgentManagerImpl.java
@@ -617,6 +617,8 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory {
             templateHostSC.addAnd("hostId", SearchCriteria.Op.EQ, secStorageHost.getId());
             _vmTemplateHostDao.remove(templateHostSC);
             
+            /*Disconnected agent needs special handling here*/
+    		secStorageHost.setGuid(null);
     		txn.commit();
     		return true;
     	}catch (Throwable t) {
diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java
index 35664556c3d..58d87e31749 100644
--- a/server/src/com/cloud/network/NetworkManagerImpl.java
+++ b/server/src/com/cloud/network/NetworkManagerImpl.java
@@ -635,6 +635,9 @@ public class NetworkManagerImpl implements NetworkManager, VirtualMachineManager
             }
             
             if (!found) {
+                event.setDescription("failed to create Domain Router : " + name);
+                event.setLevel(EventVO.LEVEL_ERROR);
+                _eventDao.persist(event);
                 throw new ExecutionException("Unable to create DomainRouter");
             }
             _routerDao.updateIf(router, Event.OperationSucceeded, null);
diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java
index 6ea715d25ed..9449b043602 100644
--- a/server/src/com/cloud/storage/StorageManagerImpl.java
+++ b/server/src/com/cloud/storage/StorageManagerImpl.java
@@ -892,7 +892,7 @@ public class StorageManagerImpl implements StorageManager {
             
             if (dataVol != null) {
                 StoragePoolVO pool = _storagePoolDao.findById(rootCreated.getPoolId());
-                dataCreated = createVolume(dataVol, vm, template, dc, pod, pool.getClusterId(), offering, diskOffering, avoids,size);
+                dataCreated = createVolume(dataVol, vm, null, dc, pod, pool.getClusterId(), offering, diskOffering, avoids,size);
                 if (dataCreated == null) {
                     throw new CloudRuntimeException("Unable to create " + dataVol);
                 }
diff --git a/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java
index b332bd187cc..e0ae7a9e756 100644
--- a/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java
+++ b/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java
@@ -147,7 +147,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
 		if (!poolIsCorrectType(dskCh, pool, vm, offering)) {
 			return false;
 		}
-
+	
 		// check the used size against the total size, skip this host if it's greater than the configured
 		// capacity check "storage.capacity.threshold"
 		if (sc != null) {
@@ -179,35 +179,20 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
 
 		for (VMTemplateStoragePoolVO templatePoolVO : templatePoolVOs) {
 			VMTemplateVO templateInPool = _templateDao.findById(templatePoolVO.getTemplateId());
-			int templateSizeMultiplier = 2;
 
 			if ((template != null) && !tmpinstalled && (templateInPool.getId() == template.getId())) {
 				tmpinstalled = true;
-				templateSizeMultiplier = 3;
 			}
 			
-			s_logger.debug("For template: " + templateInPool.getName() + ", using template size multiplier: " + templateSizeMultiplier);
-
 			long templateSize = templatePoolVO.getTemplateSize();
-			totalAllocatedSize += templateSizeMultiplier * (templateSize + _extraBytesPerVolume);
+			totalAllocatedSize += templateSize + _extraBytesPerVolume;
 		}
 
-		if ((template != null) && !tmpinstalled) {
-			// If the template that was passed into this allocator is not installed in the storage pool,
-			// add 3 * (template size on secondary storage) to the running total
-			HostVO secondaryStorageHost = _storageMgr.getSecondaryStorageHost(pool.getDataCenterId());
-			if (secondaryStorageHost == null) {
-				return false;
-			} else {
-				VMTemplateHostVO templateHostVO = _templateHostDao.findByHostTemplate(secondaryStorageHost.getId(), template.getId());
-				if (templateHostVO == null) {
-					return false;
-				} else {
-					s_logger.debug("For template: " + template.getName() + ", using template size multiplier: " + 3);
-					long templateSize = templateHostVO.getSize();
-					totalAllocatedSize += 3 * (templateSize + _extraBytesPerVolume);
-				}
-			}
+		if (template != null && !tmpinstalled ) {
+			// If the template that was passed into this allocator is not installed in the storage pool
+			// should add template size
+			// dskCh.getSize() should be template virtualsize
+			totalAllocatedSize += dskCh.getSize() + _extraBytesPerVolume;
 		}
 
 		long askingSize = dskCh.getSize();

From 10d62a5fe603686e9ed8f1499f6b21ee3fed51bb Mon Sep 17 00:00:00 2001
From: "Manuel Amador (Rudd-O)" 
Date: Wed, 1 Sep 2010 17:44:00 -0700
Subject: [PATCH 031/145] Connect cloud-setup-databases with deploydb.  Use
 MySQLdb instead of using the command-line MySQL client.  Detect the existence
 of the MySQLdb Python module on Windows.  Works on Windows.

---
 cloud.spec                            |    3 +-
 debian/control                        |    2 +-
 python/lib/cloud_utils.py             | 2319 +++++++++++++------------
 python/wscript_build                  |    5 +-
 setup/bindir/cloud-setup-databases.in |   95 +-
 setup/db/create-database.sql          |    2 +-
 wscript                               |   68 +-
 wscript_configure                     |    8 +-
 8 files changed, 1242 insertions(+), 1260 deletions(-)

diff --git a/cloud.spec b/cloud.spec
index 8207e103408..24983320b19 100644
--- a/cloud.spec
+++ b/cloud.spec
@@ -182,12 +182,11 @@ Summary:   Cloud.com setup tools
 Obsoletes: vmops-setup < %{version}-%{release}
 Requires: java >= 1.6.0
 Requires: python
-Requires: mysql
+Requires: MySQL-python
 Requires: %{name}-utils = %{version}-%{release}
 Requires: %{name}-server = %{version}-%{release}
 Requires: %{name}-deps = %{version}-%{release}
 Requires: %{name}-python = %{version}-%{release}
-Requires: MySQL-python
 Group:     System Environment/Libraries
 %description setup
 The Cloud.com setup tools let you set up your Management Server and Usage Server.
diff --git a/debian/control b/debian/control
index 0f1f3e5093a..2dd9c58ad02 100644
--- a/debian/control
+++ b/debian/control
@@ -128,7 +128,7 @@ Provides: vmops-setup
 Conflicts: vmops-setup
 Replaces: vmops-setup
 Architecture: any
-Depends: openjdk-6-jre, python, cloud-utils (= ${source:Version}), mysql-client, cloud-deps (= ${source:Version}), cloud-server (= ${source:Version}), cloud-python (= ${source:Version}), python-mysqldb
+Depends: openjdk-6-jre, python, cloud-utils (= ${source:Version}), cloud-deps (= ${source:Version}), cloud-server (= ${source:Version}), cloud-python (= ${source:Version}), python-mysqldb
 Description: Cloud.com client
  The Cloud.com setup tools let you set up your Management Server and Usage Server.
 
diff --git a/python/lib/cloud_utils.py b/python/lib/cloud_utils.py
index 1434372d548..3c4d5598d62 100644
--- a/python/lib/cloud_utils.py
+++ b/python/lib/cloud_utils.py
@@ -1,1159 +1,1160 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""Cloud.com Python utility library"""
-
-import sys, os, subprocess, errno, re, time, glob
-import urllib2
-import xml.dom.minidom
-import logging
-import socket
-
-# exit() error constants
-E_GENERIC= 1
-E_NOKVM = 2
-E_NODEFROUTE = 3
-E_DHCP = 4
-E_NOPERSISTENTNET = 5
-E_NETRECONFIGFAILED = 6
-E_VIRTRECONFIGFAILED = 7
-E_FWRECONFIGFAILED = 8
-E_AGENTRECONFIGFAILED = 9
-E_AGENTFAILEDTOSTART = 10
-E_NOFQDN = 11
-E_SELINUXENABLED = 12
-E_USAGE = os.EX_USAGE
-
-E_NEEDSMANUALINTERVENTION = 13
-E_INTERRUPTED = 14
-E_SETUPFAILED = 15
-E_UNHANDLEDEXCEPTION = 16
-E_MISSINGDEP = 17
-
-Unknown = 0
-Fedora = 1
-CentOS = 2
-Ubuntu = 3
-
-IPV4 = 4
-IPV6 = 6
-
-#=================== DISTRIBUTION DETECTION =================
-
-if os.path.exists("/etc/fedora-release"): distro = Fedora
-elif os.path.exists("/etc/centos-release"): distro = CentOS
-elif os.path.exists("/etc/redhat-release") and not os.path.exists("/etc/fedora-release"): distro = CentOS
-elif os.path.exists("/etc/legal") and "Ubuntu" in file("/etc/legal").read(-1): distro = Ubuntu
-else: distro = Unknown
-
-logFileName=None
-# ==================  LIBRARY UTILITY CODE=============
-def setLogFile(logFile):
-	global logFileName
-	logFileName=logFile
-def read_properties(propfile):
-	if not hasattr(propfile,"read"): propfile = file(propfile)
-	properties = propfile.read().splitlines()
-	properties = [ s.strip() for s in properties ]
-	properties = [ s for s in properties if
-			s and
-			not s.startswith("#") and
-			not s.startswith(";") ]
-	#[ logging.debug("Valid config file line: %s",s) for s in properties ]
-	proppairs = [ s.split("=",1) for s in properties ]
-	return dict(proppairs)
-
-def stderr(msgfmt,*args):
-	"""Print a message to stderr, optionally interpolating the arguments into it"""
-	msgfmt += "\n"
-	if logFileName != None:
-		sys.stderr = open(logFileName, 'a+')
-	if args: sys.stderr.write(msgfmt%args)
-	else: sys.stderr.write(msgfmt)
-
-def exit(errno=E_GENERIC,message=None,*args):
-	"""Exit with an error status code, printing a message to stderr if specified"""
-	if message: stderr(message,*args)
-	sys.exit(errno)
-
-def resolve(host,port):
-	return [ (x[4][0],len(x[4])+2) for x in socket.getaddrinfo(host,port,socket.AF_UNSPEC,socket.SOCK_STREAM, 0, socket.AI_PASSIVE) ]
-	
-def resolves_to_ipv6(host,port):
-	return resolve(host,port)[0][1] == IPV6
-
-###add this to Python 2.4, patching the subprocess module at runtime
-if hasattr(subprocess,"check_call"):
-	from subprocess import CalledProcessError, check_call
-else:
-	class CalledProcessError(Exception):
-		def __init__(self, returncode, cmd):
-			self.returncode = returncode ; self.cmd = cmd
-		def __str__(self): return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
-	subprocess.CalledProcessError = CalledProcessError
-	
-	def check_call(*popenargs, **kwargs):
-		retcode = subprocess.call(*popenargs, **kwargs)
-		cmd = kwargs.get("args")
-		if cmd is None: cmd = popenargs[0]
-		if retcode: raise subprocess.CalledProcessError(retcode, cmd)
-		return retcode
-	subprocess.check_call = check_call
-
-# python 2.4 does not have this
-try:
-	any = any
-	all = all
-except NameError:
-	def any(sequence):
-		for i in sequence:
-			if i: return True
-		return False
-	def all(sequence):
-		for i in sequence:
-			if not i: return False
-		return True
-
-class Command:
-	"""This class simulates a shell command"""
-	def __init__(self,name,parent=None):
-		self.__name = name
-		self.__parent = parent
-	def __getattr__(self,name):
-		if name == "_print": name = "print"
-		return Command(name,self)
-	def __call__(self,*args,**kwargs):
-		cmd = self.__get_recursive_name() + list(args)
-		#print "	",cmd
-		kwargs = dict(kwargs)
-		if "stdout" not in kwargs: kwargs["stdout"] = subprocess.PIPE
-		if "stderr" not in kwargs: kwargs["stderr"] = subprocess.PIPE
-		popen = subprocess.Popen(cmd,**kwargs)
-		m = popen.communicate()
-		ret = popen.wait()
-		if ret:
-			e = CalledProcessError(ret,cmd)
-			e.stdout,e.stderr = m
-			raise e
-		class CommandOutput:
-			def __init__(self,stdout,stderr):
-				self.stdout = stdout
-				self.stderr = stderr
-		return CommandOutput(*m)
-	def __lt__(self,other):
-		cmd = self.__get_recursive_name()
-		#print "	",cmd,"<",other
-		popen = subprocess.Popen(cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
-		m = popen.communicate(other)
-		ret = popen.wait()
-		if ret:
-			e = CalledProcessError(ret,cmd)
-			e.stdout,e.stderr = m
-			raise e
-		class CommandOutput:
-			def __init__(self,stdout,stderr):
-				self.stdout = stdout
-				self.stderr = stderr
-		return CommandOutput(*m)
-		
-	def __get_recursive_name(self,sep=None):
-		m = self
-		l = []
-		while m is not None:
-			l.append(m.__name)
-			m = m.__parent
-		l.reverse()
-		if sep: return sep.join(l)
-		else: return l
-	def __str__(self):
-		return ''%self.__get_recursive_name(sep=" ")
-		
-	def __repr__(self): return self.__str__()
-
-kvmok = Command("kvm-ok")
-getenforce = Command("/usr/sbin/getenforce")
-ip = Command("ip")
-service = Command("service")
-chkconfig = Command("chkconfig")
-updatercd = Command("update-rc.d")
-ufw = Command("ufw")
-iptables = Command("iptables")
-iptablessave = Command("iptables-save")
-augtool = Command("augtool")
-ifconfig = Command("ifconfig")
-ifdown = Command("ifdown")
-ifup = Command("ifup")
-brctl = Command("brctl")
-uuidgen = Command("uuidgen")
-
-
-def is_service_running(servicename):
-	try:
-		o = service(servicename,"status")
-		if distro is Ubuntu:
-			# status in ubuntu does not signal service status via return code
-			if "start/running" in o.stdout: return True
-			return False
-		else:
-			# retcode 0, service running
-			return True
-	except CalledProcessError,e:
-		# retcode nonzero, service not running
-		return False
-
-
-def stop_service(servicename,force=False):
-	# This function is idempotent.  N number of calls have the same result as N+1 number of calls.
-	if is_service_running(servicename) or force: service(servicename,"stop",stdout=None,stderr=None)
-
-
-def disable_service(servicename):
-	# Stops AND disables the service
-	stop_service(servicename)
-	if distro is Ubuntu:
-		updatercd("-f",servicename,"remove",stdout=None,stderr=None)
-	else:
-		chkconfig("--del",servicename,stdout=None,stderr=None)
-
-
-def start_service(servicename,force=False):
-	# This function is idempotent unless force is True.  N number of calls have the same result as N+1 number of calls.
-	if not is_service_running(servicename) or force: service(servicename,"start",stdout=None,stderr=None)
-
-
-def enable_service(servicename,forcestart=False):
-	# Stops AND disables the service
-	if distro is Ubuntu:
-		updatercd("-f",servicename,"remove",stdout=None,stderr=None)
-		updatercd("-f",servicename,"start","2","3","4","5",".",stdout=None,stderr=None)
-	else:
-		chkconfig("--add",servicename,stdout=None,stderr=None)
-		chkconfig("--level","345",servicename,"on",stdout=None,stderr=None)
-	start_service(servicename,force=forcestart)
-
-
-def replace_line(f,startswith,stanza,always_add=False):
-	lines = [ s.strip() for s in file(f).readlines() ]
-	newlines = []
-	replaced = False
-	for line in lines:
-		if line.startswith(startswith):
-			newlines.append(stanza)
-			replaced = True
-		else: newlines.append(line)
-	if not replaced and always_add: newlines.append(stanza)
-	newlines = [ s + '\n' for s in newlines ]
-	file(f,"w").writelines(newlines)
-
-def replace_or_add_line(f,startswith,stanza):
-	return replace_line(f,startswith,stanza,always_add=True)
-	
-# ==================================== CHECK FUNCTIONS ==========================
-
-# If they return without exception, it's okay.  If they raise a CheckFailed exception, that means a condition
-# (generallly one that needs administrator intervention) was detected.
-
-class CheckFailed(Exception): pass
-
-#check function
-def check_hostname():
-	"""If the hostname is a non-fqdn, fail with CalledProcessError.  Else return 0."""
-	try: check_call(["hostname",'--fqdn'])
-	except CalledProcessError:
-		raise CheckFailed("This machine does not have an FQDN (fully-qualified domain name) for a hostname")
-
-#check function
-def check_kvm():
-	if distro in (Fedora,CentOS):
-		if os.path.exists("/dev/kvm"): return True
-		raise CheckFailed("KVM is not correctly installed on this system, or support for it is not enabled in the BIOS")
-	else:
-		try:
-			kvmok()
-			return True
-		except CalledProcessError:
-			raise CheckFailed("KVM is not correctly installed on this system, or support for it is not enabled in the BIOS")
-		except OSError,e:
-			if e.errno is errno.ENOENT: raise CheckFailed("KVM is not correctly installed on this system, or support for it is not enabled in the BIOS")
-			raise
-		return True
-	raise AssertionError, "check_kvm() should have never reached this part"
-
-def check_cgroups():
-	return glob.glob("/*/cpu.shares")
-
-#check function
-def check_selinux():
-	if distro not in [Fedora,CentOS]: return # no selinux outside of those
-	enforcing = False
-	try:
-		output = getenforce().stdout.strip()
-		if "nforcing" in output:
-			enforcing = True
-		if any ( [ s.startswith("SELINUX=enforcing") for s in file("/etc/selinux/config").readlines() ] ):
-			enforcing = True
-	except (IOError,OSError),e:
-		if e.errno == 2: pass
-		else: raise CheckFailed("An unknown error (%s) took place while checking for SELinux"%str(e))
-	if enforcing:
-		raise CheckFailed("SELinux is set to enforcing, please set it to permissive in /etc/selinux/config, then reboot the machine or type setenforce Permissive, after which you can run this program again.")
-
-
-def preflight_checks(do_check_kvm=True):
-	if distro is Ubuntu:
-		preflight_checks = [
-			(check_hostname,"Checking hostname"),
-		]
-	else:
-		preflight_checks = [
-			(check_hostname,"Checking hostname"),
-			(check_selinux,"Checking if SELinux is disabled"),
-		]
-	#preflight_checks.append( (check_cgroups,"Checking if the control groups /cgroup filesystem is mounted") )
-	if do_check_kvm: preflight_checks.append( (check_kvm,"Checking for KVM") )
-	return preflight_checks
-
-
-# ========================== CONFIGURATION TASKS ================================
-
-# A Task is a function that runs within the context of its run() function that runs the function execute(), which does several things, reporting back to the caller as it goes with the use of yield
-# the done() method ought to return true if the task has run in the past
-# the execute() method must implement the configuration act itself
-# run() wraps the output of execute() within a Starting taskname and a Completed taskname message
-# tasks have a name
-
-class TaskFailed(Exception): pass
-	#def __init__(self,code,msg):
-		#Exception.__init__(self,msg)
-		#self.code = code
-
-class ConfigTask:
-	name = "generic config task"
-	autoMode=False
-	def __init__(self): pass
-	def done(self):
-		"""Returns true if the config task has already been done in the past, false if it hasn't"""
-		return False
-	def execute(self):
-		"""Executes the configuration task.  Must not be run if test() returned true.
-		Must yield strings that describe the steps in the task.
-		Raises TaskFailed if the task failed at some step.
-		"""
-	def run (self):
-		stderr("Starting %s"%self.name)
-		it = self.execute()
-		if not it:
-			pass # not a yielding iterable
-		else:
-			for msg in it: stderr(msg)
-		stderr("Completed %s"%self.name)
-	def setAutoMode(self, autoMode):
-		self.autoMode = autoMode
-	def  isAutoMode(self):
-		return self.autoMode
-
-
-# ============== these are some configuration tasks ==================
-
-class SetupNetworking(ConfigTask):
-	name = "network setup"
-	def __init__(self,brname):
-		ConfigTask.__init__(self)
-		self.brname = brname
-		self.runtime_state_changed = False
-		self.was_nm_service_running = None
-		self.was_net_service_running = None
-		if distro in (Fedora, CentOS):
-			self.nmservice = 'NetworkManager'
-			self.netservice = 'network'
-		else:
-			self.nmservice = 'network-manager'
-			self.netservice = 'networking'
-		
-		
-	def done(self):
-		try:
-			if distro in (Fedora,CentOS):
-				alreadysetup = augtool._print("/files/etc/sysconfig/network-scripts/ifcfg-%s"%self.brname).stdout.strip()
-			else:
-				alreadysetup = augtool.match("/files/etc/network/interfaces/iface",self.brname).stdout.strip()
-			return alreadysetup
-		except OSError,e:
-			if e.errno is 2: raise TaskFailed("augtool has not been properly installed on this system")
-			raise
-
-	def restore_state(self):
-		if not self.runtime_state_changed: return
-		
-		try:
-			o = ifconfig(self.brname)
-			bridge_exists = True
-		except CalledProcessError,e:
-			print e.stdout + e.stderr
-			bridge_exists = False
-			
-		if bridge_exists:
-			ifconfig(self.brname,"0.0.0.0")
-			if hasattr(self,"old_net_device"):
-				ifdown(self.old_net_device)
-				ifup(self.old_net_device)
-			try: ifdown(self.brname)
-			except CalledProcessError: pass
-			try: ifconfig(self.brname,"down")
-			except CalledProcessError: pass
-			try: brctl("delbr",self.brname)
-			except CalledProcessError: pass
-			try: ifdown("--force",self.brname)
-			except CalledProcessError: pass
-		
-		
-		if self.was_net_service_running is None:
-			# we do nothing
-			pass
-		elif self.was_net_service_running == False:
-			stop_service(self.netservice,force=True)
-			time.sleep(1)
-		else:
-			# we altered service configuration
-			stop_service(self.netservice,force=True)
-			time.sleep(1)
-			try: start_service(self.netservice,force=True)
-			except CalledProcessError,e:
-				if e.returncode == 1: pass
-				else: raise
-			time.sleep(1)
-		
-		if self.was_nm_service_running is None:
-			 # we do nothing
-			 pass
-		elif self.was_nm_service_running == False:
-			stop_service(self.nmservice,force=True)
-			time.sleep(1)
-		else:
-			# we altered service configuration
-			stop_service(self.nmservice,force=True)
-			time.sleep(1)
-			start_service(self.nmservice,force=True)
-			time.sleep(1)
-		
-		self.runtime_state_changed = False
-
-	def execute(self):
-		yield "Determining default route"
-		routes = ip.route().stdout.splitlines()
-		defaultroute = [ x for x in routes if x.startswith("default") ]
-		if not defaultroute: raise TaskFailed("Your network configuration does not have a default route")
-		
-		dev = defaultroute[0].split()[4]
-		yield "Default route assigned to device %s"%dev
-		
-		self.old_net_device = dev
-		
-		if distro in (Fedora, CentOS):
-			inconfigfile = "/".join(augtool.match("/files/etc/sysconfig/network-scripts/*/DEVICE",dev).stdout.strip().split("/")[:-1])
-			if not inconfigfile: raise TaskFailed("Device %s has not been set up in /etc/sysconfig/network-scripts"%dev)
-			pathtoconfigfile = inconfigfile[6:]
-
-		if distro in (Fedora, CentOS):
-			automatic = augtool.match("%s/ONBOOT"%inconfigfile,"yes").stdout.strip()
-		else:
-			automatic = augtool.match("/files/etc/network/interfaces/auto/*/",dev).stdout.strip()
-		if not automatic:
-			if distro is Fedora: raise TaskFailed("Device %s has not been set up in %s as automatic on boot"%dev,pathtoconfigfile)
-			else: raise TaskFailed("Device %s has not been set up in /etc/network/interfaces as automatic on boot"%dev)
-			
-		if distro not in (Fedora , CentOS):
-			inconfigfile = augtool.match("/files/etc/network/interfaces/iface",dev).stdout.strip()
-			if not inconfigfile: raise TaskFailed("Device %s has not been set up in /etc/network/interfaces"%dev)
-
-		if distro in (Fedora, CentOS):
-			isstatic = augtool.match(inconfigfile + "/BOOTPROTO","none").stdout.strip()
-			if not isstatic: isstatic = augtool.match(inconfigfile + "/BOOTPROTO","static").stdout.strip()
-		else:
-			isstatic = augtool.match(inconfigfile + "/method","static").stdout.strip()
-		if not isstatic:
-			if distro in (Fedora, CentOS): raise TaskFailed("Device %s has not been set up as a static device in %s"%(dev,pathtoconfigfile))
-			else: raise TaskFailed("Device %s has not been set up as a static device in /etc/network/interfaces"%dev)
-
-		if is_service_running(self.nmservice):
-			self.was_nm_service_running = True
-			yield "Stopping NetworkManager to avoid automatic network reconfiguration"
-			disable_service(self.nmservice)
-		else:
-			self.was_nm_service_running = False
-			
-		if is_service_running(self.netservice):
-			self.was_net_service_running = True
-		else:
-			self.was_net_service_running = False
-			
-		yield "Creating Cloud bridging device and making device %s member of this bridge"%dev
-
-		if distro in (Fedora, CentOS):
-			ifcfgtext = file(pathtoconfigfile).read()
-			newf = "/etc/sysconfig/network-scripts/ifcfg-%s"%self.brname
-			#def restore():
-				#try: os.unlink(newf)
-				#except OSError,e:
-					#if errno == 2: pass
-					#raise
-				#try: file(pathtoconfigfile,"w").write(ifcfgtext)
-				#except OSError,e: raise
-
-			f = file(newf,"w") ; f.write(ifcfgtext) ; f.flush() ; f.close()
-			innewconfigfile = "/files" + newf
-
-			script = """set %s/DEVICE %s
-set %s/NAME %s
-set %s/BRIDGE_PORTS %s
-set %s/TYPE Bridge
-rm %s/HWADDR
-rm %s/UUID
-rm %s/HWADDR
-rm %s/IPADDR
-rm %s/DEFROUTE
-rm %s/NETMASK
-rm %s/GATEWAY
-rm %s/BROADCAST
-rm %s/NETWORK
-set %s/BRIDGE %s
-save"""%(innewconfigfile,self.brname,innewconfigfile,self.brname,innewconfigfile,dev,
-			innewconfigfile,innewconfigfile,innewconfigfile,innewconfigfile,
-			inconfigfile,inconfigfile,inconfigfile,inconfigfile,inconfigfile,inconfigfile,
-			inconfigfile,self.brname)
-			
-			yield "Executing the following reconfiguration script:\n%s"%script
-			
-			try:
-				returned = augtool < script
-				if "Saved 2 file" not in returned.stdout:
-					print returned.stdout + returned.stderr
-					#restore()
-					raise TaskFailed("Network reconfiguration failed.")
-				else:
-					yield "Network reconfiguration complete"
-			except CalledProcessError,e:
-				#restore()
-				print e.stdout + e.stderr
-				raise TaskFailed("Network reconfiguration failed")
-		else: # Not fedora
-			backup = file("/etc/network/interfaces").read(-1)
-			#restore = lambda: file("/etc/network/interfaces","w").write(backup)
-
-			script = """set %s %s
-set %s %s
-set %s/bridge_ports %s
-save"""%(automatic,self.brname,inconfigfile,self.brname,inconfigfile,dev)
-			
-			yield "Executing the following reconfiguration script:\n%s"%script
-			
-			try:
-				returned = augtool < script
-				if "Saved 1 file" not in returned.stdout:
-					#restore()
-					raise TaskFailed("Network reconfiguration failed.")
-				else:
-					yield "Network reconfiguration complete"
-			except CalledProcessError,e:
-				#restore()
-				print e.stdout + e.stderr
-				raise TaskFailed("Network reconfiguration failed")
-		
-		yield "We are going to restart network services now, to make the network changes take effect.  Hit ENTER when you are ready."
-		if self.isAutoMode(): pass
-        	else:
-		    raw_input()
-		
-		# if we reach here, then if something goes wrong we should attempt to revert the runinng state
-		# if not, then no point
-		self.runtime_state_changed = True
-		
-		yield "Enabling and restarting non-NetworkManager networking"
-		if distro is Ubuntu: ifup(self.brname,stdout=None,stderr=None)
-		stop_service(self.netservice)
-		try: enable_service(self.netservice,forcestart=True)
-		except CalledProcessError,e:
-			if e.returncode == 1: pass
-			else: raise
-		
-		yield "Verifying that the bridge is up"
-		try:
-			o = ifconfig(self.brname)
-		except CalledProcessError,e:
-			print e.stdout + e.stderr
-			raise TaskFailed("The bridge could not be set up properly")
-		
-		yield "Networking restart done"
-
-
-class SetupCgConfig(ConfigTask):
-	name = "control groups configuration"
-	
-	def done(self):
-		
-		try:
-			return "group virt" in file("/etc/cgconfig.conf","r").read(-1)
-		except IOError,e:
-			if e.errno is 2: raise TaskFailed("cgconfig has not been properly installed on this system")
-			raise
-		
-	def execute(self):
-		cgconfig = file("/etc/cgconfig.conf","r").read(-1)
-		cgconfig = cgconfig + """
-group virt {
-	cpu {
-		cpu.shares = 9216;
-	}
-}
-"""
-		file("/etc/cgconfig.conf","w").write(cgconfig)
-		
-		stop_service("cgconfig")
-		enable_service("cgconfig",forcestart=True)
-
-
-class SetupCgRules(ConfigTask):
-	name = "control group rules setup"
-	cfgline = "root:/usr/sbin/libvirtd	cpu	virt/"
-	
-	def done(self):
-		try:
-			return self.cfgline in file("/etc/cgrules.conf","r").read(-1)
-		except IOError,e:
-			if e.errno is 2: raise TaskFailed("cgrulesd has not been properly installed on this system")
-			raise
-	
-	def execute(self):
-		cgrules = file("/etc/cgrules.conf","r").read(-1)
-		cgrules = cgrules + "\n" + self.cfgline + "\n"
-		file("/etc/cgrules.conf","w").write(cgrules)
-		
-		stop_service("cgred")
-		enable_service("cgred")
-
-
-class SetupCgroupControllers(ConfigTask):
-	name = "qemu cgroup controllers setup"
-	cfgline = "cgroup_controllers = [ \"cpu\" ]"
-	filename = "/etc/libvirt/qemu.conf"
-	
-	def done(self):
-		try:
-			return self.cfgline in file(self.filename,"r").read(-1)
-		except IOError,e:
-			if e.errno is 2: raise TaskFailed("qemu has not been properly installed on this system")
-			raise
-	
-	def execute(self):
-		libvirtqemu = file(self.filename,"r").read(-1)
-		libvirtqemu = libvirtqemu + "\n" + self.cfgline + "\n"
-		file("/etc/libvirt/qemu.conf","w").write(libvirtqemu)
-
-
-class SetupSecurityDriver(ConfigTask):
-	name = "security driver setup"
-	cfgline = "security_driver = \"none\""
-	filename = "/etc/libvirt/qemu.conf"
-	
-	def done(self):
-		try:
-			return self.cfgline in file(self.filename,"r").read(-1)
-		except IOError,e:
-			if e.errno is 2: raise TaskFailed("qemu has not been properly installed on this system")
-			raise
-	
-	def execute(self):
-		libvirtqemu = file(self.filename,"r").read(-1)
-		libvirtqemu = libvirtqemu + "\n" + self.cfgline + "\n"
-		file("/etc/libvirt/qemu.conf","w").write(libvirtqemu)
-
-
-class SetupLibvirt(ConfigTask):
-	name = "libvirt setup"
-	cfgline = "export CGROUP_DAEMON='cpu:/virt'"
-	def done(self):
-		try:
-			if distro in (Fedora,CentOS): 	 libvirtfile = "/etc/sysconfig/libvirtd"
-			elif distro is Ubuntu:	 libvirtfile = "/etc/default/libvirt-bin"
-			else: raise AssertionError, "We should not reach this"
-			return self.cfgline in file(libvirtfile,"r").read(-1)
-		except IOError,e:
-			if e.errno is 2: raise TaskFailed("libvirt has not been properly installed on this system")
-			raise
-	
-	def execute(self):
-		if distro in (Fedora,CentOS): 	 libvirtfile = "/etc/sysconfig/libvirtd"
-		elif distro is Ubuntu:	 libvirtfile = "/etc/default/libvirt-bin"
-		else: raise AssertionError, "We should not reach this"
-		libvirtbin = file(libvirtfile,"r").read(-1)
-		libvirtbin = libvirtbin + "\n" + self.cfgline + "\n"
-		file(libvirtfile,"w").write(libvirtbin)
-		
-		if distro in (CentOS, Fedora):	svc = "libvirtd"
-		else:					svc = "libvirt-bin"
-		stop_service(svc)
-		enable_service(svc)
-
-class SetupLiveMigration(ConfigTask):
-	name = "live migration setup"
-	stanzas = (
-			"listen_tcp=1",
-			'tcp_port="16509"',
-			'auth_tcp="none"',
-			"listen_tls=0",
-	)
-	
-	def done(self):
-		try:
-			lines = [ s.strip() for s in file("/etc/libvirt/libvirtd.conf").readlines() ]
-			if all( [ stanza in lines for stanza in self.stanzas ] ): return True
-		except IOError,e:
-			if e.errno is 2: raise TaskFailed("libvirt has not been properly installed on this system")
-			raise
-	
-	def execute(self):
-		
-		for stanza in self.stanzas:
-			startswith = stanza.split("=")[0] + '='
-			replace_or_add_line("/etc/libvirt/libvirtd.conf",startswith,stanza)
-
-		if distro is Fedora:
-			replace_or_add_line("/etc/sysconfig/libvirtd","LIBVIRTD_ARGS=","LIBVIRTD_ARGS=-l")
-		
-		elif distro is Ubuntu:
-			if os.path.exists("/etc/init/libvirt-bin.conf"):
-				replace_line("/etc/init/libvirt-bin.conf", "exec /usr/sbin/libvirtd","exec /usr/sbin/libvirtd -d -l")
-			else:
-				replace_or_add_line("/etc/default/libvirt-bin","libvirtd_opts=","libvirtd_opts='-l -d'")
-			
-		else:
-			raise AssertionError("Unsupported distribution")
-		
-		if distro in (CentOS, Fedora):	svc = "libvirtd"
-		else:						svc = "libvirt-bin"
-		stop_service(svc)
-		enable_service(svc)
-
-
-class SetupRequiredServices(ConfigTask):
-	name = "required services setup"
-	
-	def done(self):
-		if distro is Fedora:  nfsrelated = "rpcbind nfslock"
-		elif distro is CentOS: nfsrelated = "portmap nfslock"
-		else: return True
-		return all( [ is_service_running(svc) for svc in nfsrelated.split() ] )
-		
-	def execute(self):
-
-		if distro is Fedora:  nfsrelated = "rpcbind nfslock"
-		elif distro is CentOS: nfsrelated = "portmap nfslock"
-		else: raise AssertionError("Unsupported distribution")
-
-		for svc in nfsrelated.split(): enable_service(svc)
-
-
-class SetupFirewall(ConfigTask):
-	name = "firewall setup"
-	
-	def done(self):
-		
-		if distro in (Fedora, CentOS):
-			if not os.path.exists("/etc/sysconfig/iptables"): return True
-			if ":on" not in chkconfig("--list","iptables").stdout: return True
-		else:
-			if "Status: active" not in ufw.status().stdout: return True
-			if not os.path.exists("/etc/ufw/before.rules"): return True
-		rule = "-p tcp -m tcp --dport 16509 -j ACCEPT"
-		if rule in iptablessave().stdout: return True
-		return False
-	
-	def execute(self):
-		ports = "22 1798 16509".split()
-		if distro in (Fedora , CentOS):
-			for p in ports: iptables("-I","INPUT","1","-p","tcp","--dport",p,'-j','ACCEPT')
-			o = service.iptables.save() ; print o.stdout + o.stderr
-		else:
-			for p in ports: ufw.allow(p)
-
-
-class SetupFirewall2(ConfigTask):
-	# this closes bug 4371
-	name = "additional firewall setup"
-	def __init__(self,brname):
-		ConfigTask.__init__(self)
-		self.brname = brname
-	
-	def done(self):
-		
-		if distro in (Fedora, CentOS):
-			if not os.path.exists("/etc/sysconfig/iptables"): return True
-			if ":on" not in chkconfig("--list","iptables").stdout: return True
-			rule = "FORWARD -i %s -o %s -j ACCEPT"%(self.brname,self.brname)
-			if rule in iptablessave().stdout: return True
-			return False
-		else:
-			if "Status: active" not in ufw.status().stdout: return True
-			if not os.path.exists("/etc/ufw/before.rules"): return True
-			rule = "-A ufw-before-forward -i %s -o %s -j ACCEPT"%(self.brname,self.brname)
-			if rule in file("/etc/ufw/before.rules").read(-1): return True
-			return False
-		
-	def execute(self):
-		
-		yield "Permitting traffic in the bridge interface, migration port and for VNC ports"
-		
-		if distro in (Fedora , CentOS):
-			
-			for rule in (
-				"-I FORWARD -i %s -o %s -j ACCEPT"%(self.brname,self.brname),
-				"-I INPUT 1 -p tcp --dport 5900:6100 -j ACCEPT",
-				"-I INPUT 1 -p tcp --dport 49152:49216 -j ACCEPT",
-				):
-				args = rule.split()
-				o = iptables(*args)
-			service.iptables.save(stdout=None,stderr=None)
-			
-		else:
-			
-			rule = "-A ufw-before-forward -i %s -o %s -j ACCEPT"%(self.brname,self.brname)
-			text = file("/etc/ufw/before.rules").readlines()
-			newtext = []
-			for line in text:
-				if line.startswith("COMMIT"):
-					newtext.append(rule + "\n")
-				newtext.append(line)
-			file("/etc/ufw/before.rules","w").writelines(newtext)
-			ufw.allow.proto.tcp("from","any","to","any","port","5900:6100")
-			ufw.allow.proto.tcp("from","any","to","any","port","49152:49216")
-
-			stop_service("ufw")
-			start_service("ufw")
-
-
-# Tasks according to distribution -- at some point we will split them in separate modules
-
-def config_tasks(brname):
-	if distro is CentOS:
-		config_tasks = (
-			SetupNetworking(brname),
-			SetupLibvirt(),
-			SetupRequiredServices(),
-			SetupFirewall(),
-			SetupFirewall2(brname),
-		)
-	elif distro in (Ubuntu,Fedora):
-		config_tasks = (
-			SetupNetworking(brname),
-			SetupCgConfig(),
-			SetupCgRules(),
-			SetupCgroupControllers(),
-			SetupSecurityDriver(),
-			SetupLibvirt(),
-			SetupLiveMigration(),
-			SetupRequiredServices(),
-			SetupFirewall(),
-			SetupFirewall2(brname),
-		)
-	else:
-		raise AssertionError("Unknown distribution")
-	return config_tasks
-
-
-def backup_etc(targetdir):
-	if not targetdir.endswith("/"): targetdir += "/"
-	check_call( ["mkdir","-p",targetdir] )
-	rsynccall = ["rsync","-ax","--delete"] + ["/etc/",targetdir]
-	check_call( rsynccall )
-def restore_etc(targetdir):
-	if not targetdir.endswith("/"): targetdir += "/"
-	rsynccall = ["rsync","-ax","--delete"] + [targetdir,"/etc/"]
-	check_call( rsynccall )
-def remove_backup(targetdir):
-	check_call( ["rm","-rf",targetdir] )
-
-def list_zonespods(host):
-	text = urllib2.urlopen('http://%s:8096/client/api?command=listPods'%host).read(-1)
-	dom = xml.dom.minidom.parseString(text) 
-	x = [ (zonename,podname)
-		for pod in dom.childNodes[0].childNodes  
-		for podname in [ x.childNodes[0].wholeText for x in pod.childNodes if x.tagName == "name" ] 
-		for zonename in  [ x.childNodes[0].wholeText for x in pod.childNodes if x.tagName == "zonename" ]
-		]
-	return x
-	
-def prompt_for_hostpods(zonespods):
-	"""Ask user to select one from those zonespods
-	Returns (zone,pod) or None if the user made the default selection."""
-	while True:
-		stderr("Type the number of the zone and pod combination this host belongs to (hit ENTER to skip this step)")
-		print "  N) ZONE, POD" 
-		print "================"
-		for n,(z,p) in enumerate(zonespods):
-			print "%3d) %s, %s"%(n,z,p)
-		print "================"
-		zoneandpod = raw_input().strip()
-		
-		if not zoneandpod:
-			# we go with default, do not touch anything, just break
-			return None
-		
-		try:
-			# if parsing fails as an int, just vomit and retry
-			zoneandpod = int(zoneandpod)
-			if zoneandpod >= len(zonespods) or zoneandpod < 0: raise ValueError, "%s out of bounds"%zoneandpod
-		except ValueError,e:
-			stderr(str(e))
-			continue # re-ask
-		
-		# oh yeah, the int represents an valid zone and pod index in the array
-		return zonespods[zoneandpod]
-	
-# this configures the agent
-
-def setup_agent_config(configfile, host, zone, pod, guid):
-	stderr("Examining Agent configuration")
-	fn = configfile
-	text = file(fn).read(-1)
-	lines = [ s.strip() for s in text.splitlines() ]
-	confopts = dict([ m.split("=",1) for m in lines if "=" in m and not m.startswith("#") ])
-	confposes = dict([ (m.split("=",1)[0],n) for n,m in enumerate(lines) if "=" in m and not m.startswith("#") ])
-	
-	if guid != None:
-		confopts['guid'] = guid
-	else:
-		if not "guid" in confopts:
-			stderr("Generating GUID for this Agent")
-			confopts['guid'] = uuidgen().stdout.strip()
-	
-	if host == None:
-		try: host = confopts["host"]
-		except KeyError: host = "localhost"
-		stderr("Please enter the host name of the management server that this agent will connect to: (just hit ENTER to go with %s)",host)
-		newhost = raw_input().strip()
-		if newhost: host = newhost
-
-	confopts["host"] = host
-	
-	stderr("Querying %s for zones and pods",host)
-	
-	try:
-	    if zone == None or pod == None:
-			x = list_zonespods(confopts['host'])
-			zoneandpod = prompt_for_hostpods(x)
-			if zoneandpod:
-				confopts["zone"],confopts["pod"] = zoneandpod
-				stderr("You selected zone %s pod %s",confopts["zone"],confopts["pod"])
-			else:
-				stderr("Skipped -- using the previous zone %s pod %s",confopts["zone"],confopts["pod"])
-	    else:
-			confopts["zone"] = zone
-			confopts["pod"] = pod
-	except (urllib2.URLError,urllib2.HTTPError),e:
-		stderr("Query failed: %s.  Defaulting to zone %s pod %s",str(e),confopts["zone"],confopts["pod"])
-
-	for opt,val in confopts.items():
-		line = "=".join([opt,val])
-		if opt not in confposes: lines.append(line)
-		else: lines[confposes[opt]] = line
-	
-	text = "\n".join(lines)
-	file(fn,"w").write(text)
-
-def setup_consoleproxy_config(configfile, host, zone, pod):
-	stderr("Examining Console Proxy configuration")
-	fn = configfile
-	text = file(fn).read(-1)
-	lines = [ s.strip() for s in text.splitlines() ]
-	confopts = dict([ m.split("=",1) for m in lines if "=" in m and not m.startswith("#") ])
-	confposes = dict([ (m.split("=",1)[0],n) for n,m in enumerate(lines) if "=" in m and not m.startswith("#") ])
-
-	if not "guid" in confopts:
-		stderr("Generating GUID for this Console Proxy")
-		confopts['guid'] = uuidgen().stdout.strip()
-
-        if host == None:
-		try: host = confopts["host"]
-		except KeyError: host = "localhost"
-		stderr("Please enter the host name of the management server that this console-proxy will connect to: (just hit ENTER to go with %s)",host)
-		newhost = raw_input().strip()
-		if newhost: host = newhost
-	confopts["host"] = host
-
-	stderr("Querying %s for zones and pods",host)
-	
-	try:
-                if zone == None or pod == None:
-			x = list_zonespods(confopts['host'])
-			zoneandpod = prompt_for_hostpods(x)
-			if zoneandpod:
-				confopts["zone"],confopts["pod"] = zoneandpod
-				stderr("You selected zone %s pod %s",confopts["zone"],confopts["pod"])
-			else:
-				stderr("Skipped -- using the previous zone %s pod %s",confopts["zone"],confopts["pod"])
-		else:
-			confopts["zone"] = zone
-			confopts["pod"] = pod
-	except (urllib2.URLError,urllib2.HTTPError),e:
-		stderr("Query failed: %s.  Defaulting to zone %s pod %s",str(e),confopts["zone"],confopts["pod"])
-
-	for opt,val in confopts.items():
-		line = "=".join([opt,val])
-		if opt not in confposes: lines.append(line)
-		else: lines[confposes[opt]] = line
-	
-	text = "\n".join(lines)
-	file(fn,"w").write(text)
-
-# =========================== DATABASE MIGRATION SUPPORT CODE ===================
-
-# Migrator, Migratee and Evolvers -- this is the generic infrastructure.
-# To actually implement Cloud.com-specific code, search "Cloud.com-specific evolvers and context"
-
-
-class MigratorException(Exception): pass
-class NoMigrationPath(MigratorException): pass
-class NoMigrator(MigratorException): pass
-
-INITIAL_LEVEL = '-'
-
-class Migrator:
-	"""Migrator class.
-	
-	The migrator gets a list of Python objects, and discovers MigrationSteps in it. It then sorts the steps into a chain, based on the attributes from_level and to_level in each one of the steps.
-	
-	When the migrator's run(context) is called, the chain of steps is applied sequentially on the context supplied to run(), in the order of the chain of steps found at discovery time.  See the documentation for the MigrationStep class for information on how that happens.
-	"""
-	
-	def __init__(self,evolver_source):
-		self.discover_evolvers(evolver_source)
-		self.sort_evolvers()
-		
-	def discover_evolvers(self,source):
-		self.evolvers = []
-		for val in source:
-			if hasattr(val,"from_level") and hasattr(val,"to_level") and val.to_level:
-				self.evolvers.append(val)
-	
-	def sort_evolvers(self):
-		new = []
-		while self.evolvers:
-			if not new:
-				try: idx= [ i for i,s in enumerate(self.evolvers)
-					if s.from_level == INITIAL_LEVEL ][0] # initial evolver
-				except IndexError,e:
-					raise IndexError, "no initial evolver (from_level is None) could be found"
-			else:
-				try: idx= [ i for i,s in enumerate(self.evolvers)
-					if new[-1].to_level == s.from_level ][0]
-				except IndexError,e:
-					raise IndexError, "no evolver could be found to evolve from level %s"%new[-1].to_level
-			new.append(self.evolvers.pop(idx))
-		self.evolvers = new
-	
-	def get_evolver_chain(self):
-		return [ (s.from_level, s.to_level, s) for s in self.evolvers ]
-		
-	def get_evolver_by_starting_level(self,level):
-		try: return [ s for s in self.evolvers if s.from_level == level][0]
-		except IndexError: raise NoMigrator, "No evolver knows how to evolve the database from schema level %r"%level
-	
-	def get_evolver_by_ending_level(self,level):
-		try: return [ s for s in self.evolvers if s.to_level == level][0]
-		except IndexError: raise NoMigrator, "No evolver knows how to evolve the database to schema level %r"%level
-	
-	def run(self, context, dryrun = False, starting_level = None, ending_level = None):
-		"""Runs each one of the steps in sequence, passing the migration context to each. At the end of the process, context.commit() is called to save the changes, or context.rollback() is called if dryrun = True.
-		
-		If starting_level is not specified, then the context.get_schema_level() is used to find out at what level the context is at.  Then starting_level is set to that.
-		
-		If ending_level is not specified, then the evolvers will run till the end of the chain."""
-		
-		assert dryrun is False # NOT IMPLEMENTED, prolly gonna implement by asking the context itself to remember its state
-		
-		starting_level = starting_level or context.get_schema_level() or self.evolvers[0].from_level
-		ending_level = ending_level or self.evolvers[-1].to_level
-		
-		evolution_path = self.evolvers
-		idx = evolution_path.index(self.get_evolver_by_starting_level(starting_level))
-		evolution_path = evolution_path[idx:]
-		try: idx = evolution_path.index(self.get_evolver_by_ending_level(ending_level))
-		except ValueError:
-			raise NoEvolutionPath, "No evolution path from schema level %r to schema level %r" % \
-				(starting_level,ending_level)
-		evolution_path = evolution_path[:idx+1]
-		
-		logging.info("Starting migration on %s"%context)
-		
-		for ec in evolution_path:
-			assert ec.from_level == context.get_schema_level()
-			evolver = ec(context=context)
-			logging.info("%s (from level %s to level %s)",
-				evolver,
-				evolver.from_level,
-				evolver.to_level)
-			#try:
-			evolver.run()
-			#except:
-				#context.rollback()
-				#raise
-			context.set_schema_level(evolver.to_level)
-			#context.commit()
-			logging.info("%s is now at level %s",context,context.get_schema_level())
-		
-		#if dryrun: # implement me with backup and restore
-			#logging.info("Rolling back changes on %s",context)
-			#context.rollback()
-		#else:
-			#logging.info("Committing changes on %s",context)
-			#context.commit()
-		
-		logging.info("Migration finished")
-		
-
-class MigrationStep:
-	"""Base MigrationStep class, aka evolver.
-	
-	You develop your own steps, and then pass a list of those steps to the
-	Migrator instance that will run them in order.
-	
-	When the migrator runs, it will take the list of steps you gave him,
-	and, for each step:
-	
-	a) instantiate it, passing the context you gave to the migrator
-	   into the step's __init__().
-	b) run() the method in the migration step.
-	
-	As you can see, the default MigrationStep constructor makes the passed
-	context available as self.context in the methods of your step.
-	
-	Each step has two member vars that determine in which order they
-	are run, and if they need to run:
-	
-	- from_level = the schema level that the database should be at,
-		       before running the evolver
-		       The value None has special meaning here, it
-		       means the first evolver that should be run if the
-		       database does not have a schema level yet.
-	- to_level =   the schema level number that the database will be at
-		       after the evolver has run
-	"""
-	
-	# Implement these attributes in your steps
-	from_level = None
-	to_level = None
-	
-	def __init__(self,context):
-		self.context = context
-		
-	def run(self):
-		raise NotImplementedError
-
-
-class MigrationContext:
-	def __init__(self): pass
-	def commit(self):raise NotImplementedError
-	def rollback(self):raise NotImplementedError
-	def get_schema_level(self):raise NotImplementedError
-	def set_schema_level(self,l):raise NotImplementedError
-
-
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""Cloud.com Python utility library"""
+
+import sys, os, subprocess, errno, re, time, glob
+import urllib2
+import xml.dom.minidom
+import logging
+import socket
+
+# exit() error constants
+E_GENERIC= 1
+E_NOKVM = 2
+E_NODEFROUTE = 3
+E_DHCP = 4
+E_NOPERSISTENTNET = 5
+E_NETRECONFIGFAILED = 6
+E_VIRTRECONFIGFAILED = 7
+E_FWRECONFIGFAILED = 8
+E_AGENTRECONFIGFAILED = 9
+E_AGENTFAILEDTOSTART = 10
+E_NOFQDN = 11
+E_SELINUXENABLED = 12
+try: E_USAGE = os.EX_USAGE
+except AttributeError: E_USAGE = 64
+
+E_NEEDSMANUALINTERVENTION = 13
+E_INTERRUPTED = 14
+E_SETUPFAILED = 15
+E_UNHANDLEDEXCEPTION = 16
+E_MISSINGDEP = 17
+
+Unknown = 0
+Fedora = 1
+CentOS = 2
+Ubuntu = 3
+
+IPV4 = 4
+IPV6 = 6
+
+#=================== DISTRIBUTION DETECTION =================
+
+if os.path.exists("/etc/fedora-release"): distro = Fedora
+elif os.path.exists("/etc/centos-release"): distro = CentOS
+elif os.path.exists("/etc/redhat-release") and not os.path.exists("/etc/fedora-release"): distro = CentOS
+elif os.path.exists("/etc/legal") and "Ubuntu" in file("/etc/legal").read(-1): distro = Ubuntu
+else: distro = Unknown
+
+logFileName=None
+# ==================  LIBRARY UTILITY CODE=============
+def setLogFile(logFile):
+	global logFileName
+	logFileName=logFile
+def read_properties(propfile):
+	if not hasattr(propfile,"read"): propfile = file(propfile)
+	properties = propfile.read().splitlines()
+	properties = [ s.strip() for s in properties ]
+	properties = [ s for s in properties if
+			s and
+			not s.startswith("#") and
+			not s.startswith(";") ]
+	#[ logging.debug("Valid config file line: %s",s) for s in properties ]
+	proppairs = [ s.split("=",1) for s in properties ]
+	return dict(proppairs)
+
+def stderr(msgfmt,*args):
+	"""Print a message to stderr, optionally interpolating the arguments into it"""
+	msgfmt += "\n"
+	if logFileName != None:
+		sys.stderr = open(logFileName, 'a+')
+	if args: sys.stderr.write(msgfmt%args)
+	else: sys.stderr.write(msgfmt)
+
+def exit(errno=E_GENERIC,message=None,*args):
+	"""Exit with an error status code, printing a message to stderr if specified"""
+	if message: stderr(message,*args)
+	sys.exit(errno)
+
+def resolve(host,port):
+	return [ (x[4][0],len(x[4])+2) for x in socket.getaddrinfo(host,port,socket.AF_UNSPEC,socket.SOCK_STREAM, 0, socket.AI_PASSIVE) ]
+	
+def resolves_to_ipv6(host,port):
+	return resolve(host,port)[0][1] == IPV6
+
+###add this to Python 2.4, patching the subprocess module at runtime
+if hasattr(subprocess,"check_call"):
+	from subprocess import CalledProcessError, check_call
+else:
+	class CalledProcessError(Exception):
+		def __init__(self, returncode, cmd):
+			self.returncode = returncode ; self.cmd = cmd
+		def __str__(self): return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
+	subprocess.CalledProcessError = CalledProcessError
+	
+	def check_call(*popenargs, **kwargs):
+		retcode = subprocess.call(*popenargs, **kwargs)
+		cmd = kwargs.get("args")
+		if cmd is None: cmd = popenargs[0]
+		if retcode: raise subprocess.CalledProcessError(retcode, cmd)
+		return retcode
+	subprocess.check_call = check_call
+
+# python 2.4 does not have this
+try:
+	any = any
+	all = all
+except NameError:
+	def any(sequence):
+		for i in sequence:
+			if i: return True
+		return False
+	def all(sequence):
+		for i in sequence:
+			if not i: return False
+		return True
+
+class Command:
+	"""This class simulates a shell command"""
+	def __init__(self,name,parent=None):
+		self.__name = name
+		self.__parent = parent
+	def __getattr__(self,name):
+		if name == "_print": name = "print"
+		return Command(name,self)
+	def __call__(self,*args,**kwargs):
+		cmd = self.__get_recursive_name() + list(args)
+		#print "	",cmd
+		kwargs = dict(kwargs)
+		if "stdout" not in kwargs: kwargs["stdout"] = subprocess.PIPE
+		if "stderr" not in kwargs: kwargs["stderr"] = subprocess.PIPE
+		popen = subprocess.Popen(cmd,**kwargs)
+		m = popen.communicate()
+		ret = popen.wait()
+		if ret:
+			e = CalledProcessError(ret,cmd)
+			e.stdout,e.stderr = m
+			raise e
+		class CommandOutput:
+			def __init__(self,stdout,stderr):
+				self.stdout = stdout
+				self.stderr = stderr
+		return CommandOutput(*m)
+	def __lt__(self,other):
+		cmd = self.__get_recursive_name()
+		#print "	",cmd,"<",other
+		popen = subprocess.Popen(cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
+		m = popen.communicate(other)
+		ret = popen.wait()
+		if ret:
+			e = CalledProcessError(ret,cmd)
+			e.stdout,e.stderr = m
+			raise e
+		class CommandOutput:
+			def __init__(self,stdout,stderr):
+				self.stdout = stdout
+				self.stderr = stderr
+		return CommandOutput(*m)
+		
+	def __get_recursive_name(self,sep=None):
+		m = self
+		l = []
+		while m is not None:
+			l.append(m.__name)
+			m = m.__parent
+		l.reverse()
+		if sep: return sep.join(l)
+		else: return l
+	def __str__(self):
+		return ''%self.__get_recursive_name(sep=" ")
+		
+	def __repr__(self): return self.__str__()
+
+kvmok = Command("kvm-ok")
+getenforce = Command("/usr/sbin/getenforce")
+ip = Command("ip")
+service = Command("service")
+chkconfig = Command("chkconfig")
+updatercd = Command("update-rc.d")
+ufw = Command("ufw")
+iptables = Command("iptables")
+iptablessave = Command("iptables-save")
+augtool = Command("augtool")
+ifconfig = Command("ifconfig")
+ifdown = Command("ifdown")
+ifup = Command("ifup")
+brctl = Command("brctl")
+uuidgen = Command("uuidgen")
+
+
+def is_service_running(servicename):
+	try:
+		o = service(servicename,"status")
+		if distro is Ubuntu:
+			# status in ubuntu does not signal service status via return code
+			if "start/running" in o.stdout: return True
+			return False
+		else:
+			# retcode 0, service running
+			return True
+	except CalledProcessError,e:
+		# retcode nonzero, service not running
+		return False
+
+
+def stop_service(servicename,force=False):
+	# This function is idempotent.  N number of calls have the same result as N+1 number of calls.
+	if is_service_running(servicename) or force: service(servicename,"stop",stdout=None,stderr=None)
+
+
+def disable_service(servicename):
+	# Stops AND disables the service
+	stop_service(servicename)
+	if distro is Ubuntu:
+		updatercd("-f",servicename,"remove",stdout=None,stderr=None)
+	else:
+		chkconfig("--del",servicename,stdout=None,stderr=None)
+
+
+def start_service(servicename,force=False):
+	# This function is idempotent unless force is True.  N number of calls have the same result as N+1 number of calls.
+	if not is_service_running(servicename) or force: service(servicename,"start",stdout=None,stderr=None)
+
+
+def enable_service(servicename,forcestart=False):
+	# Enables the service at boot AND starts it (optionally forcing a restart)
+	if distro is Ubuntu:
+		updatercd("-f",servicename,"remove",stdout=None,stderr=None)
+		updatercd("-f",servicename,"start","2","3","4","5",".",stdout=None,stderr=None)
+	else:
+		chkconfig("--add",servicename,stdout=None,stderr=None)
+		chkconfig("--level","345",servicename,"on",stdout=None,stderr=None)
+	start_service(servicename,force=forcestart)
+
+
+def replace_line(f,startswith,stanza,always_add=False):
+	lines = [ s.strip() for s in file(f).readlines() ]
+	newlines = []
+	replaced = False
+	for line in lines:
+		if line.startswith(startswith):
+			newlines.append(stanza)
+			replaced = True
+		else: newlines.append(line)
+	if not replaced and always_add: newlines.append(stanza)
+	newlines = [ s + '\n' for s in newlines ]
+	file(f,"w").writelines(newlines)
+
+def replace_or_add_line(f,startswith,stanza):
+	# Convenience wrapper: replace matching lines, or append `stanza` if none matched.
+	return replace_line(f,startswith,stanza,always_add=True)
+	
+# ==================================== CHECK FUNCTIONS ==========================
+
+# If they return without exception, it's okay.  If they raise a CheckFailed exception, that means a condition
+# (generally one that needs administrator intervention) was detected.
+
+class CheckFailed(Exception): pass
+
+#check function
+def check_hostname():
+	"""Raise CheckFailed if this machine's hostname is not a fully-qualified domain name."""
+	try: check_call(["hostname",'--fqdn'])
+	except CalledProcessError:
+		raise CheckFailed("This machine does not have an FQDN (fully-qualified domain name) for a hostname")
+
+#check function
+def check_kvm():
+	if distro in (Fedora,CentOS):
+		if os.path.exists("/dev/kvm"): return True
+		raise CheckFailed("KVM is not correctly installed on this system, or support for it is not enabled in the BIOS")
+	else:
+		try:
+			kvmok()
+			return True
+		except CalledProcessError:
+			raise CheckFailed("KVM is not correctly installed on this system, or support for it is not enabled in the BIOS")
+		except OSError,e:
+			if e.errno is errno.ENOENT: raise CheckFailed("KVM is not correctly installed on this system, or support for it is not enabled in the BIOS")
+			raise
+		return True
+	raise AssertionError, "check_kvm() should have never reached this part"
+
+def check_cgroups():
+	# Truthy (non-empty list) if a cgroup cpu controller is mounted directly under / .
+	return glob.glob("/*/cpu.shares")
+
+#check function
+def check_selinux():
+	# Raise CheckFailed if SELinux is currently enforcing, or configured to enforce at boot.
+	if distro not in [Fedora,CentOS]: return # no selinux outside of those
+	enforcing = False
+	try:
+		output = getenforce().stdout.strip()
+		# matches both "Enforcing" and "enforcing"
+		if "nforcing" in output:
+			enforcing = True
+		if any ( [ s.startswith("SELINUX=enforcing") for s in file("/etc/selinux/config").readlines() ] ):
+			enforcing = True
+	except (IOError,OSError),e:
+		if e.errno == 2: pass # ENOENT: no getenforce / config file -> treat as not enforcing
+		else: raise CheckFailed("An unknown error (%s) took place while checking for SELinux"%str(e))
+	if enforcing:
+		raise CheckFailed("SELinux is set to enforcing, please set it to permissive in /etc/selinux/config, then reboot the machine or type setenforce Permissive, after which you can run this program again.")
+
+
+def preflight_checks(do_check_kvm=True):
+	# Build the list of (check_function, description) pairs appropriate for this distro.
+	# The local name intentionally shadows this function's own name; callers iterate the result.
+	if distro is Ubuntu:
+		preflight_checks = [
+			(check_hostname,"Checking hostname"),
+		]
+	else:
+		preflight_checks = [
+			(check_hostname,"Checking hostname"),
+			(check_selinux,"Checking if SELinux is disabled"),
+		]
+	#preflight_checks.append( (check_cgroups,"Checking if the control groups /cgroup filesystem is mounted") )
+	if do_check_kvm: preflight_checks.append( (check_kvm,"Checking for KVM") )
+	return preflight_checks
+
+
+# ========================== CONFIGURATION TASKS ================================
+
+# A Task is a function that runs within the context of its run() function that runs the function execute(), which does several things, reporting back to the caller as it goes with the use of yield
+# the done() method ought to return true if the task has run in the past
+# the execute() method must implement the configuration act itself
+# run() wraps the output of execute() within a Starting taskname and a Completed taskname message
+# tasks have a name
+
+# Raised by ConfigTask.done()/execute() when a configuration step cannot proceed.
+class TaskFailed(Exception): pass
+	#def __init__(self,code,msg):
+		#Exception.__init__(self,msg)
+		#self.code = code
+
+class ConfigTask:
+	"""Base class for configuration tasks.
+
+	Subclasses set `name`, override done() to report whether the task was
+	already applied, and execute() to apply it (optionally yielding progress
+	strings).  run() brackets execute() with start/completion messages.
+	"""
+	name = "generic config task"
+	autoMode=False
+	def __init__(self): pass
+	def done(self):
+		"""Returns true if the config task has already been done in the past, false if it hasn't"""
+		return False
+	def execute(self):
+		"""Executes the configuration task.  Must not be run if test() returned true.
+		Must yield strings that describe the steps in the task.
+		Raises TaskFailed if the task failed at some step.
+		"""
+	def run (self):
+		# execute() may return None (plain function) or a generator of progress messages.
+		stderr("Starting %s"%self.name)
+		it = self.execute()
+		if not it:
+			pass # not a yielding iterable
+		else:
+			for msg in it: stderr(msg)
+		stderr("Completed %s"%self.name)
+	def setAutoMode(self, autoMode):
+		# autoMode=True means run unattended (no interactive prompts).
+		self.autoMode = autoMode
+	def  isAutoMode(self):
+		return self.autoMode
+
+
+# ============== these are some configuration tasks ==================
+
+class SetupNetworking(ConfigTask):
+	name = "network setup"
+	def __init__(self,brname):
+		ConfigTask.__init__(self)
+		self.brname = brname
+		self.runtime_state_changed = False
+		self.was_nm_service_running = None
+		self.was_net_service_running = None
+		if distro in (Fedora, CentOS):
+			self.nmservice = 'NetworkManager'
+			self.netservice = 'network'
+		else:
+			self.nmservice = 'network-manager'
+			self.netservice = 'networking'
+		
+		
+	def done(self):
+		try:
+			if distro in (Fedora,CentOS):
+				alreadysetup = augtool._print("/files/etc/sysconfig/network-scripts/ifcfg-%s"%self.brname).stdout.strip()
+			else:
+				alreadysetup = augtool.match("/files/etc/network/interfaces/iface",self.brname).stdout.strip()
+			return alreadysetup
+		except OSError,e:
+			if e.errno is 2: raise TaskFailed("augtool has not been properly installed on this system")
+			raise
+
+	def restore_state(self):
+		if not self.runtime_state_changed: return
+		
+		try:
+			o = ifconfig(self.brname)
+			bridge_exists = True
+		except CalledProcessError,e:
+			print e.stdout + e.stderr
+			bridge_exists = False
+			
+		if bridge_exists:
+			ifconfig(self.brname,"0.0.0.0")
+			if hasattr(self,"old_net_device"):
+				ifdown(self.old_net_device)
+				ifup(self.old_net_device)
+			try: ifdown(self.brname)
+			except CalledProcessError: pass
+			try: ifconfig(self.brname,"down")
+			except CalledProcessError: pass
+			try: brctl("delbr",self.brname)
+			except CalledProcessError: pass
+			try: ifdown("--force",self.brname)
+			except CalledProcessError: pass
+		
+		
+		if self.was_net_service_running is None:
+			# we do nothing
+			pass
+		elif self.was_net_service_running == False:
+			stop_service(self.netservice,force=True)
+			time.sleep(1)
+		else:
+			# we altered service configuration
+			stop_service(self.netservice,force=True)
+			time.sleep(1)
+			try: start_service(self.netservice,force=True)
+			except CalledProcessError,e:
+				if e.returncode == 1: pass
+				else: raise
+			time.sleep(1)
+		
+		if self.was_nm_service_running is None:
+			 # we do nothing
+			 pass
+		elif self.was_nm_service_running == False:
+			stop_service(self.nmservice,force=True)
+			time.sleep(1)
+		else:
+			# we altered service configuration
+			stop_service(self.nmservice,force=True)
+			time.sleep(1)
+			start_service(self.nmservice,force=True)
+			time.sleep(1)
+		
+		self.runtime_state_changed = False
+
+	def execute(self):
+		yield "Determining default route"
+		routes = ip.route().stdout.splitlines()
+		defaultroute = [ x for x in routes if x.startswith("default") ]
+		if not defaultroute: raise TaskFailed("Your network configuration does not have a default route")
+		
+		dev = defaultroute[0].split()[4]
+		yield "Default route assigned to device %s"%dev
+		
+		self.old_net_device = dev
+		
+		if distro in (Fedora, CentOS):
+			inconfigfile = "/".join(augtool.match("/files/etc/sysconfig/network-scripts/*/DEVICE",dev).stdout.strip().split("/")[:-1])
+			if not inconfigfile: raise TaskFailed("Device %s has not been set up in /etc/sysconfig/network-scripts"%dev)
+			pathtoconfigfile = inconfigfile[6:]
+
+		if distro in (Fedora, CentOS):
+			automatic = augtool.match("%s/ONBOOT"%inconfigfile,"yes").stdout.strip()
+		else:
+			automatic = augtool.match("/files/etc/network/interfaces/auto/*/",dev).stdout.strip()
+		if not automatic:
+			if distro is Fedora: raise TaskFailed("Device %s has not been set up in %s as automatic on boot"%dev,pathtoconfigfile)
+			else: raise TaskFailed("Device %s has not been set up in /etc/network/interfaces as automatic on boot"%dev)
+			
+		if distro not in (Fedora , CentOS):
+			inconfigfile = augtool.match("/files/etc/network/interfaces/iface",dev).stdout.strip()
+			if not inconfigfile: raise TaskFailed("Device %s has not been set up in /etc/network/interfaces"%dev)
+
+		if distro in (Fedora, CentOS):
+			isstatic = augtool.match(inconfigfile + "/BOOTPROTO","none").stdout.strip()
+			if not isstatic: isstatic = augtool.match(inconfigfile + "/BOOTPROTO","static").stdout.strip()
+		else:
+			isstatic = augtool.match(inconfigfile + "/method","static").stdout.strip()
+		if not isstatic:
+			if distro in (Fedora, CentOS): raise TaskFailed("Device %s has not been set up as a static device in %s"%(dev,pathtoconfigfile))
+			else: raise TaskFailed("Device %s has not been set up as a static device in /etc/network/interfaces"%dev)
+
+		if is_service_running(self.nmservice):
+			self.was_nm_service_running = True
+			yield "Stopping NetworkManager to avoid automatic network reconfiguration"
+			disable_service(self.nmservice)
+		else:
+			self.was_nm_service_running = False
+			
+		if is_service_running(self.netservice):
+			self.was_net_service_running = True
+		else:
+			self.was_net_service_running = False
+			
+		yield "Creating Cloud bridging device and making device %s member of this bridge"%dev
+
+		if distro in (Fedora, CentOS):
+			ifcfgtext = file(pathtoconfigfile).read()
+			newf = "/etc/sysconfig/network-scripts/ifcfg-%s"%self.brname
+			#def restore():
+				#try: os.unlink(newf)
+				#except OSError,e:
+					#if errno == 2: pass
+					#raise
+				#try: file(pathtoconfigfile,"w").write(ifcfgtext)
+				#except OSError,e: raise
+
+			f = file(newf,"w") ; f.write(ifcfgtext) ; f.flush() ; f.close()
+			innewconfigfile = "/files" + newf
+
+			script = """set %s/DEVICE %s
+set %s/NAME %s
+set %s/BRIDGE_PORTS %s
+set %s/TYPE Bridge
+rm %s/HWADDR
+rm %s/UUID
+rm %s/HWADDR
+rm %s/IPADDR
+rm %s/DEFROUTE
+rm %s/NETMASK
+rm %s/GATEWAY
+rm %s/BROADCAST
+rm %s/NETWORK
+set %s/BRIDGE %s
+save"""%(innewconfigfile,self.brname,innewconfigfile,self.brname,innewconfigfile,dev,
+			innewconfigfile,innewconfigfile,innewconfigfile,innewconfigfile,
+			inconfigfile,inconfigfile,inconfigfile,inconfigfile,inconfigfile,inconfigfile,
+			inconfigfile,self.brname)
+			
+			yield "Executing the following reconfiguration script:\n%s"%script
+			
+			try:
+				returned = augtool < script
+				if "Saved 2 file" not in returned.stdout:
+					print returned.stdout + returned.stderr
+					#restore()
+					raise TaskFailed("Network reconfiguration failed.")
+				else:
+					yield "Network reconfiguration complete"
+			except CalledProcessError,e:
+				#restore()
+				print e.stdout + e.stderr
+				raise TaskFailed("Network reconfiguration failed")
+		else: # Not fedora
+			backup = file("/etc/network/interfaces").read(-1)
+			#restore = lambda: file("/etc/network/interfaces","w").write(backup)
+
+			script = """set %s %s
+set %s %s
+set %s/bridge_ports %s
+save"""%(automatic,self.brname,inconfigfile,self.brname,inconfigfile,dev)
+			
+			yield "Executing the following reconfiguration script:\n%s"%script
+			
+			try:
+				returned = augtool < script
+				if "Saved 1 file" not in returned.stdout:
+					#restore()
+					raise TaskFailed("Network reconfiguration failed.")
+				else:
+					yield "Network reconfiguration complete"
+			except CalledProcessError,e:
+				#restore()
+				print e.stdout + e.stderr
+				raise TaskFailed("Network reconfiguration failed")
+		
+		yield "We are going to restart network services now, to make the network changes take effect.  Hit ENTER when you are ready."
+		if self.isAutoMode(): pass
+        	else:
+		    raw_input()
+		
+		# if we reach here, then if something goes wrong we should attempt to revert the runinng state
+		# if not, then no point
+		self.runtime_state_changed = True
+		
+		yield "Enabling and restarting non-NetworkManager networking"
+		if distro is Ubuntu: ifup(self.brname,stdout=None,stderr=None)
+		stop_service(self.netservice)
+		try: enable_service(self.netservice,forcestart=True)
+		except CalledProcessError,e:
+			if e.returncode == 1: pass
+			else: raise
+		
+		yield "Verifying that the bridge is up"
+		try:
+			o = ifconfig(self.brname)
+		except CalledProcessError,e:
+			print e.stdout + e.stderr
+			raise TaskFailed("The bridge could not be set up properly")
+		
+		yield "Networking restart done"
+
+
+class SetupCgConfig(ConfigTask):
+	# Appends a "virt" cgroup (cpu.shares = 9216) to /etc/cgconfig.conf and restarts cgconfig.
+	name = "control groups configuration"
+	
+	def done(self):
+		# Done once the virt group is present in the config file.
+		try:
+			return "group virt" in file("/etc/cgconfig.conf","r").read(-1)
+		except IOError,e:
+			if e.errno is 2: raise TaskFailed("cgconfig has not been properly installed on this system")
+			raise
+		
+	def execute(self):
+		cgconfig = file("/etc/cgconfig.conf","r").read(-1)
+		cgconfig = cgconfig + """
+group virt {
+	cpu {
+		cpu.shares = 9216;
+	}
+}
+"""
+		file("/etc/cgconfig.conf","w").write(cgconfig)
+		
+		stop_service("cgconfig")
+		enable_service("cgconfig",forcestart=True)
+
+
+class SetupCgRules(ConfigTask):
+	# Appends a cgrules.conf rule placing libvirtd-spawned processes in the virt cpu group,
+	# then restarts the cgred rules daemon.
+	name = "control group rules setup"
+	cfgline = "root:/usr/sbin/libvirtd	cpu	virt/"
+	
+	def done(self):
+		try:
+			return self.cfgline in file("/etc/cgrules.conf","r").read(-1)
+		except IOError,e:
+			if e.errno is 2: raise TaskFailed("cgrulesd has not been properly installed on this system")
+			raise
+	
+	def execute(self):
+		cgrules = file("/etc/cgrules.conf","r").read(-1)
+		cgrules = cgrules + "\n" + self.cfgline + "\n"
+		file("/etc/cgrules.conf","w").write(cgrules)
+		
+		stop_service("cgred")
+		enable_service("cgred")
+
+
+class SetupCgroupControllers(ConfigTask):
+	name = "qemu cgroup controllers setup"
+	cfgline = "cgroup_controllers = [ \"cpu\" ]"
+	filename = "/etc/libvirt/qemu.conf"
+	
+	def done(self):
+		try:
+			return self.cfgline in file(self.filename,"r").read(-1)
+		except IOError,e:
+			if e.errno is 2: raise TaskFailed("qemu has not been properly installed on this system")
+			raise
+	
+	def execute(self):
+		libvirtqemu = file(self.filename,"r").read(-1)
+		libvirtqemu = libvirtqemu + "\n" + self.cfgline + "\n"
+		file("/etc/libvirt/qemu.conf","w").write(libvirtqemu)
+
+
+class SetupSecurityDriver(ConfigTask):
+	name = "security driver setup"
+	cfgline = "security_driver = \"none\""
+	filename = "/etc/libvirt/qemu.conf"
+	
+	def done(self):
+		try:
+			return self.cfgline in file(self.filename,"r").read(-1)
+		except IOError,e:
+			if e.errno is 2: raise TaskFailed("qemu has not been properly installed on this system")
+			raise
+	
+	def execute(self):
+		libvirtqemu = file(self.filename,"r").read(-1)
+		libvirtqemu = libvirtqemu + "\n" + self.cfgline + "\n"
+		file("/etc/libvirt/qemu.conf","w").write(libvirtqemu)
+
+
+class SetupLibvirt(ConfigTask):
+	# Exports CGROUP_DAEMON in the libvirtd environment file and restarts libvirt.
+	name = "libvirt setup"
+	cfgline = "export CGROUP_DAEMON='cpu:/virt'"
+	def done(self):
+		try:
+			# Environment file location differs per distro.
+			if distro in (Fedora,CentOS): 	 libvirtfile = "/etc/sysconfig/libvirtd"
+			elif distro is Ubuntu:	 libvirtfile = "/etc/default/libvirt-bin"
+			else: raise AssertionError, "We should not reach this"
+			return self.cfgline in file(libvirtfile,"r").read(-1)
+		except IOError,e:
+			if e.errno is 2: raise TaskFailed("libvirt has not been properly installed on this system")
+			raise
+	
+	def execute(self):
+		if distro in (Fedora,CentOS): 	 libvirtfile = "/etc/sysconfig/libvirtd"
+		elif distro is Ubuntu:	 libvirtfile = "/etc/default/libvirt-bin"
+		else: raise AssertionError, "We should not reach this"
+		libvirtbin = file(libvirtfile,"r").read(-1)
+		libvirtbin = libvirtbin + "\n" + self.cfgline + "\n"
+		file(libvirtfile,"w").write(libvirtbin)
+		
+		# Service name differs per distro as well.
+		if distro in (CentOS, Fedora):	svc = "libvirtd"
+		else:					svc = "libvirt-bin"
+		stop_service(svc)
+		enable_service(svc)
+
+class SetupLiveMigration(ConfigTask):
+	# Enables unauthenticated TCP listening in libvirtd.conf (live-migration transport)
+	# and makes libvirtd listen (-l) via the distro-specific daemon options.
+	name = "live migration setup"
+	stanzas = (
+			"listen_tcp=1",
+			'tcp_port="16509"',
+			'auth_tcp="none"',
+			"listen_tls=0",
+	)
+	
+	def done(self):
+		# Done only when every stanza is already present (returns None otherwise).
+		try:
+			lines = [ s.strip() for s in file("/etc/libvirt/libvirtd.conf").readlines() ]
+			if all( [ stanza in lines for stanza in self.stanzas ] ): return True
+		except IOError,e:
+			if e.errno is 2: raise TaskFailed("libvirt has not been properly installed on this system")
+			raise
+	
+	def execute(self):
+		
+		for stanza in self.stanzas:
+			startswith = stanza.split("=")[0] + '='
+			replace_or_add_line("/etc/libvirt/libvirtd.conf",startswith,stanza)
+
+		if distro is Fedora:
+			replace_or_add_line("/etc/sysconfig/libvirtd","LIBVIRTD_ARGS=","LIBVIRTD_ARGS=-l")
+		
+		elif distro is Ubuntu:
+			# Upstart-managed libvirt gets the flags on its exec line; sysv via defaults file.
+			if os.path.exists("/etc/init/libvirt-bin.conf"):
+				replace_line("/etc/init/libvirt-bin.conf", "exec /usr/sbin/libvirtd","exec /usr/sbin/libvirtd -d -l")
+			else:
+				replace_or_add_line("/etc/default/libvirt-bin","libvirtd_opts=","libvirtd_opts='-l -d'")
+			
+		else:
+			raise AssertionError("Unsupported distribution")
+		
+		if distro in (CentOS, Fedora):	svc = "libvirtd"
+		else:						svc = "libvirt-bin"
+		stop_service(svc)
+		enable_service(svc)
+
+
+class SetupRequiredServices(ConfigTask):
+	# Enables the NFS helper services this host needs (names differ per distro).
+	name = "required services setup"
+	
+	def done(self):
+		if distro is Fedora:  nfsrelated = "rpcbind nfslock"
+		elif distro is CentOS: nfsrelated = "portmap nfslock"
+		else: return True
+		return all( [ is_service_running(svc) for svc in nfsrelated.split() ] )
+		
+	def execute(self):
+
+		if distro is Fedora:  nfsrelated = "rpcbind nfslock"
+		elif distro is CentOS: nfsrelated = "portmap nfslock"
+		else: raise AssertionError("Unsupported distribution")
+
+		for svc in nfsrelated.split(): enable_service(svc)
+
+
+class SetupFirewall(ConfigTask):
+	# Opens ssh (22), agent (1798) and libvirt migration (16509) ports in the firewall.
+	name = "firewall setup"
+	
+	def done(self):
+		# Considered done when no active firewall is detected, or the migration rule exists.
+		if distro in (Fedora, CentOS):
+			if not os.path.exists("/etc/sysconfig/iptables"): return True
+			if ":on" not in chkconfig("--list","iptables").stdout: return True
+		else:
+			if "Status: active" not in ufw.status().stdout: return True
+			if not os.path.exists("/etc/ufw/before.rules"): return True
+		rule = "-p tcp -m tcp --dport 16509 -j ACCEPT"
+		if rule in iptablessave().stdout: return True
+		return False
+	
+	def execute(self):
+		ports = "22 1798 16509".split()
+		if distro in (Fedora , CentOS):
+			for p in ports: iptables("-I","INPUT","1","-p","tcp","--dport",p,'-j','ACCEPT')
+			# persists the rules via "service iptables save"
+			o = service.iptables.save() ; print o.stdout + o.stderr
+		else:
+			for p in ports: ufw.allow(p)
+
+
+class SetupFirewall2(ConfigTask):
+	# this closes bug 4371
+	# Allows intra-bridge forwarding plus VNC (5900:6100) and migration (49152:49216) port ranges.
+	name = "additional firewall setup"
+	def __init__(self,brname):
+		ConfigTask.__init__(self)
+		self.brname = brname
+	
+	def done(self):
+		# Considered done when no active firewall is detected, or the bridge forward rule exists.
+		if distro in (Fedora, CentOS):
+			if not os.path.exists("/etc/sysconfig/iptables"): return True
+			if ":on" not in chkconfig("--list","iptables").stdout: return True
+			rule = "FORWARD -i %s -o %s -j ACCEPT"%(self.brname,self.brname)
+			if rule in iptablessave().stdout: return True
+			return False
+		else:
+			if "Status: active" not in ufw.status().stdout: return True
+			if not os.path.exists("/etc/ufw/before.rules"): return True
+			rule = "-A ufw-before-forward -i %s -o %s -j ACCEPT"%(self.brname,self.brname)
+			if rule in file("/etc/ufw/before.rules").read(-1): return True
+			return False
+		
+	def execute(self):
+		
+		yield "Permitting traffic in the bridge interface, migration port and for VNC ports"
+		
+		if distro in (Fedora , CentOS):
+			
+			for rule in (
+				"-I FORWARD -i %s -o %s -j ACCEPT"%(self.brname,self.brname),
+				"-I INPUT 1 -p tcp --dport 5900:6100 -j ACCEPT",
+				"-I INPUT 1 -p tcp --dport 49152:49216 -j ACCEPT",
+				):
+				args = rule.split()
+				o = iptables(*args)
+			service.iptables.save(stdout=None,stderr=None)
+			
+		else:
+			
+			# ufw: insert the forward rule just before COMMIT in before.rules.
+			rule = "-A ufw-before-forward -i %s -o %s -j ACCEPT"%(self.brname,self.brname)
+			text = file("/etc/ufw/before.rules").readlines()
+			newtext = []
+			for line in text:
+				if line.startswith("COMMIT"):
+					newtext.append(rule + "\n")
+				newtext.append(line)
+			file("/etc/ufw/before.rules","w").writelines(newtext)
+			ufw.allow.proto.tcp("from","any","to","any","port","5900:6100")
+			ufw.allow.proto.tcp("from","any","to","any","port","49152:49216")
+
+			stop_service("ufw")
+			start_service("ufw")
+
+
+# Tasks according to distribution -- at some point we will split them in separate modules
+
+def config_tasks(brname):
+	# Return the ordered tuple of ConfigTask instances for this distro.
+	# NOTE(review): CentOS deliberately skips the cgroup/live-migration tasks — confirm intended.
+	if distro is CentOS:
+		config_tasks = (
+			SetupNetworking(brname),
+			SetupLibvirt(),
+			SetupRequiredServices(),
+			SetupFirewall(),
+			SetupFirewall2(brname),
+		)
+	elif distro in (Ubuntu,Fedora):
+		config_tasks = (
+			SetupNetworking(brname),
+			SetupCgConfig(),
+			SetupCgRules(),
+			SetupCgroupControllers(),
+			SetupSecurityDriver(),
+			SetupLibvirt(),
+			SetupLiveMigration(),
+			SetupRequiredServices(),
+			SetupFirewall(),
+			SetupFirewall2(brname),
+		)
+	else:
+		raise AssertionError("Unknown distribution")
+	return config_tasks
+
+
+def backup_etc(targetdir):
+	# Mirror /etc into targetdir with rsync (creates targetdir if needed).
+	if not targetdir.endswith("/"): targetdir += "/"
+	check_call( ["mkdir","-p",targetdir] )
+	rsynccall = ["rsync","-ax","--delete"] + ["/etc/",targetdir]
+	check_call( rsynccall )
+def restore_etc(targetdir):
+	# Mirror a previous backup_etc() snapshot back onto /etc.
+	if not targetdir.endswith("/"): targetdir += "/"
+	rsynccall = ["rsync","-ax","--delete"] + [targetdir,"/etc/"]
+	check_call( rsynccall )
+def remove_backup(targetdir):
+	# Delete a backup_etc() snapshot directory.
+	check_call( ["rm","-rf",targetdir] )
+
+def list_zonespods(host):
+	# Query the management server's unauthenticated integration API (port 8096)
+	# and return a list of (zonename, podname) tuples parsed from the XML reply.
+	text = urllib2.urlopen('http://%s:8096/client/api?command=listPods'%host).read(-1)
+	dom = xml.dom.minidom.parseString(text) 
+	x = [ (zonename,podname)
+		for pod in dom.childNodes[0].childNodes  
+		for podname in [ x.childNodes[0].wholeText for x in pod.childNodes if x.tagName == "name" ] 
+		for zonename in  [ x.childNodes[0].wholeText for x in pod.childNodes if x.tagName == "zonename" ]
+		]
+	return x
+	
+def prompt_for_hostpods(zonespods):
+	"""Ask user to select one from those zonespods
+	Returns (zone,pod) or None if the user made the default selection."""
+	while True:
+		stderr("Type the number of the zone and pod combination this host belongs to (hit ENTER to skip this step)")
+		print "  N) ZONE, POD" 
+		print "================"
+		for n,(z,p) in enumerate(zonespods):
+			print "%3d) %s, %s"%(n,z,p)
+		print "================"
+		zoneandpod = raw_input().strip()
+		
+		if not zoneandpod:
+			# we go with default, do not touch anything, just break
+			return None
+		
+		try:
+			# if parsing fails as an int, just complain and retry
+			zoneandpod = int(zoneandpod)
+			if zoneandpod >= len(zonespods) or zoneandpod < 0: raise ValueError, "%s out of bounds"%zoneandpod
+		except ValueError,e:
+			stderr(str(e))
+			continue # re-ask
+		
+		# the int represents a valid zone and pod index in the array
+		return zonespods[zoneandpod]
+	
+# this configures the agent
+
+def setup_agent_config(configfile, host, zone, pod, guid):
+	stderr("Examining Agent configuration")
+	fn = configfile
+	text = file(fn).read(-1)
+	lines = [ s.strip() for s in text.splitlines() ]
+	confopts = dict([ m.split("=",1) for m in lines if "=" in m and not m.startswith("#") ])
+	confposes = dict([ (m.split("=",1)[0],n) for n,m in enumerate(lines) if "=" in m and not m.startswith("#") ])
+	
+	if guid != None:
+		confopts['guid'] = guid
+	else:
+		if not "guid" in confopts:
+			stderr("Generating GUID for this Agent")
+			confopts['guid'] = uuidgen().stdout.strip()
+	
+	if host == None:
+		try: host = confopts["host"]
+		except KeyError: host = "localhost"
+		stderr("Please enter the host name of the management server that this agent will connect to: (just hit ENTER to go with %s)",host)
+		newhost = raw_input().strip()
+		if newhost: host = newhost
+
+	confopts["host"] = host
+	
+	stderr("Querying %s for zones and pods",host)
+	
+	try:
+	    if zone == None or pod == None:
+			x = list_zonespods(confopts['host'])
+			zoneandpod = prompt_for_hostpods(x)
+			if zoneandpod:
+				confopts["zone"],confopts["pod"] = zoneandpod
+				stderr("You selected zone %s pod %s",confopts["zone"],confopts["pod"])
+			else:
+				stderr("Skipped -- using the previous zone %s pod %s",confopts["zone"],confopts["pod"])
+	    else:
+			confopts["zone"] = zone
+			confopts["pod"] = pod
+	except (urllib2.URLError,urllib2.HTTPError),e:
+		stderr("Query failed: %s.  Defaulting to zone %s pod %s",str(e),confopts["zone"],confopts["pod"])
+
+	for opt,val in confopts.items():
+		line = "=".join([opt,val])
+		if opt not in confposes: lines.append(line)
+		else: lines[confposes[opt]] = line
+	
+	text = "\n".join(lines)
+	file(fn,"w").write(text)
+
+def setup_consoleproxy_config(configfile, host, zone, pod):
+	stderr("Examining Console Proxy configuration")
+	fn = configfile
+	text = file(fn).read(-1)
+	lines = [ s.strip() for s in text.splitlines() ]
+	confopts = dict([ m.split("=",1) for m in lines if "=" in m and not m.startswith("#") ])
+	confposes = dict([ (m.split("=",1)[0],n) for n,m in enumerate(lines) if "=" in m and not m.startswith("#") ])
+
+	if not "guid" in confopts:
+		stderr("Generating GUID for this Console Proxy")
+		confopts['guid'] = uuidgen().stdout.strip()
+
+        if host == None:
+		try: host = confopts["host"]
+		except KeyError: host = "localhost"
+		stderr("Please enter the host name of the management server that this console-proxy will connect to: (just hit ENTER to go with %s)",host)
+		newhost = raw_input().strip()
+		if newhost: host = newhost
+	confopts["host"] = host
+
+	stderr("Querying %s for zones and pods",host)
+	
+	try:
+                if zone == None or pod == None:
+			x = list_zonespods(confopts['host'])
+			zoneandpod = prompt_for_hostpods(x)
+			if zoneandpod:
+				confopts["zone"],confopts["pod"] = zoneandpod
+				stderr("You selected zone %s pod %s",confopts["zone"],confopts["pod"])
+			else:
+				stderr("Skipped -- using the previous zone %s pod %s",confopts["zone"],confopts["pod"])
+		else:
+			confopts["zone"] = zone
+			confopts["pod"] = pod
+	except (urllib2.URLError,urllib2.HTTPError),e:
+		stderr("Query failed: %s.  Defaulting to zone %s pod %s",str(e),confopts["zone"],confopts["pod"])
+
+	for opt,val in confopts.items():
+		line = "=".join([opt,val])
+		if opt not in confposes: lines.append(line)
+		else: lines[confposes[opt]] = line
+	
+	text = "\n".join(lines)
+	file(fn,"w").write(text)
+
+# =========================== DATABASE MIGRATION SUPPORT CODE ===================
+
+# Migrator, Migratee and Evolvers -- this is the generic infrastructure.
+# To actually implement Cloud.com-specific code, search "Cloud.com-specific evolvers and context"
+
+
+# Migration framework exception hierarchy.
+class MigratorException(Exception): pass
+# No chain of evolvers connects the requested starting and ending schema levels.
+class NoMigrationPath(MigratorException): pass
+# No evolver exists for the requested starting/ending schema level.
+class NoMigrator(MigratorException): pass
+
+INITIAL_LEVEL = '-'
+
+class Migrator:
+	"""Migrator class.
+	
+	The migrator gets a list of Python objects, and discovers MigrationSteps in it. It then sorts the steps into a chain, based on the attributes from_level and to_level in each one of the steps.
+	
+	When the migrator's run(context) is called, the chain of steps is applied sequentially on the context supplied to run(), in the order of the chain of steps found at discovery time.  See the documentation for the MigrationStep class for information on how that happens.
+	"""
+	
+	def __init__(self,evolver_source):
+		self.discover_evolvers(evolver_source)
+		self.sort_evolvers()
+		
+	def discover_evolvers(self,source):
+		self.evolvers = []
+		for val in source:
+			if hasattr(val,"from_level") and hasattr(val,"to_level") and val.to_level:
+				self.evolvers.append(val)
+	
+	def sort_evolvers(self):
+		new = []
+		while self.evolvers:
+			if not new:
+				try: idx= [ i for i,s in enumerate(self.evolvers)
+					if s.from_level == INITIAL_LEVEL ][0] # initial evolver
+				except IndexError,e:
+					raise IndexError, "no initial evolver (from_level is None) could be found"
+			else:
+				try: idx= [ i for i,s in enumerate(self.evolvers)
+					if new[-1].to_level == s.from_level ][0]
+				except IndexError,e:
+					raise IndexError, "no evolver could be found to evolve from level %s"%new[-1].to_level
+			new.append(self.evolvers.pop(idx))
+		self.evolvers = new
+	
+	def get_evolver_chain(self):
+		return [ (s.from_level, s.to_level, s) for s in self.evolvers ]
+		
+	def get_evolver_by_starting_level(self,level):
+		try: return [ s for s in self.evolvers if s.from_level == level][0]
+		except IndexError: raise NoMigrator, "No evolver knows how to evolve the database from schema level %r"%level
+	
+	def get_evolver_by_ending_level(self,level):
+		try: return [ s for s in self.evolvers if s.to_level == level][0]
+		except IndexError: raise NoMigrator, "No evolver knows how to evolve the database to schema level %r"%level
+	
+	def run(self, context, dryrun = False, starting_level = None, ending_level = None):
+		"""Runs each one of the steps in sequence, passing the migration context to each. At the end of the process, context.commit() is called to save the changes, or context.rollback() is called if dryrun = True.
+		
+		If starting_level is not specified, then the context.get_schema_level() is used to find out at what level the context is at.  Then starting_level is set to that.
+		
+		If ending_level is not specified, then the evolvers will run till the end of the chain."""
+		
+		assert dryrun is False # NOT IMPLEMENTED, prolly gonna implement by asking the context itself to remember its state
+		
+		starting_level = starting_level or context.get_schema_level() or self.evolvers[0].from_level
+		ending_level = ending_level or self.evolvers[-1].to_level
+		
+		evolution_path = self.evolvers
+		idx = evolution_path.index(self.get_evolver_by_starting_level(starting_level))
+		evolution_path = evolution_path[idx:]
+		try: idx = evolution_path.index(self.get_evolver_by_ending_level(ending_level))
+		except ValueError:
+			raise NoEvolutionPath, "No evolution path from schema level %r to schema level %r" % \
+				(starting_level,ending_level)
+		evolution_path = evolution_path[:idx+1]
+		
+		logging.info("Starting migration on %s"%context)
+		
+		for ec in evolution_path:
+			assert ec.from_level == context.get_schema_level()
+			evolver = ec(context=context)
+			logging.info("%s (from level %s to level %s)",
+				evolver,
+				evolver.from_level,
+				evolver.to_level)
+			#try:
+			evolver.run()
+			#except:
+				#context.rollback()
+				#raise
+			context.set_schema_level(evolver.to_level)
+			#context.commit()
+			logging.info("%s is now at level %s",context,context.get_schema_level())
+		
+		#if dryrun: # implement me with backup and restore
+			#logging.info("Rolling back changes on %s",context)
+			#context.rollback()
+		#else:
+			#logging.info("Committing changes on %s",context)
+			#context.commit()
+		
+		logging.info("Migration finished")
+		
+
+class MigrationStep:
+	"""Base MigrationStep class, aka evolver.
+	
+	You develop your own steps, and then pass a list of those steps to the
+	Migrator instance that will run them in order.
+	
+	When the migrator runs, it will take the list of steps you gave him,
+	and, for each step:
+	
+	a) instantiate it, passing the context you gave to the migrator
+	   into the step's __init__().
+	b) run() the method in the migration step.
+	
+	As you can see, the default MigrationStep constructor makes the passed
+	context available as self.context in the methods of your step.
+	
+	Each step has two member vars that determine in which order they
+	are run, and if they need to run:
+	
+	- from_level = the schema level that the database should be at,
+		       before running the evolver
+		       The value INITIAL_LEVEL ('-') has special meaning here,
+		       it marks the first evolver that should be run if the
+		       database does not have a schema level yet.
+	- to_level =   the schema level number that the database will be at
+		       after the evolver has run
+	"""
+	
+	# Implement these attributes in your steps
+	from_level = None
+	to_level = None
+	
+	def __init__(self,context):
+		self.context = context
+		
+	def run(self):
+		# Subclasses must implement the actual migration work here.
+		raise NotImplementedError
+
+
+class MigrationContext:
+	# Abstract store being migrated: concrete subclasses supply transaction
+	# control and schema-level bookkeeping for the Migrator.
+	def __init__(self): pass
+	def commit(self):raise NotImplementedError
+	def rollback(self):raise NotImplementedError
+	def get_schema_level(self):raise NotImplementedError
+	def set_schema_level(self,l):raise NotImplementedError
+
+
diff --git a/python/wscript_build b/python/wscript_build
index 4b78e04b404..d3a80e70d26 100644
--- a/python/wscript_build
+++ b/python/wscript_build
@@ -1,3 +1,2 @@
-if bld.env.DISTRO not in ['Windows','Mac']:
-	obj = bld(features = 'py',name='pythonmodules')
-	obj.find_sources_in_dirs('lib', exts=['.py'])
+obj = bld(features = 'py',name='pythonmodules')
+obj.find_sources_in_dirs('lib', exts=['.py'])
diff --git a/setup/bindir/cloud-setup-databases.in b/setup/bindir/cloud-setup-databases.in
index 3fe66d9c111..3853bec9cc3 100755
--- a/setup/bindir/cloud-setup-databases.in
+++ b/setup/bindir/cloud-setup-databases.in
@@ -9,6 +9,7 @@ from random import choice
 import string
 from optparse import OptionParser
 import commands
+import MySQLdb
 
 # ---- This snippet of code adds the sources path and the waf configured PYTHONDIR to the Python path ----
 # ---- We do this so cloud_utils can be looked up in the following order:
@@ -128,22 +129,34 @@ def get_creds(parser,options,args):
 	host,port = parse_hostport(hostinfo)
 	return (user,password,host,port)
 
-def run_mysql(text,user,password,host,port,extraargs=None):
-  cmd = ["mysql",
-    "--user=%s"%user,
-    "--host=%s"%host,
-  ]
-  if password: 
-    cmd.append("--password=%s"%password)
-  if password: 
-    cmd.append("--port=%s"%port)
-  if extraargs:
-    cmd.extend(extraargs)
-    
-  p = subprocess.Popen(cmd,stdin=subprocess.PIPE)
-  p.communicate(text)
-  ret = p.wait()
-  if ret != 0: raise CalledProcessError(ret,cmd)
+def run_mysql(text,user,password,host,port,debug=False):
+  kwargs = {}
+  kwargs['host'] = host
+  kwargs['user'] = user
+  if password: kwargs['passwd']   = password
+  if port: kwargs['port']   = port
+
+  conn = MySQLdb.connect(**kwargs)
+  cur = conn.cursor()
+  import re
+  exp = re.compile("DELIMITER (.*)$",re.M)
+  pairs = [";"]+[x.strip() for x in exp.split(text)]
+  delims = []
+  chunks = []
+  while pairs:
+      delims.append( pairs[0] )
+      chunks.append( pairs[1] )
+      pairs = pairs[2:]
+
+  for delim,chunk in zip(delims,chunks):
+      for stmt in chunk.split(delim):
+	stmt = stmt.strip()
+	if not stmt: continue
+	if debug: print stmt
+	cur.execute(stmt)
+  cur.close()
+  conn.commit()
+  conn.close()
 
 def ifaces():
     status,lines = commands.getstatusoutput('LANG=C /sbin/ip address show')
@@ -239,8 +252,8 @@ if options.serversetup and not os.path.isfile(options.serversetup):
 	e("%s is not a valid file"%options.serversetup)
 
 
-dbfilepath = "@SETUPDATADIR@"
-dbppaths = [ os.path.join("@MSCONF@","db.properties") ] # , os.path.join("@USAGESYSCONFDIR@","db.properties") ]
+dbfilepath = r"@SETUPDATADIR@"
+dbppaths = [ os.path.join(r"@MSCONF@","db.properties") ] # , os.path.join("@USAGESYSCONFDIR@","db.properties") ]
 dbppaths = [ x for x in dbppaths if os.path.exists(x) ]
 if not dbppaths:
 	print "No services to set up installed on this system.  Refusing to continue."
@@ -249,28 +262,29 @@ if not dbppaths:
 #run sanity checks
 # checkutc()
 checkdbserverhostname(host)
-checkhostname()
+if sys.platform != "win32": checkhostname()
 try: checkselinux()
 except OSError,e:
 	if e.errno == 2: pass
 	else: raise
-checknetwork()
+if sys.platform != 'win32': checknetwork()
 
 
 #initialize variables
-ipaddr = firstip(ifaces())
+if sys.platform != 'win32': ipaddr = firstip(ifaces())
+else: ipaddr = None
 if not ipaddr: ipaddr='127.0.0.1'
 
 
 if rootuser:
 	print "Testing specified deployment credentials on server %s:%s"%(host,port)
-	try: run_mysql("SELECT * from mysql.user limit 0",rootuser,rootpassword,host,port)
+        try: run_mysql("SELECT * from mysql.user limit 0",rootuser,rootpassword,host,port,debug=options.debug)
 	except CalledProcessError:
 		print "The deployment credentials you specified are not valid.  Refusing to continue."
 		sys.exit(19)
 else:
 	print "Testing specified connection credentials on server %s:%s"%(host,port)
-	try: run_mysql("SELECT * from cloud.user limit 0",user,password,host,port)
+        try: run_mysql("SELECT * from cloud.user limit 0",user,password,host,port,debug=options.debug)
 	except CalledProcessError:
 		print "The connection credentials you specified are not valid.  Refusing to continue."
 		sys.exit(19)
@@ -315,22 +329,27 @@ if rootuser:
 		if not os.path.exists(p): continue
 		text = file(p).read()
 		for t,r in replacements: text = text.replace(t,r)
-		print "Applying file %s to the database on server %s:%s"%(p,host,port)
-		try: run_mysql(text,rootuser,rootpassword,host,port)
+                print "Applying file %s to the database on server %s:%s"%(p,host,port)
+		try: run_mysql(text,rootuser,rootpassword,host,port,debug=options.debug)
 		except CalledProcessError: sys.exit(20)
 		
 	if options.serversetup:
-		systemjars = "@SYSTEMJARS@".split()
-		pipe = subprocess.Popen(["build-classpath"]+systemjars,stdout=subprocess.PIPE)
-		systemcp,throwaway = pipe.communicate()
-		systemcp = systemcp.strip()
-		if pipe.wait(): # this means that build-classpath failed miserably
-			systemcp = "@SYSTEMCLASSPATH@"
-		pcp = os.path.pathsep.join( glob.glob( os.path.join ( "@PREMIUMJAVADIR@" , "*" ) ) )
-		mscp = "@MSCLASSPATH@"
-		depscp = "@DEPSCLASSPATH@"
 		conf = os.path.dirname(dbppaths[0])
-		classpath = os.path.pathsep.join([pcp,systemcp,depscp,mscp,conf])
+		pcp = os.path.pathsep.join( glob.glob( os.path.join ( r"@PREMIUMJAVADIR@" , "*" ) ) )
+		if sys.platform == 'win32':
+			mscp = r"@MSCLASSPATH@"
+			depscp = r"@DEPSCLASSPATH@"
+			classpath = os.path.pathsep.join([pcp,depscp,mscp,conf])
+		else:
+			systemjars = r"@SYSTEMJARS@".split()
+			pipe = subprocess.Popen(["build-classpath"]+systemjars,stdout=subprocess.PIPE)
+			systemcp,throwaway = pipe.communicate()
+			systemcp = systemcp.strip()
+			if pipe.wait(): # this means that build-classpath failed miserably
+				systemcp = r"@SYSTEMCLASSPATH@"
+			mscp = r"@MSCLASSPATH@"
+			depscp = r"@DEPSCLASSPATH@"
+			classpath = os.path.pathsep.join([pcp,systemcp,depscp,mscp,conf])
 		print "Performing unattended automated setup using file %s"%options.serversetup
 		cmd = ["java","-cp",classpath,"com.cloud.test.DatabaseConfig",options.serversetup]
 		if options.debug: print "Running command: %s"%" ".join(cmd)
@@ -343,19 +362,19 @@ if rootuser:
 			p = os.path.join(dbfilepath,"%s.sql"%f)
 			text = file(p).read()
 			print "Applying file %s to the database on server %s:%s"%(p,host,port)
-			try: run_mysql(text,rootuser,rootpassword,host,port)
+                        try: run_mysql(text,rootuser,rootpassword,host,port,debug=options.debug)
 			except CalledProcessError: sys.exit(22)
 
 	for f in ["templates.%s"%virttech,"create-index-fk"]:
 		p = os.path.join(dbfilepath,"%s.sql"%f)
 		text = file(p).read()
 		print "Applying file %s to the database on server %s:%s"%(p,host,port)
-		try: run_mysql(text,rootuser,rootpassword,host,port)
+                try: run_mysql(text,rootuser,rootpassword,host,port,debug=options.debug)
 		except CalledProcessError: sys.exit(22)
 
 	p = os.path.join(dbfilepath,"schema-level.sql")
 	if os.path.isfile(p):
 		text = file(p).read()
 		print "Applying file %s to the database on server %s:%s"%(p,host,port)
-		try: run_mysql(text,rootuser,rootpassword,host,port)
+                try: run_mysql(text,rootuser,rootpassword,host,port,debug=options.debug)
 		except CalledProcessError: sys.exit(22)
diff --git a/setup/db/create-database.sql b/setup/db/create-database.sql
index 3a2ad436170..704568edc60 100644
--- a/setup/db/create-database.sql
+++ b/setup/db/create-database.sql
@@ -24,7 +24,7 @@ BEGIN
   IF foo > 0 THEN 
          DROP USER 'cloud'@'%' ;
   END IF;
-END ;$$
+END $$
 DELIMITER ;
 
 CALL `mysql`.`cloud_drop_user_if_exists`() ;
diff --git a/wscript b/wscript
index 7a86b03aa02..6b5d6bce275 100644
--- a/wscript
+++ b/wscript
@@ -731,64 +731,26 @@ def installdebdeps(context):
 
 @throws_command_errors
 def deploydb(ctx,virttech=None):
-	if not virttech: raise Utils.WafError('use deploydb_xenserver or deploydb_kvm rather than deploydb')
+	if not virttech: raise Utils.WafError('use deploydb_xenserver, deploydb_vmware or deploydb_kvm rather than deploydb')
 	
 	ctx = _getbuildcontext()
-	srcdir = ctx.path.abspath()
-	builddir = ctx.path.abspath(ctx.env)
+	setupdatabases = _join(ctx.env.BINDIR,"cloud-setup-databases")
+	serversetup = _join(ctx.env.SETUPDATADIR,"server-setup.xml")
 	
-	dbhost = ctx.env.DBHOST
-	dbuser = ctx.env.DBUSER
-	dbpw   = ctx.env.DBPW
-	dbdir  = ctx.env.DBDIR
+	if not _exists(setupdatabases): # Needs install!
+		Scripting.install(ctx)
+
+	cmd = [
+		ctx.env.PYTHON,
+		setupdatabases,
+		"cloud@%s"%ctx.env.DBHOST,
+		virttech,
+		"--auto=%s"%serversetup,
+                "--deploy-as=%s:%s"%(ctx.env.DBUSER,ctx.env.DBPW),
+		]
 	
-	if not _exists(_join(builddir,"client","tomcatconf","db.properties")): raise Utils.WafError("Please build at least once to generate the db.properties configuration file")
-
-	cp = []
-	cp += [ _join(builddir,"client","tomcatconf") ]
-	cp += [ _join("test","conf") ]
-	cp += _glob(_join(builddir,"target", "jar", "*.jar"))
-	cp += [ctx.env.CLASSPATH]
-	cp = pathsep.join(cp)
-
-	before = ""
-	for f in ["create-database","create-schema"]:
-		p = _join("setup","db",f+".sql")
-		p = dev_override(p)
-		before = before + file(p).read()
-		Utils.pprint("GREEN","Reading database code from %s"%p)
-
-	cmd = [ctx.env.MYSQL,"--user=%s"%dbuser,"-h",dbhost,"--password=%s"%dbpw]
-	Utils.pprint("GREEN","Deploying database scripts to %s (user %s)"%(dbhost,dbuser))
 	Utils.pprint("BLUE"," ".join(cmd))
-	p = _Popen(cmd,stdin=PIPE,stdout=None,stderr=None)
-	p.communicate(before)
-	retcode = p.wait()
-	if retcode: raise CalledProcessError(retcode,cmd)
-	
-	serversetup = dev_override(_join("setup","db","server-setup.xml"))
-	Utils.pprint("GREEN","Configuring database with com.cloud.test.DatabaseConfig")
-	run_java("com.cloud.test.DatabaseConfig",cp,['-Dlog4j.configuration=log4j-stdout.properties'],[serversetup])
-
-	after = ""
-	for f in ["templates.%s"%virttech,"create-index-fk"]:
-		p = _join("setup","db",f+".sql")
-		p = dev_override(p)
-		after = after + file(p).read()
-		Utils.pprint("GREEN","Reading database code from %s"%p)
-
-	p = _join("setup","db","schema-level.sql")
-	if _exists(p):
-		p = dev_override(p)
-		after = after + file(p).read()
-		Utils.pprint("GREEN","Reading database code from %s"%p)
-
-	cmd = [ctx.env.MYSQL,"--user=%s"%dbuser,"-h",dbhost,"--password=%s"%dbpw]
-	Utils.pprint("GREEN","Deploying post-configuration database scripts to %s (user %s)"%(dbhost,dbuser))
-	Utils.pprint("BLUE"," ".join(cmd))
-	p = _Popen(cmd,stdin=PIPE,stdout=None,stderr=None)
-	p.communicate(after)
-	retcode = p.wait()
+	retcode = Utils.exec_command(cmd,shell=False,stdout=None,stderr=None,log=True)
 	if retcode: raise CalledProcessError(retcode,cmd)
 	
 def deploydb_xenserver(ctx):
diff --git a/wscript_configure b/wscript_configure
index 46f95f5cb02..cd1c4f8b29c 100644
--- a/wscript_configure
+++ b/wscript_configure
@@ -115,12 +115,14 @@ conf.check_tool('tar')
 try: conf.check_tool('mkisofs')
 except Configure.ConfigurationError,e:
 	raise Configure.ConfigurationError, "The program genisoimage (or mkisofs) could not be found.\nOn Linux: ./waf installrpmdeps or ./waf installdebdeps according to your distro's package format.\nOn Windows: Use cygwin to install the mkisofs package, then ensure that the program is in your PATH."
-try: conf.find_program('mysql',mandatory=True)
-except Configure.ConfigurationError,e:
-	raise Configure.ConfigurationError, "The program mysql (or mysql.exe) could not be found.\nOn Linux: ./waf installrpmdeps or ./waf installdebdeps according to your distro's package format.\nOn Windows: Install the MySQL client package and ensure that the mysql.exe program is in your PATH."
 conf.check_tool('java')
 conf.check_tool("python")
 conf.check_python_version((2,4,0))
+conf.check_message_1('Detecting Python MySQL module')
+try: import MySQLdb
+except ImportError,e:
+	raise Configure.ConfigurationError, "The Python MySQLdb module could not be found.\nOn Linux: ./waf installrpmdeps or ./waf installdebdeps according to your distro's package format.\nOn Windows: Install MySQL on your machine, then install the Python MySQLdb module for Python %s.\nThe module for Python 2.6 / win32 is available here: http://soemin.googlecode.com/files/MySQL-python-1.2.3c1.win32-py2.6.exe"%conf.env.PYTHON_VERSION
+conf.check_message_2('MySQLdb','GREEN')
 
 if conf.env.DISTRO not in ["Windows","Mac"]:
 	conf.check_tool('compiler_cc')

From dcbf49d098c418ff64ac4605e6e579ee1b35d0bb Mon Sep 17 00:00:00 2001
From: "Manuel Amador (Rudd-O)" 
Date: Wed, 1 Sep 2010 22:28:49 -0500
Subject: [PATCH 032/145] mysql 5.1 does not have implicit user creation so we
 do need to have the create user statement

---
 setup/bindir/cloud-setup-databases.in | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/setup/bindir/cloud-setup-databases.in b/setup/bindir/cloud-setup-databases.in
index 3853bec9cc3..067ff7b2156 100755
--- a/setup/bindir/cloud-setup-databases.in
+++ b/setup/bindir/cloud-setup-databases.in
@@ -301,7 +301,9 @@ if rootuser:
 
 	replacements = (
 		("CREATE USER cloud identified by 'cloud';",
-			""),	
+			"CREATE USER %s@`localhost` identified by '%s'; CREATE USER %s@`%%` identified by '%s';"%(
+					(user,password,user,password)
+				)),
 		("cloud identified by 'cloud';",
 			"%s identified by '%s';"%(user,password)),
 		("cloud@`localhost` identified by 'cloud'",

From 9b93a19e46f2a5eeaf565f8e5d63f2d335089ff2 Mon Sep 17 00:00:00 2001
From: "Manuel Amador (Rudd-O)" 
Date: Wed, 1 Sep 2010 20:30:06 -0700
Subject: [PATCH 033/145] Specify that MySQL ought to be 5.1

---
 wscript_configure | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/wscript_configure b/wscript_configure
index cd1c4f8b29c..e856b61c844 100644
--- a/wscript_configure
+++ b/wscript_configure
@@ -121,7 +121,7 @@ conf.check_python_version((2,4,0))
 conf.check_message_1('Detecting Python MySQL module')
 try: import MySQLdb
 except ImportError,e:
-	raise Configure.ConfigurationError, "The Python MySQLdb module could not be found.\nOn Linux: ./waf installrpmdeps or ./waf installdebdeps according to your distro's package format.\nOn Windows: Install MySQL on your machine, then install the Python MySQLdb module for Python %s.\nThe module for Python 2.6 / win32 is available here: http://soemin.googlecode.com/files/MySQL-python-1.2.3c1.win32-py2.6.exe"%conf.env.PYTHON_VERSION
+	raise Configure.ConfigurationError, "The Python MySQLdb module could not be found.\nOn Linux: ./waf installrpmdeps or ./waf installdebdeps according to your distro's package format.\nOn Windows: Install MySQL 5.1 on your machine, then install the Python MySQLdb module for Python %s.\nThe module for Python 2.6 / win32 / MySQL 5.1 is available here: http://soemin.googlecode.com/files/MySQL-python-1.2.3c1.win32-py2.6.exe"%conf.env.PYTHON_VERSION
 conf.check_message_2('MySQLdb','GREEN')
 
 if conf.env.DISTRO not in ["Windows","Mac"]:

From da56b9b7673812f363b68cbe2f9bb78e13fffa27 Mon Sep 17 00:00:00 2001
From: "Manuel Amador (Rudd-O)" 
Date: Wed, 1 Sep 2010 20:39:53 -0700
Subject: [PATCH 034/145] when waf is used to run the agent or consoleproxy or
 managementserver, and the stack has not been installed yet, install it
 intelligently

---
 wscript | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/wscript b/wscript
index 6b5d6bce275..f63796fa4d5 100644
--- a/wscript
+++ b/wscript
@@ -786,6 +786,8 @@ def run(args):
 		"-Dcatalina.home=" + conf.env.MSENVIRON,
 		"-Djava.io.tmpdir="+_join(conf.env.MSENVIRON,"temp"), ]
 
+	if not _exists(_join(conf.env.BINDIR,"cloud-setup-databases")): Scripting.install(conf)
+
 	cp = [conf.env.MSCONF]
 	cp += _glob(_join(conf.env.MSENVIRON,'bin',"*.jar"))
 	cp += _glob(_join(conf.env.MSENVIRON,'lib',"*.jar"))
@@ -800,8 +802,6 @@ def run(args):
 	#vendorconfs = _glob(  _join(conf.env.MSCONF,"vendor","*")  )
 	#if vendorconfs: cp = plugins + cp
 
-	#Scripting.install(conf)
-
 	run_java("org.apache.catalina.startup.Bootstrap",cp,options,["start"])
 
 def debug(ctx):
@@ -812,12 +812,14 @@ def debug(ctx):
 def run_agent(args):
 	"""runs the management server"""
 	conf = _getbuildcontext()
+	if not _exists(_join(conf.env.LIBEXECDIR,"agent-runner")): Scripting.install(conf)
 	_check_call("sudo",[_join(conf.env.LIBEXECDIR,"agent-runner")])
 
 @throws_command_errors
 def run_console_proxy(args):
 	"""runs the management server"""
 	conf = _getbuildcontext()
+	if not _exists(_join(conf.env.LIBEXECDIR,"console-proxy-runner")): Scripting.install(conf)
 	_check_call("sudo",[_join(conf.env.LIBEXECDIR,"console-proxy-runner")])
 
 def simulate_agent(args):
@@ -844,7 +846,7 @@ def simulate_agent(args):
 	cp += [conf.env.DEPSCLASSPATH]
 	cp += [conf.env.AGENTSIMULATORCLASSPATH]
 
-	#Scripting.install(conf)
+	if not _exists(_join(conf.env.LIBEXECDIR,"agent-runner")): Scripting.install(conf)
 
 	run_java("com.cloud.agent.AgentSimulator",cp,arguments=args)
 

From 1ce10df39ac31ea21d297e0a98e31705d076afc8 Mon Sep 17 00:00:00 2001
From: anthony 
Date: Wed, 1 Sep 2010 21:01:50 -0700
Subject: [PATCH 035/145] since vhd-util may not be present, we need to check
 whether it is available before using it

---
 scripts/storage/secondary/createtmplt.sh | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/scripts/storage/secondary/createtmplt.sh b/scripts/storage/secondary/createtmplt.sh
index 9dfd5be5cd1..b210a9649fa 100755
--- a/scripts/storage/secondary/createtmplt.sh
+++ b/scripts/storage/secondary/createtmplt.sh
@@ -161,10 +161,13 @@ rollback_if_needed $tmpltfs $? "failed to uncompress $tmpltimg\n"
 tmpltimg2=$(untar $tmpltimg2)
 rollback_if_needed $tmpltfs $? "tar archives not supported\n"
 
-if [ ${tmpltname%.vhd} = ${tmpltname} ]
+if [ ${tmpltname%.vhd} != ${tmpltname} ]
 then
-  vhd-util check -n ${tmpltimg2} > /dev/null
-  rollback_if_needed $tmpltfs $? "vhd tool check $tmpltimg2 failed\n"
+  if  which  vhd-util 2>/dev/null
+  then 
+    vhd-util check -n ${tmpltimg2} > /dev/null
+    rollback_if_needed $tmpltfs $? "vhd tool check $tmpltimg2 failed\n"
+  fi
 fi
 
 # need the 'G' suffix on volume size

From d16f0b9ddf1bc72b10b87e1abfe2b3714cfd8e1a Mon Sep 17 00:00:00 2001
From: kishan 
Date: Thu, 2 Sep 2010 17:55:55 +0530
Subject: [PATCH 036/145] bug 6036: Added usage exec timeZone and made
 aggregation timezone as GMT

---
 server/src/com/cloud/configuration/Config.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java
index ae4dd16ca23..e2f22c3989d 100644
--- a/server/src/com/cloud/configuration/Config.java
+++ b/server/src/com/cloud/configuration/Config.java
@@ -168,7 +168,7 @@ public enum Config {
     
 	// Premium
 	
-	UsageAggregationTimezone("Premium", ManagementServer.class, String.class, "usage.aggregation.timezone", "GMT", "The timezone to use when aggregating user statistics", null),
+	UsageExecutionTimezone("Premium", ManagementServer.class, String.class, "usage.execution.timezone", null, "The timezone to use for usage job execution time", null),
 	UsageStatsJobAggregationRange("Premium", ManagementServer.class, Integer.class, "usage.stats.job.aggregation.range", "1440", "The range of time for aggregating the user statistics specified in minutes (e.g. 1440 for daily, 60 for hourly.", null),
 	UsageStatsJobExecTime("Premium", ManagementServer.class, String.class, "usage.stats.job.exec.time", "00:15", "The time at which the usage statistics aggregation job will run as an HH24:MM time, e.g. 00:30 to run at 12:30am.", null),
     EnableUsageServer("Premium", ManagementServer.class, Boolean.class, "enable.usage.server", "true", "Flag for enabling usage", null),

From 254381d41ba2ad0dc05e2aa3294ad57fca2c7e59 Mon Sep 17 00:00:00 2001
From: abhishek 
Date: Thu, 2 Sep 2010 10:48:15 -0700
Subject: [PATCH 037/145] bug 6031: Fixing the error msg when the deployment of
 a directly attached vm fails

status 6031: resolved fixed
---
 server/src/com/cloud/vm/UserVmManagerImpl.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java
index 721f73e3eb4..d22db81acbf 100755
--- a/server/src/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/com/cloud/vm/UserVmManagerImpl.java
@@ -2725,6 +2725,8 @@ public class UserVmManagerImpl implements UserVmManager {
 	        	else
 	        	{
 	        		s_logger.debug("failed to create VM instance : " + name);
+	        		throw new InternalErrorException("We could not find a suitable pool for creating this directly attached vm");
+	        		
 	        	}
 	            return null;
 	        }
@@ -2757,7 +2759,7 @@ public class UserVmManagerImpl implements UserVmManager {
             _accountMgr.decrementResourceCount(account.getId(), ResourceType.volume, numVolumes);
 
 	        s_logger.error("Unable to create vm", th);
-	        throw new CloudRuntimeException("Unable to create vm", th);
+	        throw new CloudRuntimeException("Unable to create vm: "+th.getMessage(), th);
 	    }
 	}
     

From eb3516dde88b55ff6e485a1ce638cfb647724137 Mon Sep 17 00:00:00 2001
From: anthony 
Date: Thu, 2 Sep 2010 14:11:50 -0700
Subject: [PATCH 038/145] bug 3453: update template size in template-sync

status 3453: resolved fixed
---
 .../com/cloud/storage/download/DownloadMonitorImpl.java    | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/server/src/com/cloud/storage/download/DownloadMonitorImpl.java b/server/src/com/cloud/storage/download/DownloadMonitorImpl.java
index 31739bfec3e..582e0f58f9e 100644
--- a/server/src/com/cloud/storage/download/DownloadMonitorImpl.java
+++ b/server/src/com/cloud/storage/download/DownloadMonitorImpl.java
@@ -450,12 +450,13 @@ public class DownloadMonitorImpl implements  DownloadMonitor {
                     tmpltHost.setDownloadPercent(100);
                     tmpltHost.setDownloadState(Status.DOWNLOADED);
                     tmpltHost.setInstallPath(templateInfo.get(uniqueName).getInstallPath());
+                    tmpltHost.setSize(templateInfo.get(uniqueName).getSize());
                     tmpltHost.setLastUpdated(new Date());
 					_vmTemplateHostDao.update(tmpltHost.getId(), tmpltHost);
 				} else {
-					VMTemplateHostVO templtHost = new VMTemplateHostVO(sserverId, tmplt.getId(), new Date(), 100, Status.DOWNLOADED, null, null, null, templateInfo.get(uniqueName).getInstallPath(), tmplt.getUrl());
-					templtHost.setSize(templateInfo.get(uniqueName).getSize());
-					_vmTemplateHostDao.persist(templtHost);
+				    tmpltHost = new VMTemplateHostVO(sserverId, tmplt.getId(), new Date(), 100, Status.DOWNLOADED, null, null, null, templateInfo.get(uniqueName).getInstallPath(), tmplt.getUrl());
+					tmpltHost.setSize(templateInfo.get(uniqueName).getSize());
+					_vmTemplateHostDao.persist(tmpltHost);
 				}
 				templateInfo.remove(uniqueName);
 				continue;

From 3a79277f26e4fa5d0ce068453a1b4fbee327dfa1 Mon Sep 17 00:00:00 2001
From: "Manuel Amador (Rudd-O)" 
Date: Thu, 2 Sep 2010 12:14:06 -0700
Subject: [PATCH 039/145] Tomcat tool split out from wscript_configure, to
 reuse in cloud-bridge

---
 tools/waf/tomcat.py | 41 +++++++++++++++++++++++++++++++++++++++++
 wscript             |  5 +----
 wscript_configure   | 24 ++++--------------------
 3 files changed, 46 insertions(+), 24 deletions(-)
 create mode 100644 tools/waf/tomcat.py

diff --git a/tools/waf/tomcat.py b/tools/waf/tomcat.py
new file mode 100644
index 00000000000..e314c32beb0
--- /dev/null
+++ b/tools/waf/tomcat.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+import Options, Utils
+import os
+
+def detect(conf):
+	if not conf.env.DATADIR:
+		conf.fatal("DATADIR not found in the environment.  Did you run conf.check_tool('gnu_dirs') before running check_tool('tomcat')?")
+	conf.check_message_1('Detecting Tomcat')
+	conf.env.TOMCATHOME = ''
+	tomcathome = getattr(Options.options, 'TOMCATHOME', '')
+	if tomcathome:
+		conf.env.TOMCATHOME = tomcathome
+		method = "forced through --with-tomcat"
+	else:
+		if    "TOMCAT_HOME" in conf.environ and conf.environ['TOMCAT_HOME'].strip():
+			conf.env.TOMCATHOME = conf.environ["TOMCAT_HOME"]
+			method = 'got through environment variable %TOMCAT_HOME%'
+		elif  "CATALINA_HOME" in conf.environ and conf.environ['CATALINA_HOME'].strip():
+			conf.env.TOMCATHOME = conf.environ['CATALINA_HOME']
+			method = 'got through environment variable %CATALINA_HOME%'
+		elif os.path.isdir(os.path.join(conf.env.DATADIR,"tomcat6")):
+			conf.env.TOMCATHOME = os.path.join(conf.env.DATADIR,"tomcat6")
+			method = 'detected existence of Tomcat directory under $DATADIR'
+		elif os.path.isdir("/usr/share/tomcat6"):
+			conf.env.TOMCATHOME = "/usr/share/tomcat6"
+			method = 'detected existence of standard Linux system directory'
+	if not conf.env.TOMCATHOME:
+		conf.fatal("Could not detect Tomcat")
+	elif not os.path.isdir(conf.env.TOMCATHOME):
+		conf.fatal("Tomcat cannot be found at %s"%conf.env.TOMCATHOME)
+	else:
+		conf.check_message_2("%s (%s)"%(conf.env.TOMCATHOME,method),"GREEN")
+
+def set_options(opt):
+        inst_dir = opt.get_option_group('--datadir') # get the group that contains bindir
+        if not inst_dir: raise Utils.WafError, "DATADIR not set.  Did you load the gnu_dirs tool options with opt.tool_options('gnu_dirs') before running opt.tool_options('tomcat')?"
+	inst_dir.add_option('--with-tomcat', # add javadir to the group that contains bindir
+		help = 'Path to installed Tomcat 6 environment [Default: ${DATADIR}/tomcat6 (unless %%CATALINA_HOME%% is set)]',
+		default = '',
+		dest = 'TOMCATHOME')
\ No newline at end of file
diff --git a/wscript b/wscript
index f63796fa4d5..2a039423637 100644
--- a/wscript
+++ b/wscript
@@ -404,16 +404,13 @@ def set_options(opt):
 	opt.tool_options('mkisofs',tooldir='tools/waf')
 	if platform.system() not in ['Windows',"Darwin"]: opt.tool_options('compiler_cc')
 	opt.tool_options('python')
+	opt.tool_options('tomcat',tooldir='tools/waf')
 	
         inst_dir = opt.get_option_group('--bindir') # get the group that contains bindir
 	inst_dir.add_option('--javadir', # add javadir to the group that contains bindir
 		help = 'Java class and jar files [Default: ${DATADIR}/java]',
 		default = '',
 		dest = 'JAVADIR')
-	inst_dir.add_option('--with-tomcat', # add javadir to the group that contains bindir
-		help = 'Path to installed Tomcat 6 environment [Default: ${DATADIR}/tomcat6 (unless %%CATALINA_HOME%% is set)]',
-		default = '',
-		dest = 'TOMCATHOME')
         inst_dir = opt.get_option_group('--srcdir') # get the group that contains the srcdir
 	inst_dir.add_option('--with-db-host', # add javadir to the group that contains bindir
 		help = 'Database host to use for waf deploydb [Default: 127.0.0.1]',
diff --git a/wscript_configure b/wscript_configure
index e856b61c844..07eaffceddb 100644
--- a/wscript_configure
+++ b/wscript_configure
@@ -137,6 +137,10 @@ if conf.env.DISTRO in ['Windows']:
 for a in "DBHOST DBUSER DBPW DBDIR".split():
 	conf.env[a] = getattr(Options.options, a, '')
 
+try: conf.check_tool("tomcat")
+except Configure.ConfigurationError,e:
+	conf.fatal("Tomcat directory %r not found.  Either install Tomcat using ./waf installrpmdeps or ./waf installdebdeps, or manually install Tomcat to a directory in your system and set the environment variable TOMCAT_HOME to point to it."%conf.env.TOMCATHOME)
+
 conf.check_message_1('Determining management server user name')
 msuser = getattr(Options.options, 'MSUSER', '')
 if msuser:
@@ -150,26 +154,6 @@ else:
 		conf.env.MSUSER = conf.env.PACKAGE
 		conf.check_message_2("%s (Linux default)"%conf.env.MSUSER,"GREEN")
 
-conf.check_message_1('Detecting Tomcat')
-tomcathome = getattr(Options.options, 'TOMCATHOME', '')
-if tomcathome:
-	conf.env.TOMCATHOME = tomcathome
-	conf.check_message_2("%s (forced through --with-tomcat)"%conf.env.TOMCATHOME,"GREEN")
-else:
-	if    "TOMCAT_HOME" in conf.environ and conf.environ['TOMCAT_HOME'].strip():
-		conf.env.TOMCATHOME = conf.environ["TOMCAT_HOME"]
-		conf.check_message_2("%s (got through environment variable %%TOMCAT_HOME%%)"%conf.env.TOMCATHOME,"GREEN")
-	elif  "CATALINA_HOME" in conf.environ and conf.environ['CATALINA_HOME'].strip():
-		conf.env.TOMCATHOME = conf.environ['CATALINA_HOME']
-		conf.check_message_2("%s (got through environment variable %%CATALINA_HOME%%)"%conf.env.TOMCATHOME,"GREEN")
-	elif _isdir("/usr/share/tomcat6"):
-		conf.env.TOMCATHOME = "/usr/share/tomcat6"
-		conf.check_message_2("%s (detected existence of system directory)"%conf.env.TOMCATHOME,"GREEN")
-	else:
-		conf.env.TOMCATHOME = _join(conf.env.DATADIR,'tomcat6')
-		conf.check_message_2("%s (assumed presence of Tomcat there)"%conf.env.TOMCATHOME,"GREEN")
-if not _exists(conf.env.TOMCATHOME): conf.fatal("Tomcat directory %s not found.  Either install Tomcat using ./waf installrpmdeps or ./waf installdebdeps, or manually install Tomcat to a directory in your system and set the environment variable TOMCAT_HOME to point to it."%conf.env.TOMCATHOME)
-
 conf.env.AGENTPATH = _join(conf.env.PACKAGE,"agent")
 conf.env.CPPATH = _join(conf.env.PACKAGE,"console-proxy")
 conf.env.MSPATH = _join(conf.env.PACKAGE,"management")

From daf569e8446b8614b031e01bfc558cca9bda4f1d Mon Sep 17 00:00:00 2001
From: Jessica Wang 
Date: Thu, 2 Sep 2010 14:14:35 -0700
Subject: [PATCH 040/145] new UI - click left menu of Events will load events
 in middle panel.

---
 ui/new/index.jsp                  | 48 +++++++++++++++----------------
 ui/new/scripts/cloud.core.init.js | 14 +++++----
 2 files changed, 32 insertions(+), 30 deletions(-)

diff --git a/ui/new/index.jsp b/ui/new/index.jsp
index 04f8230d2ae..5b6c9526172 100644
--- a/ui/new/index.jsp
+++ b/ui/new/index.jsp
@@ -294,7 +294,7 @@
                     
-
+
Dashboard
@@ -327,7 +327,7 @@
-
+
@@ -336,13 +336,13 @@ Routers
-
+
Storage
- Storage + System
@@ -358,7 +358,7 @@
-
+
Host
@@ -378,28 +378,28 @@
-
+
storage
Primary Storage
-
+
storage
secondary Storage
-
+
storage
Volumes
-
+
storage
@@ -419,14 +419,14 @@
-
+
Network
IP Addresses
-
+
Network
@@ -446,14 +446,14 @@
-
+
Templates
Template
-
+
Templates
@@ -473,7 +473,7 @@
-
+
Accounts
@@ -493,7 +493,7 @@
-
+
Domain
@@ -513,14 +513,14 @@
-
+
Events
Events
-
+
Events
@@ -540,28 +540,28 @@
-
+
Configuration
Global Settings
-
+
Configuration
Zones
-
+
Configuration
Service Offerings
-
+
Configuration
@@ -622,12 +622,12 @@

- ABC +

- diff --git a/ui/new/scripts/cloud.core.init.js b/ui/new/scripts/cloud.core.init.js index b25fcd2c601..1a9beb66132 100755 --- a/ui/new/scripts/cloud.core.init.js +++ b/ui/new/scripts/cloud.core.init.js @@ -64,12 +64,14 @@ $(document).ready(function() { success: function(json) { $midmenuContainer.empty(); var items = json[jsonResponse1][jsonResponse2]; - for(var i=0; i 0) { + for(var i=0; i Date: Thu, 2 Sep 2010 15:04:21 -0700 Subject: [PATCH 041/145] set size in createtmplt.sh and remove unused properties --- scripts/storage/secondary/createtmplt.sh | 38 ++++-------------------- 1 file changed, 5 insertions(+), 33 deletions(-) diff --git a/scripts/storage/secondary/createtmplt.sh b/scripts/storage/secondary/createtmplt.sh index b210a9649fa..a2d296332df 100755 --- a/scripts/storage/secondary/createtmplt.sh +++ b/scripts/storage/secondary/createtmplt.sh @@ -3,7 +3,7 @@ # createtmplt.sh -- install a template usage() { - printf "Usage: %s: -t -n -f -s -c -d -h [-u]\n" $(basename $0) >&2 + printf "Usage: %s: -t -n -f -c -d -h [-u]\n" $(basename $0) >&2 } @@ -67,7 +67,7 @@ uncompress() { return 1 fi - rm $1 + rm -f $1 printf $tmpfile return 0 @@ -77,16 +77,10 @@ create_from_file() { local tmpltfs=$1 local tmpltimg=$2 local tmpltname=$3 - local volsize=$4 - local cleanup=$5 #copy the file to the disk mv $tmpltimg /$tmpltfs/$tmpltname -# if [ "$cleanup" == "true" ] -# then -# rm -f $tmpltimg -# fi } tflag= @@ -112,7 +106,6 @@ do tmpltimg="$OPTARG" ;; s) sflag=1 - volsize="$OPTARG" ;; c) cflag=1 cksum="$OPTARG" @@ -170,27 +163,9 @@ then fi fi -# need the 'G' suffix on volume size -if [ ${volsize:(-1)} != G ] -then - volsize=${volsize}G -fi +imgsize=$(ls -l $tmpltimg2| awk -F" " '{print $5}') -#determine source file size -- it needs to be less than or equal to volsize -imgsize=$(ls -lh $tmpltimg2| awk -F" " '{print $5}') -if [ ${imgsize:(-1)} == G ] -then - imgsize=${imgsize%G} #strip out the G - imgsize=${imgsize%.*} #...and any decimal part - let imgsize=imgsize+1 # add 1 
to compensate for decimal part - volsizetmp=${volsize%G} - if [ $volsizetmp -lt $imgsize ] - then - volsize=${imgsize}G - fi -fi - -create_from_file $tmpltfs $tmpltimg2 $tmpltname $volsize $cleanup +create_from_file $tmpltfs $tmpltimg2 $tmpltname touch /$tmpltfs/template.properties rollback_if_needed $tmpltfs $? "Failed to create template.properties file" @@ -198,13 +173,10 @@ echo -n "" > /$tmpltfs/template.properties today=$(date '+%m_%d_%Y') echo "filename=$tmpltname" > /$tmpltfs/template.properties -echo "snapshot.name=$today" >> /$tmpltfs/template.properties echo "description=$descr" >> /$tmpltfs/template.properties -echo "name=$tmpltname" >> /$tmpltfs/template.properties echo "checksum=$cksum" >> /$tmpltfs/template.properties echo "hvm=$hvm" >> /$tmpltfs/template.properties -echo "volume.size=$volsize" >> /$tmpltfs/template.properties - +echo "size=$imgsize" >> /$tmpltfs/template.properties if [ "$cleanup" == "true" ] then From b3803d563e809564b30cfab08fcd7e2b4839d771 Mon Sep 17 00:00:00 2001 From: abhishek Date: Thu, 2 Sep 2010 16:21:27 -0700 Subject: [PATCH 042/145] bug 5909: Implementing the enhancement for adding attach time for volumes. In the case of general VM creation, the attach time is usually~creation time. In the case of attaching a volume to a vm, the time significantly differs. 
The value is nulled out during the detaching of volume from the vm status 5909: resolved fixed --- api/src/com/cloud/storage/Volume.java | 6 ++++++ .../agent/api/storage/CreateCommand.java | 2 +- core/src/com/cloud/storage/VolumeVO.java | 15 +++++++++++++ .../com/cloud/storage/dao/VolumeDaoImpl.java | 2 ++ server/src/com/cloud/api/BaseCmd.java | 1 + .../cloud/api/commands/ListVolumesCmd.java | 1 + .../com/cloud/storage/StorageManagerImpl.java | 21 +++++++++++++++++++ setup/db/create-schema.sql | 1 + setup/db/schema-21to22.sql | 1 + 9 files changed, 49 insertions(+), 1 deletion(-) diff --git a/api/src/com/cloud/storage/Volume.java b/api/src/com/cloud/storage/Volume.java index f43f5a5be74..64c868412e3 100755 --- a/api/src/com/cloud/storage/Volume.java +++ b/api/src/com/cloud/storage/Volume.java @@ -17,6 +17,8 @@ */ package com.cloud.storage; +import java.util.Date; + import com.cloud.domain.PartOf; import com.cloud.template.BasedOn; import com.cloud.user.OwnedBy; @@ -86,4 +88,8 @@ public interface Volume extends PartOf, OwnedBy, BasedOn { void setSourceId(Long sourceId); Long getSourceId(); + + Date getAttached(); + + void setAttached(Date attached); } diff --git a/core/src/com/cloud/agent/api/storage/CreateCommand.java b/core/src/com/cloud/agent/api/storage/CreateCommand.java index ab370027c8d..48e53748f1e 100644 --- a/core/src/com/cloud/agent/api/storage/CreateCommand.java +++ b/core/src/com/cloud/agent/api/storage/CreateCommand.java @@ -64,7 +64,7 @@ public class CreateCommand extends Command { this.pool = new StoragePoolTO(pool); this.templateUrl = null; this.size = size; - this.instanceName = vm.getInstanceName(); + //this.instanceName = vm.getInstanceName(); } @Override diff --git a/core/src/com/cloud/storage/VolumeVO.java b/core/src/com/cloud/storage/VolumeVO.java index 48ccc908c99..6e342aaa185 100755 --- a/core/src/com/cloud/storage/VolumeVO.java +++ b/core/src/com/cloud/storage/VolumeVO.java @@ -90,6 +90,10 @@ public class VolumeVO implements Volume { 
@Column(name="created") Date created; + @Column(name="attached") + @Temporal(value=TemporalType.TIMESTAMP) + Date attached; + @Column(name="data_center_id") long dataCenterId; @@ -535,4 +539,15 @@ public class VolumeVO implements Volume { public Long getSourceId(){ return this.sourceId; } + + @Override + public Date getAttached(){ + return this.attached; + } + + @Override + public void setAttached(Date attached){ + this.attached = attached; + } + } diff --git a/core/src/com/cloud/storage/dao/VolumeDaoImpl.java b/core/src/com/cloud/storage/dao/VolumeDaoImpl.java index 976c6445483..b9040983db2 100755 --- a/core/src/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/core/src/com/cloud/storage/dao/VolumeDaoImpl.java @@ -234,6 +234,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol volume.setInstanceId(vmId); volume.setDeviceId(deviceId); volume.setUpdated(new Date()); + volume.setAttached(new Date()); update(volumeId, volume); } @@ -243,6 +244,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol volume.setInstanceId(null); volume.setDeviceId(null); volume.setUpdated(new Date()); + volume.setAttached(null); update(volumeId, volume); } diff --git a/server/src/com/cloud/api/BaseCmd.java b/server/src/com/cloud/api/BaseCmd.java index 4c630bd734c..90c2397f43a 100644 --- a/server/src/com/cloud/api/BaseCmd.java +++ b/server/src/com/cloud/api/BaseCmd.java @@ -153,6 +153,7 @@ public abstract class BaseCmd { CPU_ALLOCATED("cpuallocated", BaseCmd.TYPE_LONG, "cpuallocated"), CPU_USED("cpuused", BaseCmd.TYPE_LONG, "cpuused"), CREATED("created", BaseCmd.TYPE_DATE, "created"), + ATTACHED("attached", BaseCmd.TYPE_DATE, "attached"), CROSS_ZONES("crossZones", BaseCmd.TYPE_BOOLEAN, "crosszones"), DAILY_MAX("dailymax", BaseCmd.TYPE_INT, "dailyMax"), DATA_DISK_OFFERING_ID("datadiskofferingid", BaseCmd.TYPE_LONG, "dataDiskOfferingId"), diff --git a/server/src/com/cloud/api/commands/ListVolumesCmd.java b/server/src/com/cloud/api/commands/ListVolumesCmd.java 
index 85d93f39223..435c4bdeeba 100755 --- a/server/src/com/cloud/api/commands/ListVolumesCmd.java +++ b/server/src/com/cloud/api/commands/ListVolumesCmd.java @@ -194,6 +194,7 @@ public class ListVolumesCmd extends BaseCmd{ volumeData.add(new Pair(BaseCmd.Properties.SIZE.getName(), virtualSizeInBytes)); volumeData.add(new Pair(BaseCmd.Properties.CREATED.getName(), getDateString(volume.getCreated()))); + volumeData.add(new Pair(BaseCmd.Properties.ATTACHED.getName(), getDateString(volume.getAttached()))); volumeData.add(new Pair(BaseCmd.Properties.STATE.getName(),volume.getStatus())); Account accountTemp = getManagementServer().findAccountById(volume.getAccountId()); diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index 9449b043602..f6d9aa72de0 100644 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -19,7 +19,11 @@ package com.cloud.storage; import java.net.URI; import java.net.UnknownHostException; +import java.text.DateFormat; +import java.text.ParseException; +import java.text.SimpleDateFormat; import java.util.ArrayList; +import java.util.Calendar; import java.util.Collections; import java.util.Date; import java.util.Enumeration; @@ -29,6 +33,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.TimeZone; import java.util.UUID; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -122,8 +127,12 @@ import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.AccountVO; import com.cloud.user.User; +import com.cloud.user.UserContext; +import com.cloud.user.UserVO; import com.cloud.user.dao.AccountDao; +import com.cloud.user.dao.UserDao; import com.cloud.uservm.UserVm; +import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import 
com.cloud.utils.component.Adapters; @@ -188,6 +197,7 @@ public class StorageManagerImpl implements StorageManager { @Inject protected VMTemplateDao _templateDao; @Inject protected VMTemplateHostDao _templateHostDao; @Inject protected ServiceOfferingDao _offeringDao; + @Inject protected UserDao _userDao; protected SearchBuilder HostTemplateStatesSearch; protected SearchBuilder PoolsUsedByVmSearch; @@ -921,6 +931,17 @@ public class StorageManagerImpl implements StorageManager { } for (VolumeVO v : volumes) { + + //when the user vm is created, the volume is attached upon creation + //set the attached datetime + try{ + v.setAttached(new Date()); + _volsDao.update(v.getId(), v); + }catch(Exception e) + { + s_logger.warn("Error updating the attached value for volume "+v.getId()+":"+e); + } + long volumeId = v.getId(); // Create an event long sizeMB = v.getSize() / (1024 * 1024); diff --git a/setup/db/create-schema.sql b/setup/db/create-schema.sql index e9dd4fecdb7..188b6e345a9 100644 --- a/setup/db/create-schema.sql +++ b/setup/db/create-schema.sql @@ -251,6 +251,7 @@ CREATE TABLE `cloud`.`volumes` ( `recreatable` tinyint(1) unsigned NOT NULL DEFAULT 0 COMMENT 'Is this volume recreatable?', `destroyed` tinyint(1) COMMENT 'indicates whether the volume was destroyed by the user or not', `created` datetime COMMENT 'Date Created', + `attached` datetime COMMENT 'Date Attached', `updated` datetime COMMENT 'Date updated for attach/detach', `removed` datetime COMMENT 'Date removed. 
not null if removed', `status` varchar(32) COMMENT 'Async API volume creation status', diff --git a/setup/db/schema-21to22.sql b/setup/db/schema-21to22.sql index dff1e91ae52..ae3b3db4446 100644 --- a/setup/db/schema-21to22.sql +++ b/setup/db/schema-21to22.sql @@ -9,3 +9,4 @@ ALTER TABLE `cloud`.`resource_count` MODIFY COLUMN `account_id` bigint unsigned; ALTER TABLE `cloud`.`storage_pool` add COLUMN STATUS varchar(32) not null; -- new status column for maintenance mode support for primary storage ALTER TABLE `cloud`.`volumes` ADD COLUMN `source_id` bigint unsigned; -- id for the source ALTER TABLE `cloud`.`volumes` ADD COLUMN `source_type` varchar(32); --source from which the volume is created i.e. snapshot, diskoffering, template, blank +ALTER TABLE `cloud`.`volumes` ADD COLUMN 'attached' datetime; --date and time the volume was attached From 50781aba80f7923e2c1eec0adf6e2b7ad4d8daff Mon Sep 17 00:00:00 2001 From: anthony Date: Thu, 2 Sep 2010 16:29:20 -0700 Subject: [PATCH 043/145] bug 5917: if checkSR failed, just return error, don't create the same SR again status 5917: resolved fixed --- .../xen/resource/CitrixResourceBase.java | 239 ++---------------- .../com/cloud/storage/StorageManagerImpl.java | 1 + 2 files changed, 19 insertions(+), 221 deletions(-) diff --git a/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java index d96cc18dc58..22591ae59fd 100644 --- a/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java +++ b/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java @@ -4454,9 +4454,8 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR SR.Record srr = sr.getRecord(conn); Set pbds = sr.getPBDs(conn); if (pbds.size() == 0) { - String msg = "There is no PBDs for this SR: " + _host.uuid; + String msg = "There is no PBDs for this SR: " + srr.nameLabel + " on host:" + _host.uuid; s_logger.warn(msg); - removeSR(sr); 
return false; } Set hosts = null; @@ -4510,15 +4509,11 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR protected Answer execute(ModifyStoragePoolCommand cmd) { StoragePoolVO pool = cmd.getPool(); + StoragePoolTO poolTO = new StoragePoolTO(pool); try { Connection conn = getConnection(); - SR sr = getStorageRepository(conn, pool); - if (!checkSR(sr)) { - String msg = "ModifyStoragePoolCommand checkSR failed! host:" + _host.uuid + " pool: " + pool.getName() + pool.getHostAddress() + pool.getPath(); - s_logger.warn(msg); - return new Answer(cmd, false, msg); - } + SR sr = getStorageRepository(conn, poolTO); long capacity = sr.getPhysicalSize(conn); long available = capacity - sr.getPhysicalUtilisation(conn); if (capacity == -1) { @@ -4543,14 +4538,10 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR protected Answer execute(DeleteStoragePoolCommand cmd) { StoragePoolVO pool = cmd.getPool(); + StoragePoolTO poolTO = new StoragePoolTO(pool); try { Connection conn = getConnection(); - SR sr = getStorageRepository(conn, pool); - if (!checkSR(sr)) { - String msg = "DeleteStoragePoolCommand checkSR failed! 
host:" + _host.uuid + " pool: " + pool.getName() + pool.getHostAddress() + pool.getPath(); - s_logger.warn(msg); - return new Answer(cmd, false, msg); - } + SR sr = getStorageRepository(conn, poolTO); sr.setNameLabel(conn, pool.getUuid()); sr.setNameDescription(conn, pool.getName()); @@ -4960,119 +4951,10 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR s_logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } - } - protected SR getIscsiSR(Connection conn, StoragePoolVO pool) { - - synchronized (pool.getUuid().intern()) { - Map deviceConfig = new HashMap(); - try { - String target = pool.getHostAddress().trim(); - String path = pool.getPath().trim(); - if (path.endsWith("/")) { - path = path.substring(0, path.length() - 1); - } - - String tmp[] = path.split("/"); - if (tmp.length != 3) { - String msg = "Wrong iscsi path " + pool.getPath() + " it should be /targetIQN/LUN"; - s_logger.warn(msg); - throw new CloudRuntimeException(msg); - } - String targetiqn = tmp[1].trim(); - String lunid = tmp[2].trim(); - String scsiid = ""; - - Set srs = SR.getByNameLabel(conn, pool.getUuid()); - for (SR sr : srs) { - if (!SRType.LVMOISCSI.equals(sr.getType(conn))) - continue; - - Set pbds = sr.getPBDs(conn); - if (pbds.isEmpty()) - continue; - - PBD pbd = pbds.iterator().next(); - - Map dc = pbd.getDeviceConfig(conn); - - if (dc == null) - continue; - - if (dc.get("target") == null) - continue; - - if (dc.get("targetIQN") == null) - continue; - - if (dc.get("lunid") == null) - continue; - - if (target.equals(dc.get("target")) && targetiqn.equals(dc.get("targetIQN")) && lunid.equals(dc.get("lunid"))) { - return sr; - } - - } - deviceConfig.put("target", target); - deviceConfig.put("targetIQN", targetiqn); - - Host host = Host.getByUuid(conn, _host.uuid); - SR sr = null; - try { - sr = SR.create(conn, host, deviceConfig, new Long(0), pool.getUuid(), pool.getName(), SRType.LVMOISCSI.toString(), "user", true, new HashMap()); - } catch 
(XenAPIException e) { - String errmsg = e.toString(); - if (errmsg.contains("SR_BACKEND_FAILURE_107")) { - String lun[] = errmsg.split(""); - boolean found = false; - for (int i = 1; i < lun.length; i++) { - int blunindex = lun[i].indexOf("") + 7; - int elunindex = lun[i].indexOf(""); - String ilun = lun[i].substring(blunindex, elunindex); - ilun = ilun.trim(); - if (ilun.equals(lunid)) { - int bscsiindex = lun[i].indexOf("") + 8; - int escsiindex = lun[i].indexOf(""); - scsiid = lun[i].substring(bscsiindex, escsiindex); - scsiid = scsiid.trim(); - found = true; - break; - } - } - if (!found) { - String msg = "can not find LUN " + lunid + " in " + errmsg; - s_logger.warn(msg); - throw new CloudRuntimeException(msg); - } - } else { - String msg = "Unable to create Iscsi SR " + deviceConfig + " due to " + e.toString(); - s_logger.warn(msg, e); - throw new CloudRuntimeException(msg, e); - } - } - deviceConfig.put("SCSIid", scsiid); - sr = SR.create(conn, host, deviceConfig, new Long(0), pool.getUuid(), pool.getName(), SRType.LVMOISCSI.toString(), "user", true, new HashMap()); - if( !checkSR(sr) ) { - throw new Exception("no attached PBD"); - } - sr.scan(conn); - return sr; - - } catch (XenAPIException e) { - String msg = "Unable to create Iscsi SR " + deviceConfig + " due to " + e.toString(); - s_logger.warn(msg, e); - throw new CloudRuntimeException(msg, e); - } catch (Exception e) { - String msg = "Unable to create Iscsi SR " + deviceConfig + " due to " + e.getMessage(); - s_logger.warn(msg, e); - throw new CloudRuntimeException(msg, e); - } - } - } - - protected SR getIscsiSR(Connection conn, StoragePoolTO pool) { - + protected SR getIscsiSR(StoragePoolTO pool) { + Connection conn = getConnection(); synchronized (pool.getUuid().intern()) { Map deviceConfig = new HashMap(); try { @@ -5121,6 +5003,7 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR if (checkSR(sr)) { return sr; } + throw new CloudRuntimeException("SR check failed for 
storage pool: " + pool.getUuid() + "on host:" + _host.uuid); } } @@ -5180,13 +5063,12 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR } } - protected SR getNfsSR(StoragePoolVO pool) { + protected SR getNfsSR(StoragePoolTO pool) { Connection conn = getConnection(); Map deviceConfig = new HashMap(); try { - - String server = pool.getHostAddress(); + String server = pool.getHost(); String serverpath = pool.getPath(); serverpath = serverpath.replace("//", "/"); Set srs = SR.getAll(conn); @@ -5215,59 +5097,7 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR if (checkSR(sr)) { return sr; } - } - - } - - deviceConfig.put("server", server); - deviceConfig.put("serverpath", serverpath); - Host host = Host.getByUuid(conn, _host.uuid); - SR sr = SR.create(conn, host, deviceConfig, new Long(0), pool.getUuid(), pool.getName(), SRType.NFS.toString(), "user", true, new HashMap()); - sr.scan(conn); - return sr; - - } catch (XenAPIException e) { - String msg = "Unable to create NFS SR " + deviceConfig + " due to " + e.toString(); - s_logger.warn(msg, e); - throw new CloudRuntimeException(msg, e); - } catch (Exception e) { - String msg = "Unable to create NFS SR " + deviceConfig + " due to " + e.getMessage(); - s_logger.warn(msg); - throw new CloudRuntimeException(msg, e); - } - } - - protected SR getNfsSR(Connection conn, StoragePoolTO pool) { - Map deviceConfig = new HashMap(); - - String server = pool.getHost(); - String serverpath = pool.getPath(); - serverpath = serverpath.replace("//", "/"); - try { - Set srs = SR.getAll(conn); - for (SR sr : srs) { - if (!SRType.NFS.equals(sr.getType(conn))) - continue; - - Set pbds = sr.getPBDs(conn); - if (pbds.isEmpty()) - continue; - - PBD pbd = pbds.iterator().next(); - - Map dc = pbd.getDeviceConfig(conn); - - if (dc == null) - continue; - - if (dc.get("server") == null) - continue; - - if (dc.get("serverpath") == null) - continue; - - if 
(server.equals(dc.get("server")) && serverpath.equals(dc.get("serverpath"))) { - return sr; + throw new CloudRuntimeException("SR check failed for storage pool: " + pool.getUuid() + "on host:" + _host.uuid); } } @@ -5354,6 +5184,7 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR public CopyVolumeAnswer execute(final CopyVolumeCommand cmd) { String volumeUUID = cmd.getVolumePath(); StoragePoolVO pool = cmd.getPool(); + StoragePoolTO poolTO = new StoragePoolTO(pool); String secondaryStorageURL = cmd.getSecondaryStorageURL(); URI uri = null; @@ -5406,7 +5237,7 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR } // Copy the volume to the primary storage pool - primaryStoragePool = getStorageRepository(conn, pool); + primaryStoragePool = getStorageRepository(conn, poolTO); destVolume = cloudVDIcopy(srcVolume, primaryStoragePool); } @@ -6280,40 +6111,6 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR throw new CloudRuntimeException("Unable to get SR " + pool.getUuid() + " due to " + e.getMessage(), e); } - if (srs.size() > 1) { - throw new CloudRuntimeException("More than one storage repository was found for pool with uuid: " + pool.getUuid()); - } - - if (srs.size() == 1) { - SR sr = srs.iterator().next(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("SR retrieved for " + pool.getId() + " is mapped to " + sr.toString()); - } - - if (checkSR(sr)) { - return sr; - } - } - - if (pool.getType() == StoragePoolType.NetworkFilesystem) - return getNfsSR(conn, pool); - else if (pool.getType() == StoragePoolType.IscsiLUN) - return getIscsiSR(conn, pool); - else - throw new CloudRuntimeException("The pool type: " + pool.getType().name() + " is not supported."); - - } - - protected SR getStorageRepository(Connection conn, StoragePoolVO pool) { - Set srs; - try { - srs = SR.getByNameLabel(conn, pool.getUuid()); - } catch (XenAPIException e) { - throw new 
CloudRuntimeException("Unable to get SR " + pool.getUuid() + " due to " + e.toString(), e); - } catch (Exception e) { - throw new CloudRuntimeException("Unable to get SR " + pool.getUuid() + " due to " + e.getMessage(), e); - } - if (srs.size() > 1) { throw new CloudRuntimeException("More than one storage repository was found for pool with uuid: " + pool.getUuid()); } else if (srs.size() == 1) { @@ -6325,15 +6122,15 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR if (checkSR(sr)) { return sr; } - throw new CloudRuntimeException("Check this SR failed"); + throw new CloudRuntimeException("SR check failed for storage pool: " + pool.getUuid() + "on host:" + _host.uuid); } else { - if (pool.getPoolType() == StoragePoolType.NetworkFilesystem) + if (pool.getType() == StoragePoolType.NetworkFilesystem) return getNfsSR(pool); - else if (pool.getPoolType() == StoragePoolType.IscsiLUN) - return getIscsiSR(conn, pool); + else if (pool.getType() == StoragePoolType.IscsiLUN) + return getIscsiSR(pool); else - throw new CloudRuntimeException("The pool type: " + pool.getPoolType().name() + " is not supported."); + throw new CloudRuntimeException("The pool type: " + pool.getType().name() + " is not supported."); } } diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index 9449b043602..490c4e00818 100644 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -1369,6 +1369,7 @@ public class StorageManagerImpl implements StorageManager { boolean success = addPoolToHost(h.getId(), pool); if (success) { poolHosts.add(h); + break; } } From 79684987a4ecad6ee3f7ecd3bc7ddeb4c9da66fd Mon Sep 17 00:00:00 2001 From: abhishek Date: Thu, 2 Sep 2010 16:48:52 -0700 Subject: [PATCH 044/145] some more code cleanup --- server/src/com/cloud/api/commands/ListVMsCmd.java | 1 - server/src/com/cloud/vm/UserVmManagerImpl.java | 11 
+++++------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/server/src/com/cloud/api/commands/ListVMsCmd.java b/server/src/com/cloud/api/commands/ListVMsCmd.java index 5989d982630..61ff9c46d96 100644 --- a/server/src/com/cloud/api/commands/ListVMsCmd.java +++ b/server/src/com/cloud/api/commands/ListVMsCmd.java @@ -34,7 +34,6 @@ import com.cloud.host.HostVO; import com.cloud.server.Criteria; import com.cloud.service.ServiceOfferingVO; import com.cloud.storage.GuestOSCategoryVO; -import com.cloud.storage.GuestOSVO; import com.cloud.storage.VMTemplateVO; import com.cloud.user.Account; import com.cloud.uservm.UserVm; diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index d22db81acbf..66a7e8f335b 100755 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -82,8 +82,8 @@ import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.configuration.dao.ResourceLimitDao; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; -import com.cloud.dc.Vlan.VlanType; import com.cloud.dc.VlanVO; +import com.cloud.dc.Vlan.VlanType; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.VlanDao; @@ -121,7 +121,6 @@ import com.cloud.network.dao.SecurityGroupVMMapDao; import com.cloud.network.security.NetworkGroupManager; import com.cloud.network.security.NetworkGroupVO; import com.cloud.offering.NetworkOffering; -import com.cloud.offering.NetworkOffering.GuestIpType; import com.cloud.offering.ServiceOffering; import com.cloud.offerings.NetworkOfferingVO; import com.cloud.service.ServiceOfferingVO; @@ -129,18 +128,18 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.GuestOSVO; import com.cloud.storage.Snapshot; -import com.cloud.storage.Snapshot.SnapshotType; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; 
-import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateHostVO; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; -import com.cloud.storage.Volume.VolumeType; import com.cloud.storage.VolumeVO; +import com.cloud.storage.Snapshot.SnapshotType; +import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import com.cloud.storage.Volume.VolumeType; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.DiskTemplateDao; import com.cloud.storage.dao.GuestOSCategoryDao; From 16e795d502604579697545c11a97b9b243d37335 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Tue, 31 Aug 2010 16:40:44 -0700 Subject: [PATCH 045/145] almost working --- tools/systemvm/debian/buildsystemvm.sh | 168 +++++++- tools/systemvm/debian/config.dat | 398 ++++++++++++++++++ tools/systemvm/debian/config/etc/init.d/cloud | 127 ++++++ .../config/etc/init.d/cloud-early-config | 329 +++++++++++++++ .../config/etc/init.d/cloud-passwd-srvr | 13 + .../debian/config/etc/init.d/postinit | 110 +++++ .../config/etc/iptables/iptables-consoleproxy | 20 + .../config/etc/iptables/iptables-router | 24 ++ .../config/etc/iptables/iptables-secstorage | 20 + .../systemvm/debian/config/etc/iptables/rules | 24 ++ tools/systemvm/debian/config/etc/sysctl.conf | 33 ++ .../debian/config/root/.ssh/authorized_keys | 1 + .../debian/config/root/clearUsageRules.sh | 22 + .../systemvm/debian/config/root/edithosts.sh | 50 +++ tools/systemvm/debian/config/root/firewall.sh | 204 +++++++++ .../debian/config/root/loadbalancer.sh | 167 ++++++++ .../systemvm/debian/config/root/reconfigLB.sh | 23 + 17 files changed, 1722 insertions(+), 11 deletions(-) create mode 100644 tools/systemvm/debian/config.dat create mode 100755 tools/systemvm/debian/config/etc/init.d/cloud 
create mode 100755 tools/systemvm/debian/config/etc/init.d/cloud-early-config create mode 100755 tools/systemvm/debian/config/etc/init.d/cloud-passwd-srvr create mode 100755 tools/systemvm/debian/config/etc/init.d/postinit create mode 100644 tools/systemvm/debian/config/etc/iptables/iptables-consoleproxy create mode 100644 tools/systemvm/debian/config/etc/iptables/iptables-router create mode 100644 tools/systemvm/debian/config/etc/iptables/iptables-secstorage create mode 100644 tools/systemvm/debian/config/etc/iptables/rules create mode 100644 tools/systemvm/debian/config/etc/sysctl.conf create mode 100644 tools/systemvm/debian/config/root/.ssh/authorized_keys create mode 100755 tools/systemvm/debian/config/root/clearUsageRules.sh create mode 100755 tools/systemvm/debian/config/root/edithosts.sh create mode 100755 tools/systemvm/debian/config/root/firewall.sh create mode 100755 tools/systemvm/debian/config/root/loadbalancer.sh create mode 100755 tools/systemvm/debian/config/root/reconfigLB.sh diff --git a/tools/systemvm/debian/buildsystemvm.sh b/tools/systemvm/debian/buildsystemvm.sh index 409883d0214..870b8f2b931 100755 --- a/tools/systemvm/debian/buildsystemvm.sh +++ b/tools/systemvm/debian/buildsystemvm.sh @@ -1,7 +1,9 @@ #!/bin/bash +set -x + IMAGENAME=systemvm -LOCATION=/var/lib/images/systemvm2 +LOCATION=/var/lib/images/systemvm3 PASSWORD=password APT_PROXY= HOSTNAME=systemvm @@ -11,16 +13,19 @@ MINIMIZE=true baseimage() { mkdir -p $LOCATION + #dd if=/dev/zero of=$IMAGELOC bs=1M count=$SIZE dd if=/dev/zero of=$IMAGELOC bs=1M seek=$((SIZE - 1)) count=1 loopdev=$(losetup -f) losetup $loopdev $IMAGELOC parted $loopdev -s 'mklabel msdos' parted $loopdev -s 'mkpart primary ext3 512B 2097151000B' + sleep 2 losetup -d $loopdev loopdev=$(losetup --show -o 512 -f $IMAGELOC ) mkfs.ext3 -L ROOT $loopdev mkdir -p $MOUNTPOINT tune2fs -c 100 -i 0 $loopdev + sleep 2 losetup -d $loopdev mount -o loop,offset=512 $IMAGELOC $MOUNTPOINT @@ -109,7 +114,6 @@ auto lo iface lo inet 
loopback # The primary network interface -allow-hotplug eth0 iface eth0 inet dhcp EOF @@ -193,15 +197,136 @@ EOF chmod a+x usr/local/sbin/power.sh } +fixiptables() { +cat > etc/init.d/iptables-persistent << EOF +#!/bin/sh +### BEGIN INIT INFO +# Provides: iptables +# Required-Start: mountkernfs $local_fs +# Required-Stop: $local_fs +# Should-Start: cloud-early-config +# Default-Start: S +# Default-Stop: +# Short-Description: Set up iptables rules +### END INIT INFO + +PATH="/sbin:/bin:/usr/sbin:/usr/bin" + +# Include config file for iptables-persistent +. /etc/iptables/iptables.conf + +case "\$1" in +start) + if [ -e /var/run/iptables ]; then + echo "iptables is already started!" + exit 1 + else + touch /var/run/iptables + fi + + if [ \$ENABLE_ROUTING -ne 0 ]; then + # Enable Routing + echo 1 > /proc/sys/net/ipv4/ip_forward + fi + + # Load Modules + modprobe -a \$MODULES + + # Load saved rules + if [ -f /etc/iptables/rules ]; then + iptables-restore /etc/iptables/rules + fi + + # Restore Default Policies + iptables -P INPUT ACCEPT + iptables -P FORWARD ACCEPT + iptables -P OUTPUT ACCEPT + + # Flush rules on default tables + iptables -F + iptables -t nat -F + iptables -t mangle -F + + # Unload previously loaded modules + modprobe -r \$MODULES + + # Disable Routing if enabled + if [ \$ENABLE_ROUTING -ne 0 ]; then + # Disable Routing + echo 0 > /proc/sys/net/ipv4/ip_forward + fi + + ;; +restart|force-reload) + \$0 stop + \$0 start + ;; +status) + echo "Filter Rules:" + echo "--------------" + iptables -L -v + echo "" + echo "NAT Rules:" + echo "-------------" + iptables -t nat -L -v + echo "" + echo "Mangle Rules:" + echo "----------------" + iptables -t mangle -L -v + ;; +*) + echo "Usage: \$0 {start|stop|force-stop|restart|force-reload|status}" >&2 + exit 1 + ;; +esac + +exit 0 +EOF + chmod a+x etc/init.d/iptables-persistent + + + touch etc/iptables/iptables.conf + cat > etc/iptables/iptables.conf << EOF +# A basic config file for the /etc/init.d/iptable-persistent 
script +# + +# Should new manually added rules from command line be saved on reboot? Assign to a value different that 0 if you want this enabled. +SAVE_NEW_RULES=0 + +# Modules to load: +MODULES="nf_nat_ftp nf_conntrack_ftp" + +# Enable Routing? +ENABLE_ROUTING=1 +EOF + chmod a+x etc/iptables/iptables.conf + +} + packages() { DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=critical DEBCONF_DB_OVERRIDE=’File{/root/config.dat}’ export DEBIAN_FRONTEND DEBIAN_PRIORITY DEBCONF_DB_OVERRIDE - chroot . apt-get --no-install-recommends -q -y --force-yes install rsyslog chkconfig insserv net-tools ifupdown vim-tiny netbase iptables openssh-server grub e2fsprogs dhcp3-client dnsmasq tcpdump socat wget apache2 python2.5 bzip2 sed gawk diff grep gzip less tar telnet xl2tpd traceroute openswan psmisc + chroot . apt-get --no-install-recommends -q -y --force-yes install rsyslog chkconfig insserv net-tools ifupdown vim-tiny netbase iptables openssh-server grub e2fsprogs dhcp3-client dnsmasq tcpdump socat wget apache2 python bzip2 sed gawk diff grep gzip less tar telnet xl2tpd traceroute openswan psmisc inetutils-ping arping httping dnsutils - chroot . apt-get --no-install-recommends -q -y --force-yes -t backports install haproxy nfs-common + chroot . apt-get --no-install-recommends -q -y --force-yes install haproxy nfs-common echo "***** getting additional modules *********" chroot . apt-get --no-install-recommends -q -y --force-yes install iproute acpid iptables-persistent @@ -218,8 +343,26 @@ password() { chroot . echo "root:$PASSWORD" | chroot . chpasswd } +services() { + mkdir -p ./var/www/html + mkdir -p ./opt/cloud/bin + mkdir -p ./var/cache/cloud + mkdir -p ./usr/local/cloud + mkdir -p ./root/.ssh + + /bin/cp -r ${scriptdir}/config/* ./ + chroot . chkconfig xl2tpd off + chroot . chkconfig --add cloud-early-config + chroot . chkconfig cloud-early-config on + chroot . chkconfig --add cloud-passwd-srvr + chroot . chkconfig cloud-passwd-srvr off + chroot . 
chkconfig --add cloud + chroot . chkconfig cloud off +} + cleanup() { rm -f usr/sbin/policy-rc.d + rm -f root/config.dat rm -f etc/apt/apt.conf.d/01proxy if [ "$MINIMIZE" == "true" ] @@ -229,6 +372,9 @@ cleanup() { rm -rf usr/share/locale/[a-d]* rm -rf usr/share/locale/[f-z]* rm -rf usr/share/doc/* + size=$(df | grep $MOUNTPOINT | awk '{print $4}') + dd if=/dev/zero of=$MOUNTPOINT/zeros.img bs=1M count=$((((size-200000)) / 1000)) + rm -f $MOUNTPOINT/zeros.img fi } @@ -278,20 +424,20 @@ echo "*************CONFIGURING ACPID********************" fixacpid echo "*************DONE CONFIGURING ACPID********************" -#cp etc/inittab etc/inittab.hvm -#cp $scriptdir/inittab.xen etc/inittab.xen -#cp $scriptdir/inittab.xen etc/inittab -#cp $scriptdir/fstab.xen etc/fstab.xen -#cp $scriptdir/fstab.xen etc/fstab -#cp $scriptdir/fstab etc/fstab - echo "*************INSTALLING PACKAGES********************" packages echo "*************DONE INSTALLING PACKAGES********************" +echo "*************CONFIGURING IPTABLES********************" +fixiptables +echo "*************DONE CONFIGURING IPTABLES********************" + echo "*************CONFIGURING PASSWORD********************" password +echo "*************CONFIGURING SERVICES********************" +services + echo "*************CLEANING UP********************" cleanup diff --git a/tools/systemvm/debian/config.dat b/tools/systemvm/debian/config.dat new file mode 100644 index 00000000000..b16638f742e --- /dev/null +++ b/tools/systemvm/debian/config.dat @@ -0,0 +1,398 @@ +Name: adduser/homedir-permission +Template: adduser/homedir-permission +Value: true +Owners: adduser + +Name: ca-certificates/enable_crts +Template: ca-certificates/enable_crts +Value: brasil.gov.br/brasil.gov.br.crt, cacert.org/cacert.org.crt, cacert.org/class3.crt, cacert.org/root.crt, debconf.org/ca.crt, gouv.fr/cert_igca_dsa.crt, gouv.fr/cert_igca_rsa.crt, mozilla/ABAecom_=sub.__Am._Bankers_Assn.=_Root_CA.crt, mozilla/AddTrust_External_Root.crt, 
mozilla/AddTrust_Low-Value_Services_Root.crt, mozilla/AddTrust_Public_Services_Root.crt, mozilla/AddTrust_Qualified_Certificates_Root.crt, mozilla/America_Online_Root_Certification_Authority_1.crt, mozilla/America_Online_Root_Certification_Authority_2.crt, mozilla/AOL_Time_Warner_Root_Certification_Authority_1.crt, mozilla/AOL_Time_Warner_Root_Certification_Authority_2.crt, mozilla/Baltimore_CyberTrust_Root.crt, mozilla/beTRUSTed_Root_CA-Baltimore_Implementation.crt, mozilla/beTRUSTed_Root_CA.crt, mozilla/beTRUSTed_Root_CA_-_Entrust_Implementation.crt, mozilla/beTRUSTed_Root_CA_-_RSA_Implementation.crt, mozilla/Camerfirma_Chambers_of_Commerce_Root.crt, mozilla/Camerfirma_Global_Chambersign_Root.crt, mozilla/Certplus_Class_2_Primary_CA.crt, mozilla/Certum_Root_CA.crt, mozilla/Comodo_AAA_Services_root.crt, mozilla/COMODO_Certification_Authority.crt, mozilla/Comodo_Secure_Services_root.crt, mozilla/Comodo_Trusted_Services_root.crt, mozilla/DigiCert_Assured_ID_Root_CA.crt, mozilla/DigiCert_Global_Root_CA.crt, mozilla/DigiCert_High_Assurance_EV_Root_CA.crt, mozilla/Digital_Signature_Trust_Co._Global_CA_1.crt, mozilla/Digital_Signature_Trust_Co._Global_CA_2.crt, mozilla/Digital_Signature_Trust_Co._Global_CA_3.crt, mozilla/Digital_Signature_Trust_Co._Global_CA_4.crt, mozilla/DST_ACES_CA_X6.crt, mozilla/DST_Root_CA_X3.crt, mozilla/Entrust.net_Global_Secure_Personal_CA.crt, mozilla/Entrust.net_Global_Secure_Server_CA.crt, mozilla/Entrust.net_Premium_2048_Secure_Server_CA.crt, mozilla/Entrust.net_Secure_Personal_CA.crt, mozilla/Entrust.net_Secure_Server_CA.crt, mozilla/Entrust_Root_Certification_Authority.crt, mozilla/Equifax_Secure_CA.crt, mozilla/Equifax_Secure_eBusiness_CA_1.crt, mozilla/Equifax_Secure_eBusiness_CA_2.crt, mozilla/Equifax_Secure_Global_eBusiness_CA.crt, mozilla/Firmaprofesional_Root_CA.crt, mozilla/GeoTrust_Global_CA_2.crt, mozilla/GeoTrust_Global_CA.crt, mozilla/GeoTrust_Primary_Certification_Authority.crt, mozilla/GeoTrust_Universal_CA_2.crt, 
mozilla/GeoTrust_Universal_CA.crt, mozilla/GlobalSign_Root_CA.crt, mozilla/GlobalSign_Root_CA_-_R2.crt, mozilla/Go_Daddy_Class_2_CA.crt, mozilla/GTE_CyberTrust_Global_Root.crt, mozilla/GTE_CyberTrust_Root_CA.crt, mozilla/IPS_Chained_CAs_root.crt, mozilla/IPS_CLASE1_root.crt, mozilla/IPS_CLASE3_root.crt, mozilla/IPS_CLASEA1_root.crt, mozilla/IPS_CLASEA3_root.crt, mozilla/IPS_Servidores_root.crt, mozilla/IPS_Timestamping_root.crt, mozilla/NetLock_Business_=Class_B=_Root.crt, mozilla/NetLock_Express_=Class_C=_Root.crt, mozilla/NetLock_Notary_=Class_A=_Root.crt, mozilla/NetLock_Qualified_=Class_QA=_Root.crt, mozilla/QuoVadis_Root_CA_2.crt, mozilla/QuoVadis_Root_CA_3.crt, mozilla/QuoVadis_Root_CA.crt, mozilla/RSA_Root_Certificate_1.crt, mozilla/RSA_Security_1024_v3.crt, mozilla/RSA_Security_2048_v3.crt, mozilla/Secure_Global_CA.crt, mozilla/SecureTrust_CA.crt, mozilla/Security_Communication_Root_CA.crt, mozilla/Sonera_Class_1_Root_CA.crt, mozilla/Sonera_Class_2_Root_CA.crt, mozilla/Staat_der_Nederlanden_Root_CA.crt, mozilla/Starfield_Class_2_CA.crt, mozilla/StartCom_Certification_Authority.crt, mozilla/StartCom_Ltd..crt, mozilla/Swisscom_Root_CA_1.crt, mozilla/SwissSign_Gold_CA_-_G2.crt, mozilla/SwissSign_Platinum_CA_-_G2.crt, mozilla/SwissSign_Silver_CA_-_G2.crt, mozilla/Taiwan_GRCA.crt, mozilla/TC_TrustCenter__Germany__Class_2_CA.crt, mozilla/TC_TrustCenter__Germany__Class_3_CA.crt, mozilla/TDC_Internet_Root_CA.crt, mozilla/TDC_OCES_Root_CA.crt, mozilla/Thawte_Personal_Basic_CA.crt, mozilla/Thawte_Personal_Freemail_CA.crt, mozilla/Thawte_Personal_Premium_CA.crt, mozilla/Thawte_Premium_Server_CA.crt, mozilla/thawte_Primary_Root_CA.crt, mozilla/Thawte_Server_CA.crt, mozilla/Thawte_Time_Stamping_CA.crt, mozilla/TURKTRUST_Certificate_Services_Provider_Root_1.crt, mozilla/TURKTRUST_Certificate_Services_Provider_Root_2.crt, mozilla/UTN_DATACorp_SGC_Root_CA.crt, mozilla/UTN_USERFirst_Email_Root_CA.crt, mozilla/UTN_USERFirst_Hardware_Root_CA.crt, 
mozilla/UTN-USER_First-Network_Applications.crt, mozilla/UTN_USERFirst_Object_Root_CA.crt, mozilla/ValiCert_Class_1_VA.crt, mozilla/ValiCert_Class_2_VA.crt, mozilla/Verisign_Class_1_Public_Primary_Certification_Authority.crt, mozilla/Verisign_Class_1_Public_Primary_Certification_Authority_-_G2.crt, mozilla/Verisign_Class_1_Public_Primary_Certification_Authority_-_G3.crt, mozilla/Verisign_Class_2_Public_Primary_Certification_Authority.crt, mozilla/Verisign_Class_2_Public_Primary_Certification_Authority_-_G2.crt, mozilla/Verisign_Class_2_Public_Primary_Certification_Authority_-_G3.crt, mozilla/Verisign_Class_3_Public_Primary_Certification_Authority.crt, mozilla/Verisign_Class_3_Public_Primary_Certification_Authority_-_G2.crt, mozilla/Verisign_Class_3_Public_Primary_Certification_Authority_-_G3.crt, mozilla/VeriSign_Class_3_Public_Primary_Certification_Authority_-_G5.crt, mozilla/Verisign_Class_4_Public_Primary_Certification_Authority_-_G2.crt, mozilla/Verisign_Class_4_Public_Primary_Certification_Authority_-_G3.crt, mozilla/Verisign_RSA_Secure_Server_CA.crt, mozilla/Verisign_Time_Stamping_Authority_CA.crt, mozilla/Visa_eCommerce_Root.crt, mozilla/Visa_International_Global_Root_2.crt, mozilla/Wells_Fargo_Root_CA.crt, mozilla/XRamp_Global_CA_Root.crt, quovadis.bm/QuoVadis_Root_Certification_Authority.crt, signet.pl/signet_ca1_pem.crt, signet.pl/signet_ca2_pem.crt, signet.pl/signet_ca3_pem.crt, signet.pl/signet_ocspklasa2_pem.crt, signet.pl/signet_ocspklasa3_pem.crt, signet.pl/signet_pca2_pem.crt, signet.pl/signet_pca3_pem.crt, signet.pl/signet_rootca_pem.crt, signet.pl/signet_tsa1_pem.crt, spi-inc.org/spi-ca-2003.crt, spi-inc.org/spi-cacert-2008.crt, telesec.de/deutsche-telekom-root-ca-2.crt +Owners: ca-certificates +Variables: + enable_crts = brasil.gov.br/brasil.gov.br.crt, cacert.org/cacert.org.crt, cacert.org/class3.crt, cacert.org/root.crt, debconf.org/ca.crt, gouv.fr/cert_igca_dsa.crt, gouv.fr/cert_igca_rsa.crt, 
mozilla/ABAecom_=sub.__Am._Bankers_Assn.=_Root_CA.crt, mozilla/AddTrust_External_Root.crt, mozilla/AddTrust_Low-Value_Services_Root.crt, mozilla/AddTrust_Public_Services_Root.crt, mozilla/AddTrust_Qualified_Certificates_Root.crt, mozilla/America_Online_Root_Certification_Authority_1.crt, mozilla/America_Online_Root_Certification_Authority_2.crt, mozilla/AOL_Time_Warner_Root_Certification_Authority_1.crt, mozilla/AOL_Time_Warner_Root_Certification_Authority_2.crt, mozilla/Baltimore_CyberTrust_Root.crt, mozilla/beTRUSTed_Root_CA-Baltimore_Implementation.crt, mozilla/beTRUSTed_Root_CA.crt, mozilla/beTRUSTed_Root_CA_-_Entrust_Implementation.crt, mozilla/beTRUSTed_Root_CA_-_RSA_Implementation.crt, mozilla/Camerfirma_Chambers_of_Commerce_Root.crt, mozilla/Camerfirma_Global_Chambersign_Root.crt, mozilla/Certplus_Class_2_Primary_CA.crt, mozilla/Certum_Root_CA.crt, mozilla/Comodo_AAA_Services_root.crt, mozilla/COMODO_Certification_Authority.crt, mozilla/Comodo_Secure_Services_root.crt, mozilla/Comodo_Trusted_Services_root.crt, mozilla/DigiCert_Assured_ID_Root_CA.crt, mozilla/DigiCert_Global_Root_CA.crt, mozilla/DigiCert_High_Assurance_EV_Root_CA.crt, mozilla/Digital_Signature_Trust_Co._Global_CA_1.crt, mozilla/Digital_Signature_Trust_Co._Global_CA_2.crt, mozilla/Digital_Signature_Trust_Co._Global_CA_3.crt, mozilla/Digital_Signature_Trust_Co._Global_CA_4.crt, mozilla/DST_ACES_CA_X6.crt, mozilla/DST_Root_CA_X3.crt, mozilla/Entrust.net_Global_Secure_Personal_CA.crt, mozilla/Entrust.net_Global_Secure_Server_CA.crt, mozilla/Entrust.net_Premium_2048_Secure_Server_CA.crt, mozilla/Entrust.net_Secure_Personal_CA.crt, mozilla/Entrust.net_Secure_Server_CA.crt, mozilla/Entrust_Root_Certification_Authority.crt, mozilla/Equifax_Secure_CA.crt, mozilla/Equifax_Secure_eBusiness_CA_1.crt, mozilla/Equifax_Secure_eBusiness_CA_2.crt, mozilla/Equifax_Secure_Global_eBusiness_CA.crt, mozilla/Firmaprofesional_Root_CA.crt, mozilla/GeoTrust_Global_CA_2.crt, mozilla/GeoTrust_Global_CA.crt, 
mozilla/GeoTrust_Primary_Certification_Authority.crt, mozilla/GeoTrust_Universal_CA_2.crt, mozilla/GeoTrust_Universal_CA.crt, mozilla/GlobalSign_Root_CA.crt, mozilla/GlobalSign_Root_CA_-_R2.crt, mozilla/Go_Daddy_Class_2_CA.crt, mozilla/GTE_CyberTrust_Global_Root.crt, mozilla/GTE_CyberTrust_Root_CA.crt, mozilla/IPS_Chained_CAs_root.crt, mozilla/IPS_CLASE1_root.crt, mozilla/IPS_CLASE3_root.crt, mozilla/IPS_CLASEA1_root.crt, mozilla/IPS_CLASEA3_root.crt, mozilla/IPS_Servidores_root.crt, mozilla/IPS_Timestamping_root.crt, mozilla/NetLock_Business_=Class_B=_Root.crt, mozilla/NetLock_Express_=Class_C=_Root.crt, mozilla/NetLock_Notary_=Class_A=_Root.crt, mozilla/NetLock_Qualified_=Class_QA=_Root.crt, mozilla/QuoVadis_Root_CA_2.crt, mozilla/QuoVadis_Root_CA_3.crt, mozilla/QuoVadis_Root_CA.crt, mozilla/RSA_Root_Certificate_1.crt, mozilla/RSA_Security_1024_v3.crt, mozilla/RSA_Security_2048_v3.crt, mozilla/Secure_Global_CA.crt, mozilla/SecureTrust_CA.crt, mozilla/Security_Communication_Root_CA.crt, mozilla/Sonera_Class_1_Root_CA.crt, mozilla/Sonera_Class_2_Root_CA.crt, mozilla/Staat_der_Nederlanden_Root_CA.crt, mozilla/Starfield_Class_2_CA.crt, mozilla/StartCom_Certification_Authority.crt, mozilla/StartCom_Ltd..crt, mozilla/Swisscom_Root_CA_1.crt, mozilla/SwissSign_Gold_CA_-_G2.crt, mozilla/SwissSign_Platinum_CA_-_G2.crt, mozilla/SwissSign_Silver_CA_-_G2.crt, mozilla/Taiwan_GRCA.crt, mozilla/TC_TrustCenter__Germany__Class_2_CA.crt, mozilla/TC_TrustCenter__Germany__Class_3_CA.crt, mozilla/TDC_Internet_Root_CA.crt, mozilla/TDC_OCES_Root_CA.crt, mozilla/Thawte_Personal_Basic_CA.crt, mozilla/Thawte_Personal_Freemail_CA.crt, mozilla/Thawte_Personal_Premium_CA.crt, mozilla/Thawte_Premium_Server_CA.crt, mozilla/thawte_Primary_Root_CA.crt, mozilla/Thawte_Server_CA.crt, mozilla/Thawte_Time_Stamping_CA.crt, mozilla/TURKTRUST_Certificate_Services_Provider_Root_1.crt, mozilla/TURKTRUST_Certificate_Services_Provider_Root_2.crt, mozilla/UTN_DATACorp_SGC_Root_CA.crt, 
mozilla/UTN_USERFirst_Email_Root_CA.crt, mozilla/UTN_USERFirst_Hardware_Root_CA.crt, mozilla/UTN-USER_First-Network_Applications.crt, mozilla/UTN_USERFirst_Object_Root_CA.crt, mozilla/ValiCert_Class_1_VA.crt, mozilla/ValiCert_Class_2_VA.crt, mozilla/Verisign_Class_1_Public_Primary_Certification_Authority.crt, mozilla/Verisign_Class_1_Public_Primary_Certification_Authority_-_G2.crt, mozilla/Verisign_Class_1_Public_Primary_Certification_Authority_-_G3.crt, mozilla/Verisign_Class_2_Public_Primary_Certification_Authority.crt, mozilla/Verisign_Class_2_Public_Primary_Certification_Authority_-_G2.crt, mozilla/Verisign_Class_2_Public_Primary_Certification_Authority_-_G3.crt, mozilla/Verisign_Class_3_Public_Primary_Certification_Authority.crt, mozilla/Verisign_Class_3_Public_Primary_Certification_Authority_-_G2.crt, mozilla/Verisign_Class_3_Public_Primary_Certification_Authority_-_G3.crt, mozilla/VeriSign_Class_3_Public_Primary_Certification_Authority_-_G5.crt, mozilla/Verisign_Class_4_Public_Primary_Certification_Authority_-_G2.crt, mozilla/Verisign_Class_4_Public_Primary_Certification_Authority_-_G3.crt, mozilla/Verisign_RSA_Secure_Server_CA.crt, mozilla/Verisign_Time_Stamping_Authority_CA.crt, mozilla/Visa_eCommerce_Root.crt, mozilla/Visa_International_Global_Root_2.crt, mozilla/Wells_Fargo_Root_CA.crt, mozilla/XRamp_Global_CA_Root.crt, quovadis.bm/QuoVadis_Root_Certification_Authority.crt, signet.pl/signet_ca1_pem.crt, signet.pl/signet_ca2_pem.crt, signet.pl/signet_ca3_pem.crt, signet.pl/signet_ocspklasa2_pem.crt, signet.pl/signet_ocspklasa3_pem.crt, signet.pl/signet_pca2_pem.crt, signet.pl/signet_pca3_pem.crt, signet.pl/signet_rootca_pem.crt, signet.pl/signet_tsa1_pem.crt, spi-inc.org/spi-ca-2003.crt, spi-inc.org/spi-cacert-2008.crt, telesec.de/deutsche-telekom-root-ca-2.crt + +Name: ca-certificates/new_crts +Template: ca-certificates/new_crts +Owners: ca-certificates +Variables: + new_crts = + +Name: ca-certificates/trust_new_crts +Template: 
ca-certificates/trust_new_crts +Value: yes +Owners: ca-certificates + +Name: debconf-apt-progress/info +Template: debconf-apt-progress/info +Owners: debconf + +Name: debconf-apt-progress/media-change +Template: debconf-apt-progress/media-change +Owners: debconf + +Name: debconf-apt-progress/preparing +Template: debconf-apt-progress/preparing +Owners: debconf + +Name: debconf-apt-progress/title +Template: debconf-apt-progress/title +Owners: debconf + +Name: debconf/frontend +Template: debconf/frontend +Value: noninteractive +Owners: debconf + +Name: debconf/priority +Template: debconf/priority +Value: high +Owners: debconf + +Name: dhcp3-client/dhclient-needs-restarting +Template: dhcp3-client/dhclient-needs-restarting +Owners: dhcp3-client + +Name: dhcp3-client/dhclient-script_moved +Template: dhcp3-client/dhclient-script_moved +Owners: dhcp3-client + +Name: glibc/restart-failed +Template: glibc/restart-failed +Owners: libc6 + +Name: glibc/restart-services +Template: glibc/restart-services +Owners: libc6 + +Name: glibc/upgrade +Template: glibc/upgrade +Owners: libc6 + +Name: libpam-modules/disable-screensaver +Template: libpam-modules/disable-screensaver +Owners: libpam-modules + +Name: libpam0g/restart-failed +Template: libpam0g/restart-failed +Owners: libpam0g + +Name: libpam0g/restart-services +Template: libpam0g/restart-services +Owners: libpam0g + +Name: libpam0g/xdm-needs-restart +Template: libpam0g/xdm-needs-restart +Owners: libpam0g + +Name: libssl0.9.8/restart-failed +Template: libssl0.9.8/restart-failed +Owners: libssl0.9.8 + +Name: libssl0.9.8/restart-services +Template: libssl0.9.8/restart-services +Owners: libssl0.9.8 + +Name: linux-base/disk-id-convert-auto +Template: linux-base/disk-id-convert-auto +Owners: linux-base + +Name: linux-base/disk-id-convert-plan +Template: linux-base/disk-id-convert-plan +Owners: linux-base + +Name: linux-base/disk-id-convert-plan-no-relabel +Template: linux-base/disk-id-convert-plan-no-relabel +Owners: linux-base + 
+Name: linux-base/disk-id-manual +Template: linux-base/disk-id-manual +Owners: linux-base + +Name: linux-base/disk-id-manual-boot-loader +Template: linux-base/disk-id-manual-boot-loader +Owners: linux-base + +Name: linux-image-2.6.32-bpo.5-686/postinst/bootloader-error-2.6.32-bpo.5-686 +Template: linux-image-2.6.32-bpo.5-686/postinst/bootloader-error-2.6.32-bpo.5-686 +Owners: linux-image-2.6.32-bpo.5-686 + +Name: linux-image-2.6.32-bpo.5-686/postinst/bootloader-test-error-2.6.32-bpo.5-686 +Template: linux-image-2.6.32-bpo.5-686/postinst/bootloader-test-error-2.6.32-bpo.5-686 +Owners: linux-image-2.6.32-bpo.5-686 + +Name: linux-image-2.6.32-bpo.5-686/postinst/depmod-error-initrd-2.6.32-bpo.5-686 +Template: linux-image-2.6.32-bpo.5-686/postinst/depmod-error-initrd-2.6.32-bpo.5-686 +Owners: linux-image-2.6.32-bpo.5-686 + +Name: linux-image-2.6.32-bpo.5-686/postinst/missing-firmware-2.6.32-bpo.5-686 +Template: linux-image-2.6.32-bpo.5-686/postinst/missing-firmware-2.6.32-bpo.5-686 +Owners: linux-image-2.6.32-bpo.5-686 + +Name: linux-image-2.6.32-bpo.5-686/prerm/removing-running-kernel-2.6.32-bpo.5-686 +Template: linux-image-2.6.32-bpo.5-686/prerm/removing-running-kernel-2.6.32-bpo.5-686 +Owners: linux-image-2.6.32-bpo.5-686 + +Name: linux-image-2.6.32-bpo.5-686/prerm/would-invalidate-boot-loader-2.6.32-bpo.5-686 +Template: linux-image-2.6.32-bpo.5-686/prerm/would-invalidate-boot-loader-2.6.32-bpo.5-686 +Owners: linux-image-2.6.32-bpo.5-686 + +Name: linux-image-2.6.32-bpo.5-xen-686/postinst/bootloader-error-2.6.32-bpo.5-xen-686 +Template: linux-image-2.6.32-bpo.5-xen-686/postinst/bootloader-error-2.6.32-bpo.5-xen-686 +Owners: linux-image-2.6.32-bpo.5-xen-686 + +Name: linux-image-2.6.32-bpo.5-xen-686/postinst/bootloader-test-error-2.6.32-bpo.5-xen-686 +Template: linux-image-2.6.32-bpo.5-xen-686/postinst/bootloader-test-error-2.6.32-bpo.5-xen-686 +Owners: linux-image-2.6.32-bpo.5-xen-686 + +Name: 
linux-image-2.6.32-bpo.5-xen-686/postinst/depmod-error-initrd-2.6.32-bpo.5-xen-686 +Template: linux-image-2.6.32-bpo.5-xen-686/postinst/depmod-error-initrd-2.6.32-bpo.5-xen-686 +Owners: linux-image-2.6.32-bpo.5-xen-686 + +Name: linux-image-2.6.32-bpo.5-xen-686/postinst/missing-firmware-2.6.32-bpo.5-xen-686 +Template: linux-image-2.6.32-bpo.5-xen-686/postinst/missing-firmware-2.6.32-bpo.5-xen-686 +Owners: linux-image-2.6.32-bpo.5-xen-686 + +Name: linux-image-2.6.32-bpo.5-xen-686/prerm/removing-running-kernel-2.6.32-bpo.5-xen-686 +Template: linux-image-2.6.32-bpo.5-xen-686/prerm/removing-running-kernel-2.6.32-bpo.5-xen-686 +Owners: linux-image-2.6.32-bpo.5-xen-686 + +Name: linux-image-2.6.32-bpo.5-xen-686/prerm/would-invalidate-boot-loader-2.6.32-bpo.5-xen-686 +Template: linux-image-2.6.32-bpo.5-xen-686/prerm/would-invalidate-boot-loader-2.6.32-bpo.5-xen-686 +Owners: linux-image-2.6.32-bpo.5-xen-686 + +Name: locales/default_environment_locale +Template: locales/default_environment_locale +Value: en_US.UTF-8 +Owners: locales +Variables: + locales = en_US.UTF-8 + +Name: locales/locales_to_be_generated +Template: locales/locales_to_be_generated +Value: en_US.UTF-8 UTF-8 +Owners: locales +Variables: + locales = aa_DJ ISO-8859-1, aa_DJ.UTF-8 UTF-8, aa_ER UTF-8, aa_ER@saaho UTF-8, aa_ET UTF-8, af_ZA ISO-8859-1, af_ZA.UTF-8 UTF-8, am_ET UTF-8, an_ES ISO-8859-15, an_ES.UTF-8 UTF-8, ar_AE ISO-8859-6, ar_AE.UTF-8 UTF-8, ar_BH ISO-8859-6, ar_BH.UTF-8 UTF-8, ar_DZ ISO-8859-6, ar_DZ.UTF-8 UTF-8, ar_EG ISO-8859-6, ar_EG.UTF-8 UTF-8, ar_IN UTF-8, ar_IQ ISO-8859-6, ar_IQ.UTF-8 UTF-8, ar_JO ISO-8859-6, ar_JO.UTF-8 UTF-8, ar_KW ISO-8859-6, ar_KW.UTF-8 UTF-8, ar_LB ISO-8859-6, ar_LB.UTF-8 UTF-8, ar_LY ISO-8859-6, ar_LY.UTF-8 UTF-8, ar_MA ISO-8859-6, ar_MA.UTF-8 UTF-8, ar_OM ISO-8859-6, ar_OM.UTF-8 UTF-8, ar_QA ISO-8859-6, ar_QA.UTF-8 UTF-8, ar_SA ISO-8859-6, ar_SA.UTF-8 UTF-8, ar_SD ISO-8859-6, ar_SD.UTF-8 UTF-8, ar_SY ISO-8859-6, ar_SY.UTF-8 UTF-8, ar_TN ISO-8859-6, ar_TN.UTF-8 UTF-8, 
ar_YE ISO-8859-6, ar_YE.UTF-8 UTF-8, as_IN.UTF-8 UTF-8, ast_ES ISO-8859-15, ast_ES.UTF-8 UTF-8, az_AZ.UTF-8 UTF-8, be_BY CP1251, be_BY.UTF-8 UTF-8, be_BY@latin UTF-8, ber_DZ UTF-8, ber_MA UTF-8, bg_BG CP1251, bg_BG.UTF-8 UTF-8, bn_BD UTF-8, bn_IN UTF-8, br_FR ISO-8859-1, br_FR.UTF-8 UTF-8, br_FR@euro ISO-8859-15, bs_BA ISO-8859-2, bs_BA.UTF-8 UTF-8, byn_ER UTF-8, ca_AD ISO-8859-15, ca_AD.UTF-8 UTF-8, ca_ES ISO-8859-1, ca_ES.UTF-8 UTF-8, ca_ES.UTF-8@valencia UTF-8, ca_ES@euro ISO-8859-15, ca_ES@valencia ISO-8859-15, ca_FR ISO-8859-15, ca_FR.UTF-8 UTF-8, ca_IT ISO-8859-15, ca_IT.UTF-8 UTF-8, crh_UA UTF-8, cs_CZ ISO-8859-2, cs_CZ.UTF-8 UTF-8, csb_PL UTF-8, cy_GB ISO-8859-14, cy_GB.UTF-8 UTF-8, da_DK ISO-8859-1, da_DK.ISO-8859-15 ISO-8859-15, da_DK.UTF-8 UTF-8, de_AT ISO-8859-1, de_AT.UTF-8 UTF-8, de_AT@euro ISO-8859-15, de_BE ISO-8859-1, de_BE.UTF-8 UTF-8, de_BE@euro ISO-8859-15, de_CH ISO-8859-1, de_CH.UTF-8 UTF-8, de_DE ISO-8859-1, de_DE.UTF-8 UTF-8, de_DE@euro ISO-8859-15, de_LI.UTF-8 UTF-8, de_LU ISO-8859-1, de_LU.UTF-8 UTF-8, de_LU@euro ISO-8859-15, dz_BT UTF-8, el_CY ISO-8859-7, el_CY.UTF-8 UTF-8, el_GR ISO-8859-7, el_GR.UTF-8 UTF-8, en_AU ISO-8859-1, en_AU.UTF-8 UTF-8, en_BW ISO-8859-1, en_BW.UTF-8 UTF-8, en_CA ISO-8859-1, en_CA.UTF-8 UTF-8, en_DK ISO-8859-1, en_DK.ISO-8859-15 ISO-8859-15, en_DK.UTF-8 UTF-8, en_GB ISO-8859-1, en_GB.ISO-8859-15 ISO-8859-15, en_GB.UTF-8 UTF-8, en_HK ISO-8859-1, en_HK.UTF-8 UTF-8, en_IE ISO-8859-1, en_IE.UTF-8 UTF-8, en_IE@euro ISO-8859-15, en_IN UTF-8, en_NG UTF-8, en_NZ ISO-8859-1, en_NZ.UTF-8 UTF-8, en_PH ISO-8859-1, en_PH.UTF-8 UTF-8, en_SG ISO-8859-1, en_SG.UTF-8 UTF-8, en_US ISO-8859-1, en_US.ISO-8859-15 ISO-8859-15, en_US.UTF-8 UTF-8, en_ZA ISO-8859-1, en_ZA.UTF-8 UTF-8, en_ZW ISO-8859-1, en_ZW.UTF-8 UTF-8, eo ISO-8859-3, eo.UTF-8 UTF-8, es_AR ISO-8859-1, es_AR.UTF-8 UTF-8, es_BO ISO-8859-1, es_BO.UTF-8 UTF-8, es_CL ISO-8859-1, es_CL.UTF-8 UTF-8, es_CO ISO-8859-1, es_CO.UTF-8 UTF-8, es_CR ISO-8859-1, es_CR.UTF-8 UTF-8, 
es_DO ISO-8859-1, es_DO.UTF-8 UTF-8, es_EC ISO-8859-1, es_EC.UTF-8 UTF-8, es_ES ISO-8859-1, es_ES.UTF-8 UTF-8, es_ES@euro ISO-8859-15, es_GT ISO-8859-1, es_GT.UTF-8 UTF-8, es_HN ISO-8859-1, es_HN.UTF-8 UTF-8, es_MX ISO-8859-1, es_MX.UTF-8 UTF-8, es_NI ISO-8859-1, es_NI.UTF-8 UTF-8, es_PA ISO-8859-1, es_PA.UTF-8 UTF-8, es_PE ISO-8859-1, es_PE.UTF-8 UTF-8, es_PR ISO-8859-1, es_PR.UTF-8 UTF-8, es_PY ISO-8859-1, es_PY.UTF-8 UTF-8, es_SV ISO-8859-1, es_SV.UTF-8 UTF-8, es_US ISO-8859-1, es_US.UTF-8 UTF-8, es_UY ISO-8859-1, es_UY.UTF-8 UTF-8, es_VE ISO-8859-1, es_VE.UTF-8 UTF-8, et_EE ISO-8859-1, et_EE.ISO-8859-15 ISO-8859-15, et_EE.UTF-8 UTF-8, eu_ES ISO-8859-1, eu_ES.UTF-8 UTF-8, eu_ES@euro ISO-8859-15, eu_FR ISO-8859-1, eu_FR.UTF-8 UTF-8, eu_FR@euro ISO-8859-15, fa_IR UTF-8, fi_FI ISO-8859-1, fi_FI.UTF-8 UTF-8, fi_FI@euro ISO-8859-15, fil_PH UTF-8, fo_FO ISO-8859-1, fo_FO.UTF-8 UTF-8, fr_BE ISO-8859-1, fr_BE.UTF-8 UTF-8, fr_BE@euro ISO-8859-15, fr_CA ISO-8859-1, fr_CA.UTF-8 UTF-8, fr_CH ISO-8859-1, fr_CH.UTF-8 UTF-8, fr_FR ISO-8859-1, fr_FR.UTF-8 UTF-8, fr_FR@euro ISO-8859-15, fr_LU ISO-8859-1, fr_LU.UTF-8 UTF-8, fr_LU@euro ISO-8859-15, fur_IT UTF-8, fy_DE UTF-8, fy_NL UTF-8, ga_IE ISO-8859-1, ga_IE.UTF-8 UTF-8, ga_IE@euro ISO-8859-15, gd_GB ISO-8859-15, gd_GB.UTF-8 UTF-8, gez_ER UTF-8, gez_ER@abegede UTF-8, gez_ET UTF-8, gez_ET@abegede UTF-8, gl_ES ISO-8859-1, gl_ES.UTF-8 UTF-8, gl_ES@euro ISO-8859-15, gu_IN UTF-8, gv_GB ISO-8859-1, gv_GB.UTF-8 UTF-8, ha_NG UTF-8, he_IL ISO-8859-8, he_IL.UTF-8 UTF-8, hi_IN UTF-8, hr_HR ISO-8859-2, hr_HR.UTF-8 UTF-8, hsb_DE ISO-8859-2, hsb_DE.UTF-8 UTF-8, hu_HU ISO-8859-2, hu_HU.UTF-8 UTF-8, hy_AM UTF-8, hy_AM.ARMSCII-8 ARMSCII-8, ia UTF-8, id_ID ISO-8859-1, id_ID.UTF-8 UTF-8, ig_NG UTF-8, ik_CA UTF-8, is_IS ISO-8859-1, is_IS.UTF-8 UTF-8, it_CH ISO-8859-1, it_CH.UTF-8 UTF-8, it_IT ISO-8859-1, it_IT.UTF-8 UTF-8, it_IT@euro ISO-8859-15, iu_CA UTF-8, iw_IL ISO-8859-8, iw_IL.UTF-8 UTF-8, ja_JP.EUC-JP EUC-JP, ja_JP.UTF-8 UTF-8, ka_GE 
GEORGIAN-PS, ka_GE.UTF-8 UTF-8, kk_KZ PT154, kk_KZ.UTF-8 UTF-8, kl_GL ISO-8859-1, kl_GL.UTF-8 UTF-8, km_KH UTF-8, kn_IN UTF-8, ko_KR.EUC-KR EUC-KR, ko_KR.UTF-8 UTF-8, ks_IN UTF-8, ku_TR ISO-8859-9, ku_TR.UTF-8 UTF-8, kw_GB ISO-8859-1, kw_GB.UTF-8 UTF-8, ky_KG UTF-8, lg_UG ISO-8859-10, lg_UG.UTF-8 UTF-8, li_BE UTF-8, li_NL UTF-8, lo_LA UTF-8, lt_LT ISO-8859-13, lt_LT.UTF-8 UTF-8, lv_LV ISO-8859-13, lv_LV.UTF-8 UTF-8, mai_IN UTF-8, mg_MG ISO-8859-15, mg_MG.UTF-8 UTF-8, mi_NZ ISO-8859-13, mi_NZ.UTF-8 UTF-8, mk_MK ISO-8859-5, mk_MK.UTF-8 UTF-8, ml_IN UTF-8, mn_MN UTF-8, mr_IN UTF-8, ms_MY ISO-8859-1, ms_MY.UTF-8 UTF-8, mt_MT ISO-8859-3, mt_MT.UTF-8 UTF-8, nb_NO ISO-8859-1, nb_NO.UTF-8 UTF-8, nds_DE UTF-8, nds_NL UTF-8, ne_NP UTF-8, nl_BE ISO-8859-1, nl_BE.UTF-8 UTF-8, nl_BE@euro ISO-8859-15, nl_NL ISO-8859-1, nl_NL.UTF-8 UTF-8, nl_NL@euro ISO-8859-15, nn_NO ISO-8859-1, nn_NO.UTF-8 UTF-8, nr_ZA UTF-8, nso_ZA UTF-8, oc_FR ISO-8859-1, oc_FR.UTF-8 UTF-8, om_ET UTF-8, om_KE ISO-8859-1, om_KE.UTF-8 UTF-8, or_IN UTF-8, pa_IN UTF-8, pa_PK UTF-8, pap_AN UTF-8, pl_PL ISO-8859-2, pl_PL.UTF-8 UTF-8, pt_BR ISO-8859-1, pt_BR.UTF-8 UTF-8, pt_PT ISO-8859-1, pt_PT.UTF-8 UTF-8, pt_PT@euro ISO-8859-15, ro_RO ISO-8859-2, ro_RO.UTF-8 UTF-8, ru_RU ISO-8859-5, ru_RU.CP1251 CP1251, ru_RU.KOI8-R KOI8-R, ru_RU.UTF-8 UTF-8, ru_UA KOI8-U, ru_UA.UTF-8 UTF-8, rw_RW UTF-8, sa_IN UTF-8, sc_IT UTF-8, se_NO UTF-8, si_LK UTF-8, sid_ET UTF-8, sk_SK ISO-8859-2, sk_SK.UTF-8 UTF-8, sl_SI ISO-8859-2, sl_SI.UTF-8 UTF-8, so_DJ ISO-8859-1, so_DJ.UTF-8 UTF-8, so_ET UTF-8, so_KE ISO-8859-1, so_KE.UTF-8 UTF-8, so_SO ISO-8859-1, so_SO.UTF-8 UTF-8, sq_AL ISO-8859-1, sq_AL.UTF-8 UTF-8, sr_ME UTF-8, sr_RS UTF-8, sr_RS@latin UTF-8, ss_ZA UTF-8, st_ZA ISO-8859-1, st_ZA.UTF-8 UTF-8, sv_FI ISO-8859-1, sv_FI.UTF-8 UTF-8, sv_FI@euro ISO-8859-15, sv_SE ISO-8859-1, sv_SE.ISO-8859-15 ISO-8859-15, sv_SE.UTF-8 UTF-8, ta_IN UTF-8, te_IN UTF-8, tg_TJ KOI8-T, tg_TJ.UTF-8 UTF-8, th_TH TIS-620, th_TH.UTF-8 UTF-8, ti_ER UTF-8, ti_ET 
UTF-8, tig_ER UTF-8, tk_TM UTF-8, tl_PH ISO-8859-1, tl_PH.UTF-8 UTF-8, tn_ZA UTF-8, tr_CY ISO-8859-9, tr_CY.UTF-8 UTF-8, tr_TR ISO-8859-9, tr_TR.UTF-8 UTF-8, ts_ZA UTF-8, tt_RU.UTF-8 UTF-8, tt_RU@iqtelif.UTF-8 UTF-8, ug_CN UTF-8, uk_UA KOI8-U, uk_UA.UTF-8 UTF-8, ur_PK UTF-8, uz_UZ ISO-8859-1, uz_UZ.UTF-8 UTF-8, uz_UZ@cyrillic UTF-8, ve_ZA UTF-8, vi_VN UTF-8, vi_VN.TCVN TCVN5712-1, wa_BE ISO-8859-1, wa_BE.UTF-8 UTF-8, wa_BE@euro ISO-8859-15, wo_SN UTF-8, xh_ZA ISO-8859-1, xh_ZA.UTF-8 UTF-8, yi_US CP1255, yi_US.UTF-8 UTF-8, yo_NG UTF-8, zh_CN GB2312, zh_CN.GB18030 GB18030, zh_CN.GBK GBK, zh_CN.UTF-8 UTF-8, zh_HK BIG5-HKSCS, zh_HK.UTF-8 UTF-8, zh_SG GB2312, zh_SG.GBK GBK, zh_SG.UTF-8 UTF-8, zh_TW BIG5, zh_TW.EUC-TW EUC-TW, zh_TW.UTF-8 UTF-8, zu_ZA ISO-8859-1, zu_ZA.UTF-8 UTF-8 + +Name: openswan/create_rsa_key +Template: openswan/create_rsa_key +Value: true +Owners: openswan +Flags: seen + +Name: openswan/enable-oe +Template: openswan/enable-oe +Value: false +Owners: openswan +Flags: seen + +Name: openswan/existing_x509_certificate +Template: openswan/existing_x509_certificate +Value: false +Owners: openswan +Flags: seen + +Name: openswan/existing_x509_certificate_filename +Template: openswan/existing_x509_certificate_filename +Owners: openswan + +Name: openswan/existing_x509_key_filename +Template: openswan/existing_x509_key_filename +Owners: openswan + +Name: openswan/restart +Template: openswan/restart +Value: true +Owners: openswan + +Name: openswan/rsa_key_length +Template: openswan/rsa_key_length +Value: 2048 +Owners: openswan + +Name: openswan/rsa_key_type +Template: openswan/rsa_key_type +Value: x509 +Owners: openswan +Flags: seen + +Name: openswan/start_level +Template: openswan/start_level +Value: earliest +Owners: openswan + +Name: openswan/x509_common_name +Template: openswan/x509_common_name +Value: +Owners: openswan + +Name: openswan/x509_country_code +Template: openswan/x509_country_code +Value: AT +Owners: openswan + +Name: openswan/x509_email_address 
+Template: openswan/x509_email_address +Value: +Owners: openswan + +Name: openswan/x509_locality_name +Template: openswan/x509_locality_name +Value: +Owners: openswan + +Name: openswan/x509_organization_name +Template: openswan/x509_organization_name +Value: +Owners: openswan + +Name: openswan/x509_organizational_unit +Template: openswan/x509_organizational_unit +Value: +Owners: openswan + +Name: openswan/x509_self_signed +Template: openswan/x509_self_signed +Value: true +Owners: openswan +Flags: seen + +Name: openswan/x509_state_name +Template: openswan/x509_state_name +Value: +Owners: openswan + +Name: portmap/loopback +Template: portmap/loopback +Value: false +Owners: portmap + +Name: shared/accepted-sun-dlj-v1-1 +Template: shared/accepted-sun-dlj-v1-1 +Value: true +Owners: sun-java6-bin, sun-java6-jre +Flags: seen + +Name: shared/error-sun-dlj-v1-1 +Template: shared/error-sun-dlj-v1-1 +Owners: sun-java6-bin, sun-java6-jre + +Name: shared/kernel-image/really-run-bootloader +Template: shared/kernel-image/really-run-bootloader +Owners: linux-image-2.6.32-bpo.5-686, linux-image-2.6.32-bpo.5-xen-686 + +Name: shared/present-sun-dlj-v1-1 +Template: shared/present-sun-dlj-v1-1 +Value: true +Owners: sun-java6-bin, sun-java6-jre +Flags: seen + +Name: ssh/disable_cr_auth +Template: ssh/disable_cr_auth +Owners: openssh-server + +Name: ssh/encrypted_host_key_but_no_keygen +Template: ssh/encrypted_host_key_but_no_keygen +Owners: openssh-server + +Name: ssh/new_config +Template: ssh/new_config +Owners: openssh-server + +Name: ssh/use_old_init_script +Template: ssh/use_old_init_script +Value: true +Owners: openssh-server +Flags: seen + +Name: ssh/vulnerable_host_keys +Template: ssh/vulnerable_host_keys +Owners: openssh-server + +Name: sun-java6-jre/jcepolicy +Template: sun-java6-jre/jcepolicy +Owners: sun-java6-jre + +Name: sun-java6-jre/stopthread +Template: sun-java6-jre/stopthread +Owners: sun-java6-jre + +Name: tzdata/Areas +Template: tzdata/Areas +Value: Etc +Owners: 
tzdata +Flags: seen + +Name: tzdata/Zones/Africa +Template: tzdata/Zones/Africa +Owners: tzdata + +Name: tzdata/Zones/America +Template: tzdata/Zones/America +Owners: tzdata + +Name: tzdata/Zones/Antarctica +Template: tzdata/Zones/Antarctica +Owners: tzdata + +Name: tzdata/Zones/Arctic +Template: tzdata/Zones/Arctic +Owners: tzdata + +Name: tzdata/Zones/Asia +Template: tzdata/Zones/Asia +Owners: tzdata + +Name: tzdata/Zones/Atlantic +Template: tzdata/Zones/Atlantic +Owners: tzdata + +Name: tzdata/Zones/Australia +Template: tzdata/Zones/Australia +Owners: tzdata + +Name: tzdata/Zones/Etc +Template: tzdata/Zones/Etc +Value: UTC +Owners: tzdata +Flags: seen + +Name: tzdata/Zones/Europe +Template: tzdata/Zones/Europe +Owners: tzdata + +Name: tzdata/Zones/Indian +Template: tzdata/Zones/Indian +Owners: tzdata + +Name: tzdata/Zones/Pacific +Template: tzdata/Zones/Pacific +Owners: tzdata + +Name: tzdata/Zones/SystemV +Template: tzdata/Zones/SystemV +Owners: tzdata + +Name: ucf/changeprompt +Template: ucf/changeprompt +Owners: ucf + +Name: ucf/changeprompt_threeway +Template: ucf/changeprompt_threeway +Owners: ucf + +Name: ucf/show_diff +Template: ucf/show_diff +Owners: ucf + +Name: ucf/title +Template: ucf/title +Owners: ucf + +Name: udev/new_kernel_needed +Template: udev/new_kernel_needed +Owners: udev + +Name: udev/reboot_needed +Template: udev/reboot_needed +Owners: udev + diff --git a/tools/systemvm/debian/config/etc/init.d/cloud b/tools/systemvm/debian/config/etc/init.d/cloud new file mode 100755 index 00000000000..952daf79a78 --- /dev/null +++ b/tools/systemvm/debian/config/etc/init.d/cloud @@ -0,0 +1,127 @@ +#!/bin/sh -e +### BEGIN INIT INFO +# Provides: cloud-passwd-srvr +# Required-Start: mountkernfs $local_fs cloud-early-config +# Required-Stop: $local_fs +# Should-Start: +# Should-Stop: +# Default-Start: 2345 +# Default-Stop: 2345 +# Short-Description: Start up the cloud.com service +### END INIT INFO + +if [ -f /mnt/cmdline ] +then + CMDLINE=$(cat /mnt/cmdline) 
+else + CMDLINE=$(cat /proc/cmdline) +fi +TYPE="router" +for i in $CMDLINE + do + # search for foo=bar pattern and cut out foo + FIRSTPATTERN=$(echo $i | cut -d= -f1) + case $FIRSTPATTERN in + type) + TYPE=$(echo $i | cut -d= -f2) + ;; + esac +done + +# Source function library. +if [ -f /etc/init.d/functions ] +then + . /etc/init.d/functions +fi + +if [ -f . /lib/lsb/init-functions ] +then + . /lib/lsb/init-functions +fi + +_success() { + if [ -f /etc/init.d/functions ] + then + success + else + echo "Success" + fi +} + +_failure() { + if [ -f /etc/init.d/functions ] + then + failure + else + echo "Failed" + fi +} +RETVAL=$? +CLOUD_COM_HOME="/usr/local/cloud" + +# mkdir -p /var/log/vmops + +get_pids() { + local i + for i in $(ps -ef| grep java | grep -v grep | awk '{print $2}'); + do + echo $(pwdx $i) | grep "$CLOUD_COM_HOME" | awk -F: '{print $1}'; + done +} + +start() { + local pid=$(get_pids) + echo -n "Starting cloud.com service (type=$TYPE) " + if [ -f $CLOUD_COM_HOME/systemvm/run.sh ]; + then + if [ "$pid" == "" ] + then + (cd $CLOUD_COM_HOME/systemvm; nohup ./run.sh > /var/log/cloud/cloud.out 2>&1 & ) + pid=$(get_pids) + echo $pid > /var/run/cloud.pid + fi + _success + else + _failure + fi + echo +} + +stop() { + local pid + echo -n "Stopping cloud.com service (type=$TYPE): " + for pid in $(get_pids) + do + kill $pid + done + _success + echo +} + +status() { + local pids=$(get_pids) + if [ "$pids" == "" ] + then + echo "cloud.com service is not running" + return 1 + fi + echo "cloud.com service (type=$TYPE) is running: process id: $pids" + return 0 +} + +case "$1" in + start) start + ;; + stop) stop + ;; + status) status + ;; + restart) stop + start + ;; + *) echo $"Usage: $0 {start|stop|status|restart}" + exit 1 + ;; +esac + +exit $RETVAL diff --git a/tools/systemvm/debian/config/etc/init.d/cloud-early-config b/tools/systemvm/debian/config/etc/init.d/cloud-early-config new file mode 100755 index 00000000000..569a253a365 --- /dev/null +++ 
b/tools/systemvm/debian/config/etc/init.d/cloud-early-config @@ -0,0 +1,329 @@ +##!/bin/sh -e +### BEGIN INIT INFO +# Provides: cloud-early-config +# Required-Start: mountkernfs $local_fs +# Required-Stop: $local_fs +# Should-Start: +# Should-Stop: +# Default-Start: S +# Default-Stop: 0 6 +# Short-Description: configure according to cmdline +### END INIT INFO + +PATH="/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin" + +[ -x /sbin/ifup ] || exit 0 + +. /lib/lsb/init-functions + +init_interfaces() { + cat > /etc/network/interfaces << EOF +auto lo $1 $2 $3 +iface lo inet loopback + +EOF +} + +patch() { + if [ -e /dev/xvdd ]; then + mkdir -p /media/cdrom + mount -o ro /dev/xvdd /media/cdrom + tar xzf /media/cdrom/patch.tgz -C / + cat /proc/cmdline > /var/cache/cloud/cmdline + /opt/cloud/bin/patchsystemvm.sh + umount /media/cdrom + fi + if [ -f /mnt/cmdline ]; then + cat /mnt/cmdline > /var/cache/cloud/cmdline + fi +} + +setup_interface() { + local intfnum=$1 + local ip=$2 + local mask=$3 + local gw=$4 + local intf=eth${intfnum} + local bootproto="static" + + + if [ "$BOOTPROTO" == "dhcp" ] + then + if [ "$intfnum" != "0" ] + then + bootproto="dhcp" + fi + fi + + if [[ "$ip" != "0.0.0.0" && "$ip" != "" ]] + then + echo "iface $intf inet $bootproto" >> /etc/network/interfaces + echo " address $ip " >> /etc/network/interfaces + echo " netmask $mask" >> /etc/network/interfaces + fi + + ifdown $intf + ifup $intf +} + +enable_fwding() { + echo $1 > /proc/sys/net/ipv4/ip_forward +} + +enable_svc() { + local svc=$1 + local enabled=$2 + + local cfg=/etc/default/${svc} + sed -i 's/ENABLED=.*$/ENABLED=$enabled/g' $cfg +} + +disable_hvc() { + [ ! 
-f /proc/xen ] && sed -i 's/^vc/#vc/' /etc/inittab && telinit q +} + +setup_common() { + disable_hvc + init_interfaces $1 $2 $3 + setup_interface "0" $ETH0_IP $ETH0_MASK $GW + setup_interface "1" $ETH1_IP $ETH1_MASK $GW + setup_interface "2" $ETH2_IP $ETH2_MASK $GW + + echo $NAME > /etc/hostname + echo 'AVAHI_DAEMON_DETECT_LOCAL=0' > /etc/default/avahi-daemon + hostname $NAME + + #Nameserver + if [ -n "$NS1" ] + then + echo "nameserver $NS1" > /etc/dnsmasq-resolv.conf + echo "nameserver $NS1" > /etc/resolv.conf + fi + + if [ -n "$NS2" ] + then + echo "nameserver $NS2" >> /etc/dnsmasq-resolv.conf + echo "nameserver $NS2" >> /etc/resolv.conf + fi + if [[ -n "$MGMTNET" && -n "$LOCAL_GW" ]] + then + ip route add $MGMTNET via $LOCAL_GW dev eth1 + fi + + ip route delete default + ip route add default via $GW +} + +setup_router() { + setup_common eth0 eth1 eth2 + [ -z $DHCP_RANGE ] && DHCP_RANGE=$ETH0_IP + if [ -n "$DOMAIN" ] + then + #send domain name to dhcp clients + sed -i s/[#]*dhcp-option=15.*$/dhcp-option=15,\"$DOMAIN\"/ /etc/dnsmasq.conf + #DNS server will append $DOMAIN to local queries + sed -r -i s/^[#]?domain=.*$/domain=$DOMAIN/ /etc/dnsmasq.conf + #answer all local domain queries + sed -i -e "s/^[#]*local=.*$/local=\/$DOMAIN\//" /etc/dnsmasq.conf + fi + sed -i -e "s/^dhcp-range=.*$/dhcp-range=$DHCP_RANGE,static/" /etc/dnsmasq.conf + sed -i -e "s/^[#]*listen-address=.*$/listen-address=$ETH0_IP/" /etc/dnsmasq.conf + sed -i /gateway/d /etc/hosts + echo "$ETH0_IP $NAME" >> /etc/hosts + [ -f /etc/httpd/conf/httpd.conf ] && sed -i -e "s/^Listen.*$/Listen $ETH0_IP:80/" /etc/httpd/conf/httpd.conf + [ -f /etc/httpd/conf.d/ssl.conf ] && mv /etc/httpd/conf.d/ssl.conf /etc/httpd/conf.d/ssl.conf.bak + [ -f /etc/ssh/sshd_config ] && sed -i -e "s/^[#]*ListenAddress.*$/ListenAddress $ETH1_IP/" /etc/ssh/sshd_config + + enable_svc dnsmasq 1 + enable_svc haproxy 1 + enable_fwding 1 + cp /etc/iptables/iptables-router /etc/iptables/rules +} + +setup_dhcpsrvr() { + setup_common 
eth0 eth1 + [ -z $DHCP_RANGE ] && DHCP_RANGE=$ETH0_IP + [ -z $DOMAIN ] && DOMAIN="cloudnine.internal" + if [ -n "$DOMAIN" ] + then + #send domain name to dhcp clients + sed -i s/[#]*dhcp-option=15.*$/dhcp-option=15,\"$DOMAIN\"/ /etc/dnsmasq.conf + #DNS server will append $DOMAIN to local queries + sed -r -i s/^[#]?domain=.*$/domain=$DOMAIN/ /etc/dnsmasq.conf + #answer all local domain queries + sed -i -e "s/^[#]*local=.*$/local=\/$DOMAIN\//" /etc/dnsmasq.conf + fi + sed -i -e "s/^dhcp-range=.*$/dhcp-range=$DHCP_RANGE,static/" /etc/dnsmasq.conf + sed -i -e "s/^[#]*dhcp-option=option:router.*$/dhcp-option=option:router,$GW/" /etc/dnsmasq.conf + #for now set up ourself as the dns server as well + #echo "dhcp-option=6,$NS1,$NS2" >> /etc/dnsmasq.conf + sed -i /gateway/d /etc/hosts + echo "$ETH0_IP $NAME" >> /etc/hosts + [ -f /etc/httpd/conf/httpd.conf ] && sed -i -e "s/^Listen.*$/Listen $ETH0_IP:80/" /etc/httpd/conf/httpd.conf + [ -f /etc/httpd/conf.d/ssl.conf ] && mv /etc/httpd/conf.d/ssl.conf /etc/httpd/conf.d/ssl.conf.bak + + enable_svc haproxy 0 + enable_fwding 0 + cp /etc/iptables/iptables-router /etc/iptables/rules +} + +setup_secstorage() { + setup_common eth0 eth1 eth2 + sed -i /gateway/d /etc/hosts + public_ip=$ETH2_IP + [ "$ETH2_IP" == "0.0.0.0" ] && public_ip=$ETH1_IP + echo "$public_ip $NAME" >> /etc/hosts + [ -f /etc/httpd/conf/httpd.conf ] && sed -i -e "s/^Listen.*:80$/Listen $public_ip:80/" /etc/httpd/conf/httpd.conf + [ -f /etc/httpd/conf/httpd.conf ] && sed -i -e "s/^Listen.*:443$/Listen $public_ip:443/" /etc/httpd/conf/httpd.conf + + sed 's/ENABLED=.*$/ENABLED=0/g' /etc/default/haproxy + cp /etc/iptables/iptables-secstorage /etc/iptables/rules + enable_fwding 0 + enable_svc haproxy 0 + enable_fwding 0 +} + +setup_console_proxy() { + setup_common eth0 eth1 eth2 + public_ip=$ETH2_IP + [ "$ETH2_IP" == "0.0.0.0" ] && public_ip=$ETH1_IP + sed -i /gateway/d /etc/hosts + echo "$public_ip $NAME" >> /etc/hosts + sed 's/ENABLED=.*$/ENABLED=0/g' 
/etc/default/haproxy + cp /etc/iptables/iptables-consoleproxy /etc/iptables/rules + enable_fwding 0 + enable_svc haproxy 0 + enable_fwding 0 + chkconfig apache2 off +} + +start() { + case $TYPE in + router) + [ "$NAME" == "" ] && NAME=router + setup_router + ;; + dhcpsrvr) + [ "$NAME" == "" ] && NAME=dhcpsrvr + setup_dhcpsrvr + ;; + secstorage) + [ "$NAME" == "" ] && NAME=secstorage + setup_secstorage; + ;; + consoleproxy) + [ "$NAME" == "" ] && NAME=consoleproxy + setup_console_proxy; + ;; + esac + patch + return 0 +} + +if [ -f /mnt/cmdline ] +then + CMDLINE=$(cat /mnt/cmdline) +else + CMDLINE=$(cat /proc/cmdline) +fi + +disable_hvc + +TYPE="unknown" +BOOTPROTO="static" + +for i in $CMDLINE + do + # search for foo=bar pattern and cut out foo + KEY=$(echo $i | cut -d= -f1) + VALUE=$(echo $i | cut -d= -f2) + case $KEY in + eth0ip) + ETH0_IP=$VALUE + ;; + eth1ip) + ETH1_IP=$VALUE + ;; + eth2ip) + ETH2_IP=$VALUE + ;; + gateway) + GW=$VALUE + ;; + eth0mask) + ETH0_MASK=$VALUE + ;; + eth1mask) + ETH1_MASK=$VALUE + ;; + eth2mask) + ETH2_MASK=$VALUE + ;; + dns1) + NS1=$VALUE + ;; + dns2) + NS2=$VALUE + ;; + domain) + DOMAIN=$VALUE + ;; + mgmtcidr) + MGMTNET=$VALUE + ;; + localgw) + LOCAL_GW=$VALUE + ;; + template) + TEMPLATE=$VALUE + ;; + name) + NAME=$VALUE + ;; + dhcprange) + DHCP_RANGE=$(echo $VALUE | tr ':' ',') + ;; + bootproto) + BOOTPROTO=$VALUE + ;; + type) + TYPE=$VALUE + ;; + esac +done + + +case "$1" in +start) + + log_action_begin_msg "Executing cloud-early-config" + if start; then + log_action_end_msg $? + else + log_action_end_msg $? + fi + ;; + +stop) + log_action_begin_msg "Stopping cloud-early-config (noop)" + log_action_end_msg 0 + ;; + +force-reload|restart) + + log_warning_msg "Running $0 is deprecated because it may not enable again some interfaces" + log_action_begin_msg "Reconfiguring network interfaces" + if start; then + log_action_end_msg $? + else + log_action_end_msg $? 
+ fi + ;; + +*) + echo "Usage: /etc/init.d/cloud-early-config {start|stop}" + exit 1 + ;; +esac + +exit 0 diff --git a/tools/systemvm/debian/config/etc/init.d/cloud-passwd-srvr b/tools/systemvm/debian/config/etc/init.d/cloud-passwd-srvr new file mode 100755 index 00000000000..52715938ec4 --- /dev/null +++ b/tools/systemvm/debian/config/etc/init.d/cloud-passwd-srvr @@ -0,0 +1,13 @@ +#!/bin/sh -e +### BEGIN INIT INFO +# Provides: cloud-passwd-srvr +# Required-Start: mountkernfs $local_fs cloud-early-config +# Required-Stop: $local_fs +# Should-Start: +# Should-Stop: +# Default-Start: S +# Default-Stop: 0 6 +# Short-Description: Web server that sends passwords to User VMs +### END INIT INFO + +bash /opt/cloud/bin/passwd_server& diff --git a/tools/systemvm/debian/config/etc/init.d/postinit b/tools/systemvm/debian/config/etc/init.d/postinit new file mode 100755 index 00000000000..ae17565c50b --- /dev/null +++ b/tools/systemvm/debian/config/etc/init.d/postinit @@ -0,0 +1,110 @@ +#! /bin/bash +# chkconfig: 35 11 90 +# description: pre-boot configuration using boot line parameters +# This file exists in /etc/init.d/ + +replace_in_file() { + local filename=$1 + local keyname=$2 + local value=$3 + sed -i /$keyname=/d $filename + echo "$keyname=$value" >> $filename + return $? 
+} + +setup_secstorage() { + public_ip=$ETH2_IP + sed -i /$NAME/d /etc/hosts + echo "$public_ip $NAME" >> /etc/hosts + [ -f /etc/httpd/conf/httpd.conf ] && sed -i -e "s/^Listen.*:80$/Listen $public_ip:80/" /etc/httpd/conf/httpd.conf + [ -f /etc/httpd/conf/httpd.conf ] && sed -i -e "s/^Listen.*:443$/Listen $public_ip:443/" /etc/httpd/conf/httpd.conf +} + +setup_console_proxy() { + public_ip=$ETH2_IP + sed -i /$NAME/d /etc/hosts + echo "$public_ip $NAME" >> /etc/hosts +} + +CMDLINE=$(cat /proc/cmdline) +TYPE="router" +BOOTPROTO="static" + +for i in $CMDLINE + do + # search for foo=bar pattern and cut out foo + KEY=$(echo $i | cut -d= -f1) + VALUE=$(echo $i | cut -d= -f2) + case $KEY in + eth0ip) + ETH0_IP=$VALUE + ;; + eth1ip) + ETH1_IP=$VALUE + ;; + eth2ip) + ETH2_IP=$VALUE + ;; + gateway) + GW=$VALUE + ;; + eth0mask) + ETH0_MASK=$VALUE + ;; + eth1mask) + ETH1_MASK=$VALUE + ;; + eth2mask) + ETH2_MASK=$VALUE + ;; + dns1) + NS1=$VALUE + ;; + dns2) + NS2=$VALUE + ;; + domain) + DOMAIN=$VALUE + ;; + mgmtcidr) + MGMTNET=$VALUE + ;; + localgw) + LOCAL_GW=$VALUE + ;; + template) + TEMPLATE=$VALUE + ;; + name) + NAME=$VALUE + ;; + dhcprange) + DHCP_RANGE=$(echo $VALUE | tr ':' ',') + ;; + bootproto) + BOOTPROTO=$VALUE + ;; + type) + TYPE=$VALUE + ;; + esac +done + +if [ "$BOOTPROTO" == "static" ] +then + exit 0 +fi + +ETH1_IP=$(ifconfig eth1|grep 'inet addr:'|cut -d : -f 2|cut -d \ -f 1) +ETH2_IP=$(ifconfig eth2|grep 'inet addr:'|cut -d : -f 2|cut -d \ -f 1) + +case $TYPE in + secstorage) + [ "$NAME" == "" ] && NAME=secstorage + setup_secstorage; + ;; + consoleproxy) + [ "$NAME" == "" ] && NAME=consoleproxy + setup_console_proxy; + ;; +esac diff --git a/tools/systemvm/debian/config/etc/iptables/iptables-consoleproxy b/tools/systemvm/debian/config/etc/iptables/iptables-consoleproxy new file mode 100644 index 00000000000..92a26f7b558 --- /dev/null +++ b/tools/systemvm/debian/config/etc/iptables/iptables-consoleproxy @@ -0,0 +1,20 @@ +# Generated by iptables-save v1.3.8 on Thu 
Oct 1 18:16:05 2009 +*nat +:PREROUTING ACCEPT [0:0] +:POSTROUTING ACCEPT [0:0] +:OUTPUT ACCEPT [0:0] +COMMIT +*filter +:INPUT DROP [0:0] +:FORWARD DROP [0:0] +:OUTPUT ACCEPT [0:0] +-A INPUT -i lo -j ACCEPT +-A INPUT -i eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT +-A INPUT -i eth1 -m state --state RELATED,ESTABLISHED -j ACCEPT +-A INPUT -i eth2 -m state --state RELATED,ESTABLISHED -j ACCEPT +-A INPUT -p icmp -j ACCEPT +-A INPUT -i eth0 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT +-A INPUT -i eth0 -p tcp -m state --state NEW -m tcp --dport 8001 -j ACCEPT +-A INPUT -i eth2 -p tcp -m state --state NEW -m tcp --dport 443 -j ACCEPT +-A INPUT -i eth2 -p tcp -m state --state NEW -m tcp --dport 80 -j ACCEPT +COMMIT diff --git a/tools/systemvm/debian/config/etc/iptables/iptables-router b/tools/systemvm/debian/config/etc/iptables/iptables-router new file mode 100644 index 00000000000..3bc7b50f74a --- /dev/null +++ b/tools/systemvm/debian/config/etc/iptables/iptables-router @@ -0,0 +1,24 @@ +*nat +:PREROUTING ACCEPT [0:0] +:POSTROUTING ACCEPT [0:0] +:OUTPUT ACCEPT [0:0] +COMMIT +*filter +:INPUT DROP [0:0] +:FORWARD DROP [0:0] +:OUTPUT ACCEPT [0:0] +-A INPUT -i eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT +-A INPUT -i eth1 -m state --state RELATED,ESTABLISHED -j ACCEPT +-A INPUT -i eth2 -m state --state RELATED,ESTABLISHED -j ACCEPT +-A INPUT -p icmp -j ACCEPT +-A INPUT -i lo -j ACCEPT +-A INPUT -i eth0 -p udp -m udp --dport 67 -j ACCEPT +-A INPUT -i eth0 -p udp -m udp --dport 53 -j ACCEPT +-A INPUT -i eth1 -p tcp -m state --state NEW --dport 3922 -j ACCEPT +-A INPUT -i eth0 -p tcp -m state --state NEW --dport 8080 -j ACCEPT +-A INPUT -i eth0 -p tcp -m state --state NEW --dport 80 -j ACCEPT +-A FORWARD -i eth0 -o eth1 -m state --state RELATED,ESTABLISHED -j ACCEPT +-A FORWARD -i eth0 -o eth2 -j ACCEPT +-A FORWARD -i eth2 -o eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT +COMMIT + diff --git 
a/tools/systemvm/debian/config/etc/iptables/iptables-secstorage b/tools/systemvm/debian/config/etc/iptables/iptables-secstorage new file mode 100644 index 00000000000..ef733c431a0 --- /dev/null +++ b/tools/systemvm/debian/config/etc/iptables/iptables-secstorage @@ -0,0 +1,20 @@ +# Generated by iptables-save v1.3.8 on Thu Oct 1 18:16:05 2009 +*nat +:PREROUTING ACCEPT [0:0] +:POSTROUTING ACCEPT [0:0] +:OUTPUT ACCEPT [0:0] +COMMIT +*filter +:INPUT DROP [0:0] +:FORWARD DROP [0:0] +:OUTPUT ACCEPT [0:0] +:HTTP - [0:0] +-A INPUT -i eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT +-A INPUT -i eth1 -m state --state RELATED,ESTABLISHED -j ACCEPT +-A INPUT -i eth2 -m state --state RELATED,ESTABLISHED -j ACCEPT +-A INPUT -i eth2 -p tcp -m state --state NEW -m tcp --dport 80 -j HTTP +-A INPUT -i eth2 -p tcp -m state --state NEW -m tcp --dport 80 -j DROP +-A INPUT -i lo -j ACCEPT +-A INPUT -p icmp -j ACCEPT +-A INPUT -i eth0 -p tcp -m state --state NEW --dport 3922 -j ACCEPT +COMMIT diff --git a/tools/systemvm/debian/config/etc/iptables/rules b/tools/systemvm/debian/config/etc/iptables/rules new file mode 100644 index 00000000000..3bc7b50f74a --- /dev/null +++ b/tools/systemvm/debian/config/etc/iptables/rules @@ -0,0 +1,24 @@ +*nat +:PREROUTING ACCEPT [0:0] +:POSTROUTING ACCEPT [0:0] +:OUTPUT ACCEPT [0:0] +COMMIT +*filter +:INPUT DROP [0:0] +:FORWARD DROP [0:0] +:OUTPUT ACCEPT [0:0] +-A INPUT -i eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT +-A INPUT -i eth1 -m state --state RELATED,ESTABLISHED -j ACCEPT +-A INPUT -i eth2 -m state --state RELATED,ESTABLISHED -j ACCEPT +-A INPUT -p icmp -j ACCEPT +-A INPUT -i lo -j ACCEPT +-A INPUT -i eth0 -p udp -m udp --dport 67 -j ACCEPT +-A INPUT -i eth0 -p udp -m udp --dport 53 -j ACCEPT +-A INPUT -i eth1 -p tcp -m state --state NEW --dport 3922 -j ACCEPT +-A INPUT -i eth0 -p tcp -m state --state NEW --dport 8080 -j ACCEPT +-A INPUT -i eth0 -p tcp -m state --state NEW --dport 80 -j ACCEPT +-A FORWARD -i eth0 -o eth1 -m state 
--state RELATED,ESTABLISHED -j ACCEPT +-A FORWARD -i eth0 -o eth2 -j ACCEPT +-A FORWARD -i eth2 -o eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT +COMMIT + diff --git a/tools/systemvm/debian/config/etc/sysctl.conf b/tools/systemvm/debian/config/etc/sysctl.conf new file mode 100644 index 00000000000..ba5cbe6137d --- /dev/null +++ b/tools/systemvm/debian/config/etc/sysctl.conf @@ -0,0 +1,33 @@ +# Kernel sysctl configuration file for Red Hat Linux +# +# For binary values, 0 is disabled, 1 is enabled. See sysctl(8) and +# sysctl.conf(5) for more details. +# @VERSION@ + +# Controls IP packet forwarding +net.ipv4.ip_forward = 1 + +# Controls source route verification +net.ipv4.conf.default.rp_filter = 1 + +# Do not accept source routing +net.ipv4.conf.default.accept_source_route = 0 + +# Respect local interface in ARP interactions +net.ipv4.conf.default.arp_announce = 2 +net.ipv4.conf.default.arp_ignore = 2 +net.ipv4.conf.all.arp_announce = 2 +net.ipv4.conf.all.arp_ignore = 2 + + +# Controls the System Request debugging functionality of the kernel +kernel.sysrq = 0 + +# Controls whether core dumps will append the PID to the core filename. +# Useful for debugging multi-threaded applications. 
+kernel.core_uses_pid = 1 + +# Controls the use of TCP syncookies +net.ipv4.tcp_syncookies = 1 + +net.netfilter.nf_conntrack_max=65536 diff --git a/tools/systemvm/debian/config/root/.ssh/authorized_keys b/tools/systemvm/debian/config/root/.ssh/authorized_keys new file mode 100644 index 00000000000..f738fe6cad7 --- /dev/null +++ b/tools/systemvm/debian/config/root/.ssh/authorized_keys @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA1j2QZsaDk67SJT4dhzUDZohcuTG4AwBV/t1zn1yPkVQG7th6DkoEUck+c6qeNdSByk8ZVvf0M+24sL9RhpGTF1h/EmLp/fnfEohQ+ZxAgHI1U9AY67A9iqkw9JHnRShukUTXuJOiZte/VvTVJQlJyVNWNyAE/g9t/5sgtuNExq37veWPzyUaibhPIvdPnw3y+azb3LKnHCve/C2j0yf/qvV3S7jqf83OLCml9LIa4F6PVO6crXdCv4DnZiV8Qw/nhCRqQyKm+FXvMBT8mQziRsNUEDB4Mvmu32R7MJK0gvUxXUJOql0LoQqf6xkR8LNnMewKRrGfzuizM4XRp3UdRQ== root@gateway diff --git a/tools/systemvm/debian/config/root/clearUsageRules.sh b/tools/systemvm/debian/config/root/clearUsageRules.sh new file mode 100755 index 00000000000..2517d42e2e0 --- /dev/null +++ b/tools/systemvm/debian/config/root/clearUsageRules.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# clearUsageRules.sh - remove iptable rules for removed public interfaces +# +# +# @VERSION@ + +# if removedVifs file doesn't exist, no rules to be cleared +if [ -f /root/removedVifs ] +then + var=`cat /root/removedVifs` + # loop through even vif to be cleared + for i in $var; do + # Make sure vif doesn't exist + if [ ! 
-f /sys/class/net/$i ] + then + # remove rules + iptables -D NETWORK_STATS -i eth0 -o $i > /dev/null; + iptables -D NETWORK_STATS -i $i -o eth0 > /dev/null; + fi + done +rm /root/removedVifs +fi diff --git a/tools/systemvm/debian/config/root/edithosts.sh b/tools/systemvm/debian/config/root/edithosts.sh new file mode 100755 index 00000000000..5193376b8b5 --- /dev/null +++ b/tools/systemvm/debian/config/root/edithosts.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +# edithosts.sh -- edit the dhcphosts file on the routing domain +# $1 : the mac address +# $2 : the associated ip address +# $3 : the hostname + +wait_for_dnsmasq () { + local _pid=$(/sbin/pidof dnsmasq) + for i in 0 1 2 3 4 5 6 7 8 9 10 + do + sleep 1 + _pid=$(/sbin/pidof dnsmasq) + [ "$_pid" != "" ] && break; + done + [ "$_pid" != "" ] && return 0; + echo "edithosts: timed out waiting for dnsmasq to start" + return 1 +} + +#delete any previous entries from the dhcp hosts file +sed -i /$1/d /etc/dhcphosts.txt +sed -i /$2,/d /etc/dhcphosts.txt +sed -i /$3,/d /etc/dhcphosts.txt + +#put in the new entry +echo "$1,$2,$3,infinite" >>/etc/dhcphosts.txt + +#delete leases to supplied mac and ip addresses +sed -i /$1/d /var/lib/misc/dnsmasq.leases +sed -i /"$2 "/d /var/lib/misc/dnsmasq.leases +sed -i /"$3 "/d /var/lib/misc/dnsmasq.leases + +#put in the new entry +echo "0 $1 $2 $3 *" >> /var/lib/misc/dnsmasq.leases + +#edit hosts file as well +sed -i /"$2 "/d /etc/hosts +sed -i /"$3"/d /etc/hosts +echo "$2 $3" >> /etc/hosts + +# make dnsmasq re-read files +pid=$(/sbin/pidof dnsmasq) +if [ "$pid" != "" ] +then + service dnsmasq restart +else + wait_for_dnsmasq +fi + +exit $? 
diff --git a/tools/systemvm/debian/config/root/firewall.sh b/tools/systemvm/debian/config/root/firewall.sh new file mode 100755 index 00000000000..89cd0d4a95e --- /dev/null +++ b/tools/systemvm/debian/config/root/firewall.sh @@ -0,0 +1,204 @@ +#!/usr/bin/env bash +# $Id: firewall.sh 9947 2010-06-25 19:34:24Z manuel $ $HeadURL: svn://svn.lab.vmops.com/repos/vmdev/java/patches/xenserver/root/firewall.sh $ +# firewall.sh -- allow some ports / protocols to vm instances +# +# +# @VERSION@ + +usage() { + printf "Usage: %s: (-A|-D) -i -r -P protocol (-p port_range | -t icmp_type_code) -l -d [-f -u -y -z ] \n" $(basename $0) >&2 +} + +set -x + +get_dom0_ip () { + eval "$1=$(ifconfig eth0 | awk '/inet addr/ {split ($2,A,":"); print A[2]}')" + return 0 +} + + +#Add the tcp firewall entries into iptables in the routing domain +tcp_entry() { + local instIp=$1 + local dport=$2 + local pubIp=$3 + local port=$4 + local op=$5 + + for vif in $VIF_LIST; do + iptables -t nat $op PREROUTING --proto tcp -i $vif -d $pubIp --destination-port $port -j DNAT --to-destination $instIp:$dport >/dev/null; + done; + + iptables -t nat $op OUTPUT --proto tcp -d $pubIp --destination-port $port -j DNAT --to-destination $instIp:$dport >/dev/null; + iptables $op FORWARD -p tcp -s 0/0 -d $instIp -m state --state ESTABLISHED,RELATED -j ACCEPT > /dev/null; + iptables $op FORWARD -p tcp -s 0/0 -d $instIp --destination-port $dport --syn -j ACCEPT > /dev/null; + + return $? 
+} + +#Add the udp firewall entries into iptables in the routing domain +udp_entry() { + local instIp=$1 + local dport=$2 + local pubIp=$3 + local port=$4 + local op=$5 + + for vif in $VIF_LIST; do + iptables -t nat $op PREROUTING --proto udp -i $vif -d $pubIp --destination-port $port -j DNAT --to-destination $instIp:$dport >/dev/null; + done; + + iptables -t nat $op OUTPUT --proto udp -d $pubIp --destination-port $port -j DNAT --to-destination $instIp:$dport >/dev/null; + iptables $op FORWARD -p udp -s 0/0 -d $instIp --destination-port $dport -j ACCEPT > /dev/null; + + return $? +} + +#Add the icmp firewall entries into iptables in the routing domain +icmp_entry() { + local instIp=$1 + local icmptype=$2 + local pubIp=$3 + local op=$4 + + for vif in $VIF_LIST; do + iptables -t nat $op PREROUTING --proto icmp -i $vif -d $pubIp --icmp-type $icmptype -j DNAT --to-destination $instIp >/dev/null; + done; + + iptables -t nat $op OUTPUT --proto icmp -d $pubIp --icmp-type $icmptype -j DNAT --to-destination $instIp:$dport >/dev/null; + iptables $op FORWARD -p icmp -s 0/0 -d $instIp --icmp-type $icmptype -j ACCEPT > /dev/null; + + return $? 
+} + +get_vif_list() { + local vif_list="" + for i in /sys/class/net/eth*; do + vif=$(basename $i); + if [ "$vif" != "eth0" ] && [ "$vif" != "eth1" ] + then + vif_list="$vif_list $vif"; + fi + done + + echo $vif_list +} + +reverse_op() { + local op=$1 + + if [ "$op" == "-A" ] + then + echo "-D" + else + echo "-A" + fi +} + +rflag= +iflag= +Pflag= +pflag= +tflag= +lflag= +dflag= +oflag= +wflag= +xflag= +nflag= +Nflag= +op="" +oldPrivateIP="" +oldPrivatePort="" + +while getopts 'ADr:i:P:p:t:l:d:w:x:n:N:' OPTION +do + case $OPTION in + A) Aflag=1 + op="-A" + ;; + D) Dflag=1 + op="-D" + ;; + i) iflag=1 + domRIp="$OPTARG" + ;; + r) rflag=1 + instanceIp="$OPTARG" + ;; + P) Pflag=1 + protocol="$OPTARG" + ;; + p) pflag=1 + ports="$OPTARG" + ;; + t) tflag=1 + icmptype="$OPTARG" + ;; + l) lflag=1 + publicIp="$OPTARG" + ;; + d) dflag=1 + dport="$OPTARG" + ;; + w) wflag=1 + oldPrivateIP="$OPTARG" + ;; + x) xflag=1 + oldPrivatePort="$OPTARG" + ;; + n) nflag=1 + domRName="$OPTARG" + ;; + N) Nflag=1 + netmask="$OPTARG" + ;; + ?) usage + exit 2 + ;; + esac +done + +reverseOp=$(reverse_op $op) + +VIF_LIST=$(get_vif_list) + +case $protocol in + "tcp") + # If oldPrivateIP was passed in, this is an update. Delete the old rule from DomR. + if [ "$oldPrivateIP" != "" ] + then + tcp_entry $oldPrivateIP $oldPrivatePort $publicIp $ports "-D" + fi + + # Add/delete the new rule + tcp_entry $instanceIp $dport $publicIp $ports $op + exit $? + ;; + "udp") + # If oldPrivateIP was passed in, this is an update. Delete the old rule from DomR. + if [ "$oldPrivateIP" != "" ] + then + udp_entry $oldPrivateIP $oldPrivatePort $publicIp $ports "-D" + fi + + # Add/delete the new rule + udp_entry $instanceIp $dport $publicIp $ports $op + exit $? + ;; + "icmp") + # If oldPrivateIP was passed in, this is an update. Delete the old rule from DomR. 
+ if [ "$oldPrivateIP" != "" ] + then + icmp_entry $oldPrivateIp $icmptype $publicIp "-D" + fi + + # Add/delete the new rule + icmp_entry $instanceIp $icmptype $publicIp $op + exit $? + ;; + *) + printf "Invalid protocol-- must be tcp, udp or icmp\n" >&2 + exit 5 + ;; +esac diff --git a/tools/systemvm/debian/config/root/loadbalancer.sh b/tools/systemvm/debian/config/root/loadbalancer.sh new file mode 100755 index 00000000000..f6c2c5d7e93 --- /dev/null +++ b/tools/systemvm/debian/config/root/loadbalancer.sh @@ -0,0 +1,167 @@ +#!/usr/bin/env bash +# $Id: loadbalancer.sh 9947 2010-06-25 19:34:24Z manuel $ $HeadURL: svn://svn.lab.vmops.com/repos/vmdev/java/patches/xenserver/root/loadbalancer.sh $ +# loadbalancer.sh -- reconfigure loadbalancer rules +# +# +# @VERSION@ + +usage() { + printf "Usage: %s: -i -a -d -f \n" $(basename $0) >&2 +} + +# set -x + +# check if gateway domain is up and running +check_gw() { + ping -c 1 -n -q $1 > /dev/null + if [ $? -gt 0 ] + then + sleep 1 + ping -c 1 -n -q $1 > /dev/null + fi + return $?; +} + +# firewall entry to ensure that haproxy can receive on specified port +fw_entry() { + local added=$1 + local removed=$2 + + if [ "$added" == "none" ] + then + added="" + fi + + if [ "$removed" == "none" ] + then + removed="" + fi + + local a=$(echo $added | cut -d, -f1- --output-delimiter=" ") + local r=$(echo $removed | cut -d, -f1- --output-delimiter=" ") + + for i in $a + do + local pubIp=$(echo $i | cut -d: -f1) + local dport=$(echo $i | cut -d: -f2) + + for vif in $VIF_LIST; do + iptables -D INPUT -i $vif -p tcp -d $pubIp --dport $dport -j ACCEPT 2> /dev/null + iptables -A INPUT -i $vif -p tcp -d $pubIp --dport $dport -j ACCEPT + + if [ $? 
-gt 0 ] + then + return 1 + fi + done + done + + for i in $r + do + local pubIp=$(echo $i | cut -d: -f1) + local dport=$(echo $i | cut -d: -f2) + + for vif in $VIF_LIST; do + iptables -D INPUT -i $vif -p tcp -d $pubIp --dport $dport -j ACCEPT + done + done + + return 0 +} + +#Hot reconfigure HA Proxy in the routing domain +reconfig_lb() { + /root/reconfigLB.sh + return $? +} + +# Restore the HA Proxy to its previous state, and revert iptables rules on DomR +restore_lb() { + # Copy the old version of haproxy.cfg into the file that reconfigLB.sh uses + cp /etc/haproxy/haproxy.cfg.old /etc/haproxy/haproxy.cfg.new + + if [ $? -eq 0 ] + then + # Run reconfigLB.sh again + /root/reconfigLB.sh + fi +} + +get_vif_list() { + local vif_list="" + for i in /sys/class/net/eth*; do + vif=$(basename $i); + if [ "$vif" != "eth0" ] && [ "$vif" != "eth1" ] + then + vif_list="$vif_list $vif"; + fi + done + + echo $vif_list +} + +mflag= +iflag= +aflag= +dflag= +fflag= + +while getopts 'i:a:d:f:' OPTION +do + case $OPTION in + i) iflag=1 + domRIp="$OPTARG" + ;; + a) aflag=1 + addedIps="$OPTARG" + ;; + d) dflag=1 + removedIps="$OPTARG" + ;; + f) fflag=1 + cfgfile="$OPTARG" + ;; + ?) usage + exit 2 + ;; + esac +done + +VIF_LIST=$(get_vif_list) + +# hot reconfigure haproxy +reconfig_lb $cfgfile + +if [ $? -gt 0 ] +then + printf "Reconfiguring loadbalancer failed\n" + exit 1 +fi + +if [ "$addedIps" == "" ] +then + addedIps="none" +fi + +if [ "$removedIps" == "" ] +then + removedIps="none" +fi + +# iptables entry to ensure that haproxy receives traffic +fw_entry $addedIps $removedIps + +if [ $? 
-gt 0 ] +then + # Restore the LB + restore_lb + + # Revert iptables rules on DomR, with addedIps and removedIps swapped + fw_entry $removedIps $addedIps + + exit 1 +fi + +exit 0 + + diff --git a/tools/systemvm/debian/config/root/reconfigLB.sh b/tools/systemvm/debian/config/root/reconfigLB.sh new file mode 100755 index 00000000000..0ce93a06d69 --- /dev/null +++ b/tools/systemvm/debian/config/root/reconfigLB.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# save previous state + mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.old + mv /var/run/haproxy.pid /var/run/haproxy.pid.old + + mv /etc/haproxy/haproxy.cfg.new /etc/haproxy/haproxy.cfg + kill -TTOU $(cat /var/run/haproxy.pid.old) + sleep 2 + if haproxy -D -p /var/run/haproxy.pid -f /etc/haproxy/haproxy.cfg; then + echo "New haproxy instance successfully loaded, stopping previous one." + kill -KILL $(cat /var/run/haproxy.pid.old) + rm -f /var/run/haproxy.pid.old + exit 0 + else + echo "New instance failed to start, resuming previous one." + kill -TTIN $(cat /var/run/haproxy.pid.old) + rm -f /var/run/haproxy.pid + mv /var/run/haproxy.pid.old /var/run/haproxy.pid + mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.new + mv /etc/haproxy/haproxy.cfg.old /etc/haproxy/haproxy.cfg + exit 1 + fi From 9fc85d5f35961c9ed51ca8327e623f66f3b59537 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Tue, 31 Aug 2010 16:44:32 -0700 Subject: [PATCH 046/145] bin got ignored by gitignore --- .../debian/config/opt/cloud/bin/passwd_server | 16 +++ .../config/opt/cloud/bin/patchsystemvm.sh | 118 ++++++++++++++++++ .../config/opt/cloud/bin/serve_password.sh | 73 +++++++++++ 3 files changed, 207 insertions(+) create mode 100755 tools/systemvm/debian/config/opt/cloud/bin/passwd_server create mode 100755 tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh create mode 100755 tools/systemvm/debian/config/opt/cloud/bin/serve_password.sh diff --git a/tools/systemvm/debian/config/opt/cloud/bin/passwd_server 
b/tools/systemvm/debian/config/opt/cloud/bin/passwd_server new file mode 100755 index 00000000000..c0326485f70 --- /dev/null +++ b/tools/systemvm/debian/config/opt/cloud/bin/passwd_server @@ -0,0 +1,16 @@ +#!/bin/bash + +guestIp=$(ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}') + +while true +do + socat TCP4-LISTEN:8080,reuseaddr,crnl,bind=$guestIp SYSTEM:"/opt/cloud/bin/serve_password.sh \"\$SOCAT_PEERADDR\"" + + rc=$? + if [ $rc -ne 0 ] + then + logger "Socat failed with error code $rc. Restarting socat..." + sleep 3 + fi + +done diff --git a/tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh b/tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh new file mode 100755 index 00000000000..f9476724a61 --- /dev/null +++ b/tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh @@ -0,0 +1,118 @@ +#/bin/bash +# $Id: patchsystemvm.sh 10800 2010-07-16 13:48:39Z edison $ $HeadURL: svn://svn.lab.vmops.com/repos/branches/2.1.x/java/scripts/vm/hypervisor/xenserver/prepsystemvm.sh $ + +#set -x +logfile="/var/log/patchsystemvm.log" +# +# To use existing console proxy .zip-based package file +# +patch_console_proxy() { + local patchfile=$1 + rm /usr/local/cloud/systemvm -rf + mkdir -p /usr/local/cloud/systemvm + echo "All" | unzip $patchfile -d /usr/local/cloud/systemvm >$logfile 2>&1 + find /usr/local/cloud/systemvm/ -name \*.sh | xargs chmod 555 + return 0 +} + +consoleproxy_svcs() { + chkconfig cloud on + chkconfig postinit on + chkconfig domr_webserver off + chkconfig haproxy off ; + chkconfig dnsmasq off + chkconfig sshd on + chkconfig httpd off + chkconfig nfs off + chkconfig nfslock off + chkconfig rpcbind off + chkconfig rpcidmap off + mkdir -p /var/log/cloud +} + +secstorage_svcs() { + chkconfig cloud on + chkconfig postinit on + chkconfig domr_webserver off + chkconfig haproxy off ; + chkconfig dnsmasq off + chkconfig sshd on + chkconfig httpd off + mkdir -p /var/log/cloud +} + +routing_svcs() { + chkconfig cloud off + chkconfig 
domr_webserver on ; + chkconfig haproxy on ; + chkconfig dnsmasq on + chkconfig sshd on + chkconfig nfs off + chkconfig nfslock off + chkconfig rpcbind off + chkconfig rpcidmap off +} + +CMDLINE=$(cat /var/cache/cloud/cmdline) +TYPE="router" + +for i in $CMDLINE + do + # search for foo=bar pattern and cut out foo + KEY=$(echo $i | cut -d= -f1) + VALUE=$(echo $i | cut -d= -f2) + case $KEY in + type) + TYPE=$VALUE + ;; + *) + ;; + esac +done + +if [ "$TYPE" == "consoleproxy" ] || [ "$TYPE" == "secstorage" ] && [ -f /media/cdrom/systemvm.zip ] +then + patch_console_proxy /media/cdrom/systemvm.zip + if [ $? -gt 0 ] + then + printf "Failed to apply patch systemvm\n" >$logfile + exit 5 + fi +fi + + +#empty known hosts +echo "" > /root/.ssh/known_hosts + +if [ "$TYPE" == "router" ] +then + routing_svcs + if [ $? -gt 0 ] + then + printf "Failed to execute routing_svcs\n" >$logfile + exit 6 + fi +fi + + +if [ "$TYPE" == "consoleproxy" ] +then + consoleproxy_svcs + if [ $? -gt 0 ] + then + printf "Failed to execute consoleproxy_svcs\n" >$logfile + exit 7 + fi +fi + +if [ "$TYPE" == "secstorage" ] +then + secstorage_svcs + if [ $? -gt 0 ] + then + printf "Failed to execute secstorage_svcs\n" >$logfile + exit 8 + fi +fi + +exit $? diff --git a/tools/systemvm/debian/config/opt/cloud/bin/serve_password.sh b/tools/systemvm/debian/config/opt/cloud/bin/serve_password.sh new file mode 100755 index 00000000000..d66f6553745 --- /dev/null +++ b/tools/systemvm/debian/config/opt/cloud/bin/serve_password.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +# set -x + +#replace a line in a file of the form key=value +# $1 filename +# $2 keyname +# $3 value +replace_in_file() { + local filename=$1 + local keyname=$2 + local value=$3 + sed -i /$keyname=/d $filename + echo "$keyname=$value" >> $filename + return $? 
+} + +#get a value from a file in the form key=value +# $1 filename +# $2 keyname +get_value() { + local filename=$1 + local keyname=$2 + grep -i $keyname= $filename | cut -d= -f2 +} + +ip=$1 + +logger "serve_password called to service a request for $ip." + +while read input +do + if [ "$input" == "" ] + then + break + fi + + request=$(echo $input | grep "VM Request:" | cut -d: -f2 | sed 's/^[ \t]*//') + + if [ "$request" != "" ] + then + break + fi +done + +# echo -e \"\\\"HTTP/1.0 200 OK\\\nDocumentType: text/plain\\\n\\\n\\\"\"; + +if [ "$request" == "send_my_password" ] +then + password=$(get_value /root/passwords $ip) + if [ "$password" == "" ] + then + logger "send_password_to_domu sent bad_request to $ip." + echo "bad_request" + else + logger "send_password_to_domu sent a password to $ip." + echo $password + fi +else + if [ "$request" == "saved_password" ] + then + replace_in_file /root/passwords $ip "saved_password" + logger "send_password_to_domu sent saved_password to $ip." + echo "saved_password" + else + logger "send_password_to_domu sent bad_request to $ip." + echo "bad_request" + fi +fi + +# echo -e \"\\\"\\\n\\\"\" + +exit 0 From 8e1643f71cd8f089433498bfee5ee06a95c3dce7 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Wed, 1 Sep 2010 16:53:45 -0700 Subject: [PATCH 047/145] More fixes... 
--- tools/systemvm/debian/config/etc/dnsmasq.conf | 463 ++++++++ .../debian/config/etc/httpd/conf/httpd.conf | 990 ++++++++++++++++++ tools/systemvm/debian/config/etc/init.d/cloud | 14 +- .../config/etc/init.d/cloud-early-config | 24 +- .../debian/config/etc/init.d/postinit | 59 +- .../debian/config/etc/ssh/sshd_config | 128 +++ tools/systemvm/debian/config/etc/sysctl.conf | 2 +- .../config/opt/cloud/bin/patchsystemvm.sh | 28 +- .../config/var/www/html/latest/.htaccess | 5 + .../config/var/www/html/userdata/.htaccess | 1 + 10 files changed, 1666 insertions(+), 48 deletions(-) create mode 100644 tools/systemvm/debian/config/etc/dnsmasq.conf create mode 100644 tools/systemvm/debian/config/etc/httpd/conf/httpd.conf create mode 100644 tools/systemvm/debian/config/etc/ssh/sshd_config create mode 100644 tools/systemvm/debian/config/var/www/html/latest/.htaccess create mode 100644 tools/systemvm/debian/config/var/www/html/userdata/.htaccess diff --git a/tools/systemvm/debian/config/etc/dnsmasq.conf b/tools/systemvm/debian/config/etc/dnsmasq.conf new file mode 100644 index 00000000000..b908c2e4bee --- /dev/null +++ b/tools/systemvm/debian/config/etc/dnsmasq.conf @@ -0,0 +1,463 @@ +# Configuration file for dnsmasq. +# +# Format is one option per line, legal options are the same +# as the long options legal on the command line. See +# "/usr/sbin/dnsmasq --help" or "man 8 dnsmasq" for details. + +# The following two options make you a better netizen, since they +# tell dnsmasq to filter out queries which the public DNS cannot +# answer, and which load the servers (especially the root servers) +# uneccessarily. If you have a dial-on-demand link they also stop +# these requests from bringing up the link uneccessarily. + +# Never forward plain names (without a dot or domain part) +domain-needed +# Never forward addresses in the non-routed address spaces. 
+bogus-priv + + +# Uncomment this to filter useless windows-originated DNS requests +# which can trigger dial-on-demand links needlessly. +# Note that (amongst other things) this blocks all SRV requests, +# so don't use it if you use eg Kerberos. +# This option only affects forwarding, SRV records originating for +# dnsmasq (via srv-host= lines) are not suppressed by it. +#filterwin2k + +# Change this line if you want dns to get its upstream servers from +# somewhere other that /etc/resolv.conf +resolv-file=/etc/dnsmasq-resolv.conf + +# By default, dnsmasq will send queries to any of the upstream +# servers it knows about and tries to favour servers to are known +# to be up. Uncommenting this forces dnsmasq to try each query +# with each server strictly in the order they appear in +# /etc/resolv.conf +#strict-order + +# If you don't want dnsmasq to read /etc/resolv.conf or any other +# file, getting its servers from this file instead (see below), then +# uncomment this. +#no-resolv + +# If you don't want dnsmasq to poll /etc/resolv.conf or other resolv +# files for changes and re-read them then uncomment this. +#no-poll + +# Add other name servers here, with domain specs if they are for +# non-public domains. +#server=/localnet/192.168.0.1 + +# Example of routing PTR queries to nameservers: this will send all +# address->name queries for 192.168.3/24 to nameserver 10.1.2.3 +#server=/3.168.192.in-addr.arpa/10.1.2.3 + +# Add local-only domains here, queries in these domains are answered +# from /etc/hosts or DHCP only. +local=/2.vmops-test.vmops.com/ + +# Add domains which you want to force to an IP address here. +# The example below send any host in doubleclick.net to a local +# webserver. +#address=/doubleclick.net/127.0.0.1 + +# If you want dnsmasq to change uid and gid to something other +# than the default, edit the following lines. 
+#user= +#group= + +# If you want dnsmasq to listen for DHCP and DNS requests only on +# specified interfaces (and the loopback) give the name of the +# interface (eg eth0) here. +# Repeat the line for more than one interface. +interface=eth0 +# Or you can specify which interface _not_ to listen on +except-interface=eth1 +except-interface=eth2 +except-interface=lo +# Or which to listen on by address (remember to include 127.0.0.1 if +# you use this.) +#listen-address= +# If you want dnsmasq to provide only DNS service on an interface, +# configure it as shown above, and then use the following line to +# disable DHCP on it. +no-dhcp-interface=eth1 +no-dhcp-interface=eth2 + +# On systems which support it, dnsmasq binds the wildcard address, +# even when it is listening on only some interfaces. It then discards +# requests that it shouldn't reply to. This has the advantage of +# working even when interfaces come and go and change address. If you +# want dnsmasq to really bind only the interfaces it is listening on, +# uncomment this option. About the only time you may need this is when +# running another nameserver on the same machine. +bind-interfaces + +# If you don't want dnsmasq to read /etc/hosts, uncomment the +# following line. +#no-hosts +# or if you want it to read another file, as well as /etc/hosts, use +# this. +#addn-hosts=/etc/banner_add_hosts + +# Set this (and domain: see below) if you want to have a domain +# automatically added to simple names in a hosts-file. +expand-hosts + +# Set the domain for dnsmasq. this is optional, but if it is set, it +# does the following things. +# 1) Allows DHCP hosts to have fully qualified domain names, as long +# as the domain part matches this setting. 
+# 2) Sets the "domain" DHCP option thereby potentially setting the +# domain of all systems configured by DHCP +# 3) Provides the domain part for "expand-hosts" +domain=2.vmops-test.vmops.com + +# Uncomment this to enable the integrated DHCP server, you need +# to supply the range of addresses available for lease and optionally +# a lease time. If you have more than one network, you will need to +# repeat this for each network on which you want to supply DHCP +# service. +dhcp-range=10.1.1.1,static +#dhcp-range=10.0.0.1,10.255.255.255 +dhcp-hostsfile=/etc/dhcphosts.txt + +# This is an example of a DHCP range where the netmask is given. This +# is needed for networks we reach the dnsmasq DHCP server via a relay +# agent. If you don't know what a DHCP relay agent is, you probably +# don't need to worry about this. +#dhcp-range=192.168.0.50,192.168.0.150,255.255.255.0,12h + +# This is an example of a DHCP range with a network-id, so that +# some DHCP options may be set only for this network. +#dhcp-range=red,192.168.0.50,192.168.0.150 + +# Supply parameters for specified hosts using DHCP. There are lots +# of valid alternatives, so we will give examples of each. Note that +# IP addresses DO NOT have to be in the range given above, they just +# need to be on the same network. 
The order of the parameters in these +# do not matter, it's permissible to give name,address and MAC in any order + +# Always allocate the host with ethernet address 11:22:33:44:55:66 +# The IP address 192.168.0.60 +#dhcp-host=11:22:33:44:55:66,192.168.0.60 + +# Always set the name of the host with hardware address +# 11:22:33:44:55:66 to be "fred" +#dhcp-host=11:22:33:44:55:66,fred + +# Always give the host with ethernet address 11:22:33:44:55:66 +# the name fred and IP address 192.168.0.60 and lease time 45 minutes +#dhcp-host=11:22:33:44:55:66,fred,192.168.0.60,45m + +# Give the machine which says it's name is "bert" IP address +# 192.168.0.70 and an infinite lease +#dhcp-host=bert,192.168.0.70,infinite + +# Always give the host with client identifier 01:02:02:04 +# the IP address 192.168.0.60 +#dhcp-host=id:01:02:02:04,192.168.0.60 + +# Always give the host with client identifier "marjorie" +# the IP address 192.168.0.60 +#dhcp-host=id:marjorie,192.168.0.60 + +# Enable the address given for "judge" in /etc/hosts +# to be given to a machine presenting the name "judge" when +# it asks for a DHCP lease. +#dhcp-host=judge + +# Never offer DHCP service to a machine whose ethernet +# address is 11:22:33:44:55:66 +#dhcp-host=11:22:33:44:55:66,ignore + +# Ignore any client-id presented by the machine with ethernet +# address 11:22:33:44:55:66. This is useful to prevent a machine +# being treated differently when running under different OS's or +# between PXE boot and OS boot. +#dhcp-host=11:22:33:44:55:66,id:* + +# Send extra options which are tagged as "red" to +# the machine with ethernet address 11:22:33:44:55:66 +#dhcp-host=11:22:33:44:55:66,net:red + +# Send extra options which are tagged as "red" to +# any machine with ethernet address starting 11:22:33: +#dhcp-host=11:22:33:*:*:*,net:red + +# Ignore any clients which are specified in dhcp-host lines +# or /etc/ethers. Equivalent to ISC "deny unknown-clients". 
+# This relies on the special "known" tag which is set when +# a host is matched. +#dhcp-ignore=#known + +# Send extra options which are tagged as "red" to any machine whose +# DHCP vendorclass string includes the substring "Linux" +#dhcp-vendorclass=red,Linux + +# Send extra options which are tagged as "red" to any machine one +# of whose DHCP userclass strings includes the substring "accounts" +#dhcp-userclass=red,accounts + +# Send extra options which are tagged as "red" to any machine whose +# MAC address matches the pattern. +#dhcp-mac=red,00:60:8C:*:*:* + +# If this line is uncommented, dnsmasq will read /etc/ethers and act +# on the ethernet-address/IP pairs found there just as if they had +# been given as --dhcp-host options. Useful if you keep +# MAC-address/host mappings there for other purposes. +#read-ethers + +# Send options to hosts which ask for a DHCP lease. +# See RFC 2132 for details of available options. +# Common options can be given to dnsmasq by name: +# run "dnsmasq --help dhcp" to get a list. +# Note that all the common settings, such as netmask and +# broadcast address, DNS server and default route, are given +# sane defaults by dnsmasq. You very likely will not need +# any dhcp-options. If you use Windows clients and Samba, there +# are some options which are recommended, they are detailed at the +# end of this section. + +# Override the default route supplied by dnsmasq, which assumes the +# router is the same machine as the one running dnsmasq. +#dhcp-option=3,1.2.3.4 + +# Do the same thing, but using the option name +#dhcp-option=option:router,1.2.3.4 + +# Override the default route supplied by dnsmasq and send no default +# route at all. Note that this only works for the options sent by +# default (1, 3, 6, 12, 28) the same line will send a zero-length option +# for all other option numbers. 
+#dhcp-option=3 + +# Set the NTP time server addresses to 192.168.0.4 and 10.10.0.5 +#dhcp-option=option:ntp-server,192.168.0.4,10.10.0.5 + +# Set the NTP time server address to be the same machine as +# is running dnsmasq +#dhcp-option=42,0.0.0.0 + +# Set the NIS domain name to "welly" +#dhcp-option=40,welly + +# Set the default time-to-live to 50 +#dhcp-option=23,50 + +# Set the "all subnets are local" flag +#dhcp-option=27,1 + +# Set the domain +dhcp-option=15,"2.vmops-test.vmops.com" + +# Send the etherboot magic flag and then etherboot options (a string). +#dhcp-option=128,e4:45:74:68:00:00 +#dhcp-option=129,NIC=eepro100 + +# Specify an option which will only be sent to the "red" network +# (see dhcp-range for the declaration of the "red" network) +# Note that the net: part must precede the option: part. +#dhcp-option = net:red, option:ntp-server, 192.168.1.1 + +# The following DHCP options set up dnsmasq in the same way as is specified +# for the ISC dhcpcd in +# http://www.samba.org/samba/ftp/docs/textdocs/DHCP-Server-Configuration.txt +# adapted for a typical dnsmasq installation where the host running +# dnsmasq is also the host running samba. +# you may want to uncomment them if you use Windows clients and Samba. +#dhcp-option=19,0 # option ip-forwarding off +#dhcp-option=44,0.0.0.0 # set netbios-over-TCP/IP nameserver(s) aka WINS server(s) +#dhcp-option=45,0.0.0.0 # netbios datagram distribution server +#dhcp-option=46,8 # netbios node type +#dhcp-option=47 # empty netbios scope. + +# Send RFC-3397 DNS domain search DHCP option. WARNING: Your DHCP client +# probably doesn't support this...... +#dhcp-option=option:domain-search,eng.apple.com,marketing.apple.com + +# Send RFC-3442 classless static routes (note the netmask encoding) +#dhcp-option=121,192.168.1.0/24,1.2.3.4,10.0.0.0/8,5.6.7.8 + +# Send vendor-class specific options encapsulated in DHCP option 43. 
+# The meaning of the options is defined by the vendor-class so +# options are sent only when the client supplied vendor class +# matches the class given here. (A substring match is OK, so "MSFT" +# matches "MSFT" and "MSFT 5.0"). This example sets the +# mtftp address to 0.0.0.0 for PXEClients. +#dhcp-option=vendor:PXEClient,1,0.0.0.0 + +# Send microsoft-specific option to tell windows to release the DHCP lease +# when it shuts down. Note the "i" flag, to tell dnsmasq to send the +# value as a four-byte integer - that's what microsoft wants. See +# http://technet2.microsoft.com/WindowsServer/en/library/a70f1bb7-d2d4-49f0-96d6-4b7414ecfaae1033.mspx?mfr=true +dhcp-option=vendor:MSFT,2,1i + +# Send the Encapsulated-vendor-class ID needed by some configurations of +# Etherboot to allow it to recognise the DHCP server. +#dhcp-option=vendor:Etherboot,60,"Etherboot" + +# Send options to PXELinux. Note that we need to send the options even +# though they don't appear in the parameter request list, so we need +# to use dhcp-option-force here. +# See http://syslinux.zytor.com/pxe.php#special for details. +# Magic number - needed before anything else is recognised +#dhcp-option-force=208,f1:00:74:7e +# Configuration file name +#dhcp-option-force=209,configs/common +# Path prefix +#dhcp-option-force=210,/tftpboot/pxelinux/files/ +# Reboot time. (Note 'i' to send 32-bit value) +#dhcp-option-force=211,30i + +# Set the boot filename for BOOTP. You will only need +# this if you want to boot machines over the network and you will need +# a TFTP server; either dnsmasq's built in TFTP server or an +# external one. (See below for how to enable the TFTP server.) +#dhcp-boot=pxelinux.0 + +# Enable dnsmasq's built-in TFTP server +#enable-tftp + +# Set the root directory for files available via TFTP. +#tftp-root=/var/ftpd + +# Make the TFTP server more secure: with this set, only files owned by +# the user dnsmasq is running as will be sent over the net. 
+#tftp-secure + +# Set the boot file name only when the "red" tag is set. +#dhcp-boot=net:red,pxelinux.red-net + +# An example of dhcp-boot with an external server: the name and IP +# address of the server are given after the filename. +#dhcp-boot=/var/ftpd/pxelinux.0,boothost,192.168.0.3 + +# Set the limit on DHCP leases, the default is 150 +#dhcp-lease-max=150 + +# The DHCP server needs somewhere on disk to keep its lease database. +# This defaults to a sane location, but if you want to change it, use +# the line below. +#dhcp-leasefile=/var/lib/misc/dnsmasq.leases +leasefile-ro + +# Set the DHCP server to authoritative mode. In this mode it will barge in +# and take over the lease for any client which broadcasts on the network, +# whether it has a record of the lease or not. This avoids long timeouts +# when a machine wakes up on a new network. DO NOT enable this if there's +# the slightest chance that you might end up accidentally configuring a DHCP +# server for your campus/company. The ISC server uses +# the same option, and this URL provides more information: +# http://www.isc.org/index.pl?/sw/dhcp/authoritative.php +#dhcp-authoritative + +# Run an executable when a DHCP lease is created or destroyed. +# The arguments sent to the script are "add" or "del", +# then the MAC address, the IP address and finally the hostname +# if there is one. +#dhcp-script=/bin/echo + +# Set the cachesize here. +#cache-size=150 + +# If you want to disable negative caching, uncomment this. +#no-negcache + +# Normally responses which come from /etc/hosts and the DHCP lease +# file have Time-To-Live set as zero, which conventionally means +# do not cache further. If you are happy to trade lower load on the +# server for potentially stale data, you can set a time-to-live (in +# seconds) here. 
+#local-ttl= + +# If you want dnsmasq to detect attempts by Verisign to send queries +# to unregistered .com and .net hosts to its sitefinder service and +# have dnsmasq instead return the correct NXDOMAIN response, uncomment +# this line. You can add similar lines to do the same for other +# registries which have implemented wildcard A records. +#bogus-nxdomain=64.94.110.11 + +# If you want to fix up DNS results from upstream servers, use the +# alias option. This only works for IPv4. +# This alias makes a result of 1.2.3.4 appear as 5.6.7.8 +#alias=1.2.3.4,5.6.7.8 +# and this maps 1.2.3.x to 5.6.7.x +#alias=1.2.3.0,5.6.7.0,255.255.255.0 + + +# Change these lines if you want dnsmasq to serve MX records. + +# Return an MX record named "maildomain.com" with target +# servermachine.com and preference 50 +#mx-host=maildomain.com,servermachine.com,50 + +# Set the default target for MX records created using the localmx option. +#mx-target=servermachine.com + +# Return an MX record pointing to the mx-target for all local +# machines. +#localmx + +# Return an MX record pointing to itself for all local machines. +#selfmx + +# Change the following lines if you want dnsmasq to serve SRV +# records. These are useful if you want to serve ldap requests for +# Active Directory and other windows-originated DNS requests. +# See RFC 2782. +# You may add multiple srv-host lines. +# The fields are <name>,<target>,<port>,<priority>,<weight> +# If the domain part is missing from the name (so that it just has the +# service and protocol sections) then the domain given by the domain= +# config option is used. (Note that expand-hosts does not need to be +# set for this to work.) 
+ +# A SRV record sending LDAP for the example.com domain to +# ldapserver.example.com port 289 +#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389 + +# A SRV record sending LDAP for the example.com domain to +# ldapserver.example.com port 289 (using domain=) +###domain=example.com +#srv-host=_ldap._tcp,ldapserver.example.com,389 + +# Two SRV records for LDAP, each with different priorities +#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,1 +#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,2 + +# A SRV record indicating that there is no LDAP server for the domain +# example.com +#srv-host=_ldap._tcp.example.com + +# The following line shows how to make dnsmasq serve an arbitrary PTR +# record. This is useful for DNS-SD. (Note that the +# domain-name expansion done for SRV records _does_not +# occur for PTR records.) +#ptr-record=_http._tcp.dns-sd-services,"New Employee Page._http._tcp.dns-sd-services" + +# Change the following lines to enable dnsmasq to serve TXT records. +# These are used for things like SPF and zeroconf. (Note that the +# domain-name expansion done for SRV records _does_not +# occur for TXT records.) + +#Example SPF. +#txt-record=example.com,"v=spf1 a -all" + +#Example zeroconf +#txt-record=_http._tcp.example.com,name=value,paper=A4 + + +# For debugging purposes, log each DNS query as it passes through +# dnsmasq. +log-queries + +# Log lots of extra information about DHCP transactions. +#log-dhcp + +log-facility=/var/log/dnsmasq.log + +# Include a another lot of configuration options. +#conf-file=/etc/dnsmasq.more.conf +conf-dir=/etc/dnsmasq.d diff --git a/tools/systemvm/debian/config/etc/httpd/conf/httpd.conf b/tools/systemvm/debian/config/etc/httpd/conf/httpd.conf new file mode 100644 index 00000000000..e11384ef772 --- /dev/null +++ b/tools/systemvm/debian/config/etc/httpd/conf/httpd.conf @@ -0,0 +1,990 @@ +# +# This is the main Apache server configuration file. 
It contains the +# configuration directives that give the server its instructions. +# See for detailed information. +# In particular, see +# +# for a discussion of each configuration directive. +# +# +# Do NOT simply read the instructions in here without understanding +# what they do. They're here only as hints or reminders. If you are unsure +# consult the online docs. You have been warned. +# +# The configuration directives are grouped into three basic sections: +# 1. Directives that control the operation of the Apache server process as a +# whole (the 'global environment'). +# 2. Directives that define the parameters of the 'main' or 'default' server, +# which responds to requests that aren't handled by a virtual host. +# These directives also provide default values for the settings +# of all virtual hosts. +# 3. Settings for virtual hosts, which allow Web requests to be sent to +# different IP addresses or hostnames and have them handled by the +# same Apache server process. +# +# Configuration and logfile names: If the filenames you specify for many +# of the server's control files begin with "/" (or "drive:/" for Win32), the +# server will use that explicit path. If the filenames do *not* begin +# with "/", the value of ServerRoot is prepended -- so "logs/foo.log" +# with ServerRoot set to "/etc/httpd" will be interpreted by the +# server as "/etc/httpd/logs/foo.log". +# + +### Section 1: Global Environment +# +# The directives in this section affect the overall operation of Apache, +# such as the number of concurrent requests it can handle or where it +# can find its configuration files. +# + +# +# Don't give away too much information about all the subcomponents +# we are running. Comment out this line if you don't mind remote sites +# finding out what major optional modules you are running +ServerTokens OS + +# +# ServerRoot: The top of the directory tree under which the server's +# configuration, error, and log files are kept. +# +# NOTE! 
If you intend to place this on an NFS (or otherwise network) +# mounted filesystem then please read the LockFile documentation +# (available at ); +# you will save yourself a lot of trouble. +# +# Do NOT add a slash at the end of the directory path. +# +ServerRoot "/etc/httpd" + +# +# PidFile: The file in which the server should record its process +# identification number when it starts. +# +PidFile run/httpd.pid + +# +# Timeout: The number of seconds before receives and sends time out. +# +Timeout 120 + +# +# KeepAlive: Whether or not to allow persistent connections (more than +# one request per connection). Set to "Off" to deactivate. +# +KeepAlive Off + +# +# MaxKeepAliveRequests: The maximum number of requests to allow +# during a persistent connection. Set to 0 to allow an unlimited amount. +# We recommend you leave this number high, for maximum performance. +# +MaxKeepAliveRequests 100 + +# +# KeepAliveTimeout: Number of seconds to wait for the next request from the +# same client on the same connection. 
+# +KeepAliveTimeout 15 + +## +## Server-Pool Size Regulation (MPM specific) +## + +# prefork MPM +# StartServers: number of server processes to start +# MinSpareServers: minimum number of server processes which are kept spare +# MaxSpareServers: maximum number of server processes which are kept spare +# ServerLimit: maximum value for MaxClients for the lifetime of the server +# MaxClients: maximum number of server processes allowed to start +# MaxRequestsPerChild: maximum number of requests a server process serves + +StartServers 8 +MinSpareServers 5 +MaxSpareServers 20 +ServerLimit 256 +MaxClients 256 +MaxRequestsPerChild 4000 + + +# worker MPM +# StartServers: initial number of server processes to start +# MaxClients: maximum number of simultaneous client connections +# MinSpareThreads: minimum number of worker threads which are kept spare +# MaxSpareThreads: maximum number of worker threads which are kept spare +# ThreadsPerChild: constant number of worker threads in each server process +# MaxRequestsPerChild: maximum number of requests a server process serves + +StartServers 2 +MaxClients 150 +MinSpareThreads 25 +MaxSpareThreads 75 +ThreadsPerChild 25 +MaxRequestsPerChild 0 + + +# +# Listen: Allows you to bind Apache to specific IP addresses and/or +# ports, in addition to the default. See also the +# directive. +# +# Change this to Listen on specific IP addresses as shown below to +# prevent Apache from glomming onto all bound IP addresses (0.0.0.0) +# +#Listen 12.34.56.78:80 +Listen 10.1.1.1:80 + +# +# Dynamic Shared Object (DSO) Support +# +# To be able to use the functionality of a module which was built as a DSO you +# have to place corresponding `LoadModule' lines at this location so the +# directives contained in it are actually available _before_ they are used. +# Statically compiled modules (those listed by `httpd -l') do not need +# to be loaded here. 
+# +# Example: +# LoadModule foo_module modules/mod_foo.so +# +LoadModule auth_basic_module modules/mod_auth_basic.so +LoadModule auth_digest_module modules/mod_auth_digest.so +LoadModule authn_file_module modules/mod_authn_file.so +LoadModule authn_alias_module modules/mod_authn_alias.so +LoadModule authn_anon_module modules/mod_authn_anon.so +LoadModule authn_dbm_module modules/mod_authn_dbm.so +LoadModule authn_default_module modules/mod_authn_default.so +LoadModule authz_host_module modules/mod_authz_host.so +LoadModule authz_user_module modules/mod_authz_user.so +LoadModule authz_owner_module modules/mod_authz_owner.so +LoadModule authz_groupfile_module modules/mod_authz_groupfile.so +LoadModule authz_dbm_module modules/mod_authz_dbm.so +LoadModule authz_default_module modules/mod_authz_default.so +LoadModule ldap_module modules/mod_ldap.so +LoadModule authnz_ldap_module modules/mod_authnz_ldap.so +LoadModule include_module modules/mod_include.so +LoadModule log_config_module modules/mod_log_config.so +LoadModule logio_module modules/mod_logio.so +LoadModule env_module modules/mod_env.so +LoadModule ext_filter_module modules/mod_ext_filter.so +LoadModule mime_magic_module modules/mod_mime_magic.so +LoadModule expires_module modules/mod_expires.so +LoadModule deflate_module modules/mod_deflate.so +LoadModule headers_module modules/mod_headers.so +LoadModule usertrack_module modules/mod_usertrack.so +LoadModule setenvif_module modules/mod_setenvif.so +LoadModule mime_module modules/mod_mime.so +LoadModule dav_module modules/mod_dav.so +LoadModule status_module modules/mod_status.so +LoadModule autoindex_module modules/mod_autoindex.so +LoadModule info_module modules/mod_info.so +LoadModule dav_fs_module modules/mod_dav_fs.so +LoadModule vhost_alias_module modules/mod_vhost_alias.so +LoadModule negotiation_module modules/mod_negotiation.so +LoadModule dir_module modules/mod_dir.so +LoadModule actions_module modules/mod_actions.so +LoadModule speling_module 
modules/mod_speling.so +LoadModule userdir_module modules/mod_userdir.so +LoadModule alias_module modules/mod_alias.so +LoadModule rewrite_module modules/mod_rewrite.so +LoadModule proxy_module modules/mod_proxy.so +LoadModule proxy_balancer_module modules/mod_proxy_balancer.so +LoadModule proxy_ftp_module modules/mod_proxy_ftp.so +LoadModule proxy_http_module modules/mod_proxy_http.so +LoadModule proxy_connect_module modules/mod_proxy_connect.so +LoadModule cache_module modules/mod_cache.so +LoadModule suexec_module modules/mod_suexec.so +LoadModule disk_cache_module modules/mod_disk_cache.so +LoadModule file_cache_module modules/mod_file_cache.so +LoadModule mem_cache_module modules/mod_mem_cache.so +LoadModule cgi_module modules/mod_cgi.so + +# +# The following modules are not loaded by default: +# +#LoadModule cern_meta_module modules/mod_cern_meta.so +#LoadModule asis_module modules/mod_asis.so + +# +# Load config files from the config directory "/etc/httpd/conf.d". +# +Include conf.d/*.conf + +# +# ExtendedStatus controls whether Apache will generate "full" status +# information (ExtendedStatus On) or just basic information (ExtendedStatus +# Off) when the "server-status" handler is called. The default is Off. +# +#ExtendedStatus On + +# +# If you wish httpd to run as a different user or group, you must run +# httpd as root initially and it will switch. +# +# User/Group: The name (or #number) of the user/group to run httpd as. +# . On SCO (ODT 3) use "User nouser" and "Group nogroup". +# . On HPUX you may not be able to use shared memory as nobody, and the +# suggested workaround is to create a user www and use that user. +# NOTE that some kernels refuse to setgid(Group) or semctl(IPC_SET) +# when the value of (unsigned)Group is above 60000; +# don't use Group #-1 on these systems! 
+# +User apache +Group apache + +### Section 2: 'Main' server configuration +# +# The directives in this section set up the values used by the 'main' +# server, which responds to any requests that aren't handled by a +# definition. These values also provide defaults for +# any containers you may define later in the file. +# +# All of these directives may appear inside containers, +# in which case these default settings will be overridden for the +# virtual host being defined. +# + +# +# ServerAdmin: Your address, where problems with the server should be +# e-mailed. This address appears on some server-generated pages, such +# as error documents. e.g. admin@your-domain.com +# +ServerAdmin root@localhost + +# +# ServerName gives the name and port that the server uses to identify itself. +# This can often be determined automatically, but we recommend you specify +# it explicitly to prevent problems during startup. +# +# If this is not set to valid DNS name for your host, server-generated +# redirections will not work. See also the UseCanonicalName directive. +# +# If your host doesn't have a registered DNS name, enter its IP address here. +# You will have to access it by its address anyway, and this will make +# redirections work in a sensible way. +# +#ServerName www.example.com:80 + +# +# UseCanonicalName: Determines how Apache constructs self-referencing +# URLs and the SERVER_NAME and SERVER_PORT variables. +# When set "Off", Apache will use the Hostname and Port supplied +# by the client. When set "On", Apache will use the value of the +# ServerName directive. +# +UseCanonicalName Off + +# +# DocumentRoot: The directory out of which you will serve your +# documents. By default, all requests are taken from this directory, but +# symbolic links and aliases may be used to point to other locations. 
+# +DocumentRoot "/var/www/html" + +# +# Each directory to which Apache has access can be configured with respect +# to which services and features are allowed and/or disabled in that +# directory (and its subdirectories). +# +# First, we configure the "default" to be a very restrictive set of +# features. +# + + Options FollowSymLinks + AllowOverride None + + +# +# Note that from this point forward you must specifically allow +# particular features to be enabled - so if something's not working as +# you might expect, make sure that you have specifically enabled it +# below. +# + +# +# This should be changed to whatever you set DocumentRoot to. +# + + +# +# Possible values for the Options directive are "None", "All", +# or any combination of: +# Indexes Includes FollowSymLinks SymLinksifOwnerMatch ExecCGI MultiViews +# +# Note that "MultiViews" must be named *explicitly* --- "Options All" +# doesn't give it to you. +# +# The Options directive is both complicated and important. Please see +# http://httpd.apache.org/docs/2.2/mod/core.html#options +# for more information. +# + Options Indexes FollowSymLinks + +# +# AllowOverride controls what directives may be placed in .htaccess files. +# It can be "All", "None", or any combination of the keywords: +# Options FileInfo AuthConfig Limit +# + AllowOverride All + +# +# Controls who can get stuff from this server. +# + Order allow,deny + Allow from all + + + +# +# UserDir: The name of the directory that is appended onto a user's home +# directory if a ~user request is received. +# +# The path to the end user account 'public_html' directory must be +# accessible to the webserver userid. This usually means that ~userid +# must have permissions of 711, ~userid/public_html must have permissions +# of 755, and documents contained therein must be world-readable. +# Otherwise, the client will only receive a "403 Forbidden" message. 
+# +# See also: http://httpd.apache.org/docs/misc/FAQ.html#forbidden +# + + # + # UserDir is disabled by default since it can confirm the presence + # of a username on the system (depending on home directory + # permissions). + # + UserDir disable + + # + # To enable requests to /~user/ to serve the user's public_html + # directory, remove the "UserDir disable" line above, and uncomment + # the following line instead: + # + #UserDir public_html + + + +# +# Control access to UserDir directories. The following is an example +# for a site where these directories are restricted to read-only. +# +# +# AllowOverride FileInfo AuthConfig Limit +# Options MultiViews Indexes SymLinksIfOwnerMatch IncludesNoExec +# +# Order allow,deny +# Allow from all +# +# +# Order deny,allow +# Deny from all +# +# + +# +# DirectoryIndex: sets the file that Apache will serve if a directory +# is requested. +# +# The index.html.var file (a type-map) is used to deliver content- +# negotiated documents. The MultiViews Option can be used for the +# same purpose, but it is much slower. +# +DirectoryIndex index.html index.html.var + +# +# AccessFileName: The name of the file to look for in each directory +# for additional configuration directives. See also the AllowOverride +# directive. +# +AccessFileName .htaccess + +# +# The following lines prevent .htaccess and .htpasswd files from being +# viewed by Web clients. +# + + Order allow,deny + Deny from all + + +# +# TypesConfig describes where the mime.types file (or equivalent) is +# to be found. +# +TypesConfig /etc/mime.types + +# +# DefaultType is the default MIME type the server will use for a document +# if it cannot otherwise determine one, such as from filename extensions. +# If your server contains mostly text or HTML documents, "text/plain" is +# a good value. 
If most of your content is binary, such as applications +# or images, you may want to use "application/octet-stream" instead to +# keep browsers from trying to display binary files as though they are +# text. +# +DefaultType text/plain + +# +# The mod_mime_magic module allows the server to use various hints from the +# contents of the file itself to determine its type. The MIMEMagicFile +# directive tells the module where the hint definitions are located. +# + +# MIMEMagicFile /usr/share/magic.mime + MIMEMagicFile conf/magic + + +# +# HostnameLookups: Log the names of clients or just their IP addresses +# e.g., www.apache.org (on) or 204.62.129.132 (off). +# The default is off because it'd be overall better for the net if people +# had to knowingly turn this feature on, since enabling it means that +# each client request will result in AT LEAST one lookup request to the +# nameserver. +# +HostnameLookups Off + +# +# EnableMMAP: Control whether memory-mapping is used to deliver +# files (assuming that the underlying OS supports it). +# The default is on; turn this off if you serve from NFS-mounted +# filesystems. On some systems, turning it off (regardless of +# filesystem) can improve performance; for details, please see +# http://httpd.apache.org/docs/2.2/mod/core.html#enablemmap +# +#EnableMMAP off + +# +# EnableSendfile: Control whether the sendfile kernel support is +# used to deliver files (assuming that the OS supports it). +# The default is on; turn this off if you serve from NFS-mounted +# filesystems. Please see +# http://httpd.apache.org/docs/2.2/mod/core.html#enablesendfile +# +#EnableSendfile off + +# +# ErrorLog: The location of the error log file. +# If you do not specify an ErrorLog directive within a +# container, error messages relating to that virtual host will be +# logged here. If you *do* define an error logfile for a +# container, that host's errors will be logged there and not here. 
+# +ErrorLog logs/error_log + +# +# LogLevel: Control the number of messages logged to the error_log. +# Possible values include: debug, info, notice, warn, error, crit, +# alert, emerg. +# +LogLevel warn + +# +# The following directives define some format nicknames for use with +# a CustomLog directive (see below). +# +LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined +LogFormat "%h %l %u %t \"%r\" %>s %b" common +LogFormat "%{Referer}i -> %U" referer +LogFormat "%{User-agent}i" agent + +# "combinedio" includes actual counts of actual bytes received (%I) and sent (%O); this +# requires the mod_logio module to be loaded. +#LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio + +# +# The location and format of the access logfile (Common Logfile Format). +# If you do not define any access logfiles within a +# container, they will be logged here. Contrariwise, if you *do* +# define per- access logfiles, transactions will be +# logged therein and *not* in this file. +# +#CustomLog logs/access_log common + +# +# If you would like to have separate agent and referer logfiles, uncomment +# the following directives. +# +#CustomLog logs/referer_log referer +#CustomLog logs/agent_log agent + +# +# For a single logfile with access, agent, and referer information +# (Combined Logfile Format), use the following directive: +# +CustomLog logs/access_log combined + +# +# Optionally add a line containing the server version and virtual host +# name to server-generated pages (internal error documents, FTP directory +# listings, mod_status and mod_info output etc., but not CGI generated +# documents or custom error documents). +# Set to "EMail" to also include a mailto: link to the ServerAdmin. +# Set to one of: On | Off | EMail +# +ServerSignature On + +# +# Aliases: Add here as many aliases as you need (with no limit). 
The format is +# Alias fakename realname +# +# Note that if you include a trailing / on fakename then the server will +# require it to be present in the URL. So "/icons" isn't aliased in this +# example, only "/icons/". If the fakename is slash-terminated, then the +# realname must also be slash terminated, and if the fakename omits the +# trailing slash, the realname must also omit it. +# +# We include the /icons/ alias for FancyIndexed directory listings. If you +# do not use FancyIndexing, you may comment this out. +# +Alias /icons/ "/var/www/icons/" + + + Options Indexes MultiViews FollowSymLinks + AllowOverride None + Order allow,deny + Allow from all + + +# +# WebDAV module configuration section. +# + + # Location of the WebDAV lock database. + DAVLockDB /var/lib/dav/lockdb + + +# +# ScriptAlias: This controls which directories contain server scripts. +# ScriptAliases are essentially the same as Aliases, except that +# documents in the realname directory are treated as applications and +# run by the server when requested rather than as documents sent to the client. +# The same rules about trailing "/" apply to ScriptAlias directives as to +# Alias. +# +ScriptAlias /cgi-bin/ "/var/www/cgi-bin/" + +# +# "/var/www/cgi-bin" should be changed to whatever your ScriptAliased +# CGI directory exists, if you have that configured. +# + + AllowOverride None + Options None + Order allow,deny + Allow from all + + +# +# Redirect allows you to tell clients about documents which used to exist in +# your server's namespace, but do not anymore. This allows you to tell the +# clients where to look for the relocated document. +# Example: +# Redirect permanent /foo http://www.example.com/bar + +# +# Directives controlling the display of server-generated directory listings. +# + +# +# IndexOptions: Controls the appearance of server-generated directory +# listings. 
+# +IndexOptions FancyIndexing VersionSort NameWidth=* HTMLTable + +# +# AddIcon* directives tell the server which icon to show for different +# files or filename extensions. These are only displayed for +# FancyIndexed directories. +# +AddIconByEncoding (CMP,/icons/compressed.gif) x-compress x-gzip + +AddIconByType (TXT,/icons/text.gif) text/* +AddIconByType (IMG,/icons/image2.gif) image/* +AddIconByType (SND,/icons/sound2.gif) audio/* +AddIconByType (VID,/icons/movie.gif) video/* + +AddIcon /icons/binary.gif .bin .exe +AddIcon /icons/binhex.gif .hqx +AddIcon /icons/tar.gif .tar +AddIcon /icons/world2.gif .wrl .wrl.gz .vrml .vrm .iv +AddIcon /icons/compressed.gif .Z .z .tgz .gz .zip +AddIcon /icons/a.gif .ps .ai .eps +AddIcon /icons/layout.gif .html .shtml .htm .pdf +AddIcon /icons/text.gif .txt +AddIcon /icons/c.gif .c +AddIcon /icons/p.gif .pl .py +AddIcon /icons/f.gif .for +AddIcon /icons/dvi.gif .dvi +AddIcon /icons/uuencoded.gif .uu +AddIcon /icons/script.gif .conf .sh .shar .csh .ksh .tcl +AddIcon /icons/tex.gif .tex +AddIcon /icons/bomb.gif core + +AddIcon /icons/back.gif .. +AddIcon /icons/hand.right.gif README +AddIcon /icons/folder.gif ^^DIRECTORY^^ +AddIcon /icons/blank.gif ^^BLANKICON^^ + +# +# DefaultIcon is which icon to show for files which do not have an icon +# explicitly set. +# +DefaultIcon /icons/unknown.gif + +# +# AddDescription allows you to place a short description after a file in +# server-generated indexes. These are only displayed for FancyIndexed +# directories. +# Format: AddDescription "description" filename +# +#AddDescription "GZIP compressed document" .gz +#AddDescription "tar archive" .tar +#AddDescription "GZIP compressed tar archive" .tgz + +# +# ReadmeName is the name of the README file the server will look for by +# default, and append to directory listings. +# +# HeaderName is the name of a file which should be prepended to +# directory indexes. 
+ReadmeName README.html +HeaderName HEADER.html + +# +# IndexIgnore is a set of filenames which directory indexing should ignore +# and not include in the listing. Shell-style wildcarding is permitted. +# +IndexIgnore .??* *~ *# HEADER* README* RCS CVS *,v *,t + +# +# DefaultLanguage and AddLanguage allows you to specify the language of +# a document. You can then use content negotiation to give a browser a +# file in a language the user can understand. +# +# Specify a default language. This means that all data +# going out without a specific language tag (see below) will +# be marked with this one. You probably do NOT want to set +# this unless you are sure it is correct for all cases. +# +# * It is generally better to not mark a page as +# * being a certain language than marking it with the wrong +# * language! +# +# DefaultLanguage nl +# +# Note 1: The suffix does not have to be the same as the language +# keyword --- those with documents in Polish (whose net-standard +# language code is pl) may wish to use "AddLanguage pl .po" to +# avoid the ambiguity with the common suffix for perl scripts. +# +# Note 2: The example entries below illustrate that in some cases +# the two character 'Language' abbreviation is not identical to +# the two character 'Country' code for its country, +# E.g. 'Danmark/dk' versus 'Danish/da'. +# +# Note 3: In the case of 'ltz' we violate the RFC by using a three char +# specifier. There is 'work in progress' to fix this and get +# the reference data for rfc1766 cleaned up. 
+# +# Catalan (ca) - Croatian (hr) - Czech (cs) - Danish (da) - Dutch (nl) +# English (en) - Esperanto (eo) - Estonian (et) - French (fr) - German (de) +# Greek-Modern (el) - Hebrew (he) - Italian (it) - Japanese (ja) +# Korean (ko) - Luxembourgeois* (ltz) - Norwegian Nynorsk (nn) +# Norwegian (no) - Polish (pl) - Portugese (pt) +# Brazilian Portuguese (pt-BR) - Russian (ru) - Swedish (sv) +# Simplified Chinese (zh-CN) - Spanish (es) - Traditional Chinese (zh-TW) +# +AddLanguage ca .ca +AddLanguage cs .cz .cs +AddLanguage da .dk +AddLanguage de .de +AddLanguage el .el +AddLanguage en .en +AddLanguage eo .eo +AddLanguage es .es +AddLanguage et .et +AddLanguage fr .fr +AddLanguage he .he +AddLanguage hr .hr +AddLanguage it .it +AddLanguage ja .ja +AddLanguage ko .ko +AddLanguage ltz .ltz +AddLanguage nl .nl +AddLanguage nn .nn +AddLanguage no .no +AddLanguage pl .po +AddLanguage pt .pt +AddLanguage pt-BR .pt-br +AddLanguage ru .ru +AddLanguage sv .sv +AddLanguage zh-CN .zh-cn +AddLanguage zh-TW .zh-tw + +# +# LanguagePriority allows you to give precedence to some languages +# in case of a tie during content negotiation. +# +# Just list the languages in decreasing order of preference. We have +# more or less alphabetized them here. You probably want to change this. +# +LanguagePriority en ca cs da de el eo es et fr he hr it ja ko ltz nl nn no pl pt pt-BR ru sv zh-CN zh-TW + +# +# ForceLanguagePriority allows you to serve a result page rather than +# MULTIPLE CHOICES (Prefer) [in case of a tie] or NOT ACCEPTABLE (Fallback) +# [in case no accepted languages matched the available variants] +# +ForceLanguagePriority Prefer Fallback + +# +# Specify a default charset for all content served; this enables +# interpretation of all content as UTF-8 by default. 
To use the +# default browser choice (ISO-8859-1), or to allow the META tags +# in HTML content to override this choice, comment out this +# directive: +# +AddDefaultCharset UTF-8 + +# +# AddType allows you to add to or override the MIME configuration +# file mime.types for specific file types. +# +#AddType application/x-tar .tgz + +# +# AddEncoding allows you to have certain browsers uncompress +# information on the fly. Note: Not all browsers support this. +# Despite the name similarity, the following Add* directives have nothing +# to do with the FancyIndexing customization directives above. +# +#AddEncoding x-compress .Z +#AddEncoding x-gzip .gz .tgz + +# If the AddEncoding directives above are commented-out, then you +# probably should define those extensions to indicate media types: +# +AddType application/x-compress .Z +AddType application/x-gzip .gz .tgz + +# +# AddHandler allows you to map certain file extensions to "handlers": +# actions unrelated to filetype. These can be either built into the server +# or added with the Action directive (see below) +# +# To use CGI scripts outside of ScriptAliased directories: +# (You will also need to add "ExecCGI" to the "Options" directive.) +# +#AddHandler cgi-script .cgi + +# +# For files that include their own HTTP headers: +# +#AddHandler send-as-is asis + +# +# For type maps (negotiated resources): +# (This is enabled by default to allow the Apache "It Worked" page +# to be distributed in multiple languages.) +# +AddHandler type-map var + +# +# Filters allow you to process content before it is sent to the client. +# +# To parse .shtml files for server-side includes (SSI): +# (You will also need to add "Includes" to the "Options" directive.) +# +AddType text/html .shtml +AddOutputFilter INCLUDES .shtml + +# +# Action lets you define media types that will execute a script whenever +# a matching file is called. This eliminates the need for repeated URL +# pathnames for oft-used CGI file processors. 
+# Format: Action media/type /cgi-script/location +# Format: Action handler-name /cgi-script/location +# + +# +# Customizable error responses come in three flavors: +# 1) plain text 2) local redirects 3) external redirects +# +# Some examples: +#ErrorDocument 500 "The server made a boo boo." +#ErrorDocument 404 /missing.html +#ErrorDocument 404 "/cgi-bin/missing_handler.pl" +#ErrorDocument 402 http://www.example.com/subscription_info.html +# + +# +# Putting this all together, we can internationalize error responses. +# +# We use Alias to redirect any /error/HTTP_.html.var response to +# our collection of by-error message multi-language collections. We use +# includes to substitute the appropriate text. +# +# You can modify the messages' appearance without changing any of the +# default HTTP_.html.var files by adding the line: +# +# Alias /error/include/ "/your/include/path/" +# +# which allows you to create your own set of files by starting with the +# /var/www/error/include/ files and +# copying them to /your/include/path/, even on a per-VirtualHost basis. 
+# + +Alias /error/ "/var/www/error/" + + + + + AllowOverride None + Options IncludesNoExec + AddOutputFilter Includes html + AddHandler type-map var + Order allow,deny + Allow from all + LanguagePriority en es de fr + ForceLanguagePriority Prefer Fallback + + +# ErrorDocument 400 /error/HTTP_BAD_REQUEST.html.var +# ErrorDocument 401 /error/HTTP_UNAUTHORIZED.html.var +# ErrorDocument 403 /error/HTTP_FORBIDDEN.html.var +# ErrorDocument 404 /error/HTTP_NOT_FOUND.html.var +# ErrorDocument 405 /error/HTTP_METHOD_NOT_ALLOWED.html.var +# ErrorDocument 408 /error/HTTP_REQUEST_TIME_OUT.html.var +# ErrorDocument 410 /error/HTTP_GONE.html.var +# ErrorDocument 411 /error/HTTP_LENGTH_REQUIRED.html.var +# ErrorDocument 412 /error/HTTP_PRECONDITION_FAILED.html.var +# ErrorDocument 413 /error/HTTP_REQUEST_ENTITY_TOO_LARGE.html.var +# ErrorDocument 414 /error/HTTP_REQUEST_URI_TOO_LARGE.html.var +# ErrorDocument 415 /error/HTTP_UNSUPPORTED_MEDIA_TYPE.html.var +# ErrorDocument 500 /error/HTTP_INTERNAL_SERVER_ERROR.html.var +# ErrorDocument 501 /error/HTTP_NOT_IMPLEMENTED.html.var +# ErrorDocument 502 /error/HTTP_BAD_GATEWAY.html.var +# ErrorDocument 503 /error/HTTP_SERVICE_UNAVAILABLE.html.var +# ErrorDocument 506 /error/HTTP_VARIANT_ALSO_VARIES.html.var + + + + +# +# The following directives modify normal HTTP response behavior to +# handle known problems with browser implementations. +# +BrowserMatch "Mozilla/2" nokeepalive +BrowserMatch "MSIE 4\.0b2;" nokeepalive downgrade-1.0 force-response-1.0 +BrowserMatch "RealPlayer 4\.0" force-response-1.0 +BrowserMatch "Java/1\.0" force-response-1.0 +BrowserMatch "JDK/1\.0" force-response-1.0 + +# +# The following directive disables redirects on non-GET requests for +# a directory that does not include the trailing slash. This fixes a +# problem with Microsoft WebFolders which does not appropriately handle +# redirects for folders with DAV methods. +# Same deal with Apple's DAV filesystem and Gnome VFS support for DAV. 
+# +BrowserMatch "Microsoft Data Access Internet Publishing Provider" redirect-carefully +BrowserMatch "MS FrontPage" redirect-carefully +BrowserMatch "^WebDrive" redirect-carefully +BrowserMatch "^WebDAVFS/1.[0123]" redirect-carefully +BrowserMatch "^gnome-vfs/1.0" redirect-carefully +BrowserMatch "^XML Spy" redirect-carefully +BrowserMatch "^Dreamweaver-WebDAV-SCM1" redirect-carefully + +# +# Allow server status reports generated by mod_status, +# with the URL of http://servername/server-status +# Change the ".example.com" to match your domain to enable. +# +# +# SetHandler server-status +# Order deny,allow +# Deny from all +# Allow from .example.com +# + +# +# Allow remote server configuration reports, with the URL of +# http://servername/server-info (requires that mod_info.c be loaded). +# Change the ".example.com" to match your domain to enable. +# +# +# SetHandler server-info +# Order deny,allow +# Deny from all +# Allow from .example.com +# + +# +# Proxy Server directives. Uncomment the following lines to +# enable the proxy server: +# +# +#ProxyRequests On +# +# +# Order deny,allow +# Deny from all +# Allow from .example.com +# + +# +# Enable/disable the handling of HTTP/1.1 "Via:" headers. +# ("Full" adds the server version; "Block" removes all outgoing Via: headers) +# Set to one of: Off | On | Full | Block +# +#ProxyVia On + +# +# To enable a cache of proxied content, uncomment the following lines. +# See http://httpd.apache.org/docs/2.2/mod/mod_cache.html for more details. +# +# +# CacheEnable disk / +# CacheRoot "/var/cache/mod_proxy" +# +# + +# +# End of proxy directives. + +### Section 3: Virtual Hosts +# +# VirtualHost: If you want to maintain multiple domains/hostnames on your +# machine you can setup VirtualHost containers for them. Most configurations +# use only name-based virtual hosts so the server doesn't need to worry about +# IP addresses. This is indicated by the asterisks in the directives below. 
+# +# Please see the documentation at +# +# for further details before you try to setup virtual hosts. +# +# You may use the command line option '-S' to verify your virtual host +# configuration. + +# +# Use name-based virtual hosting. +# +#NameVirtualHost *:80 +# +# NOTE: NameVirtualHost cannot be used without a port specifier +# (e.g. :80) if mod_ssl is being used, due to the nature of the +# SSL protocol. +# + +# +# VirtualHost example: +# Almost any Apache directive may go into a VirtualHost container. +# The first VirtualHost section is used for requests without a known +# server name. +# +# +# ServerAdmin webmaster@dummy-host.example.com +# DocumentRoot /www/docs/dummy-host.example.com +# ServerName dummy-host.example.com +# ErrorLog logs/dummy-host.example.com-error_log +# CustomLog logs/dummy-host.example.com-access_log common +# diff --git a/tools/systemvm/debian/config/etc/init.d/cloud b/tools/systemvm/debian/config/etc/init.d/cloud index 952daf79a78..09d189a6a0a 100755 --- a/tools/systemvm/debian/config/etc/init.d/cloud +++ b/tools/systemvm/debian/config/etc/init.d/cloud @@ -1,15 +1,17 @@ -#!/bin/sh -e +#!/bin/bash -e ### BEGIN INIT INFO -# Provides: cloud-passwd-srvr +# Provides: cloud # Required-Start: mountkernfs $local_fs cloud-early-config # Required-Stop: $local_fs # Should-Start: # Should-Stop: -# Default-Start: 2345 -# Default-Stop: 2345 +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 # Short-Description: Start up the cloud.com service ### END INIT INFO +#set -x + if [ -f /mnt/cmdline ] then CMDLINE=$(cat /mnt/cmdline) @@ -34,7 +36,7 @@ then . /etc/init.d/functions fi -if [ -f . /lib/lsb/init-functions ] +if [ -f ./lib/lsb/init-functions ] then . 
/lib/lsb/init-functions fi @@ -119,7 +121,7 @@ case "$1" in restart) stop start ;; - *) echo $"Usage: $0 {start|stop|status|restart}" + *) echo "Usage: $0 {start|stop|status|restart}" exit 1 ;; esac diff --git a/tools/systemvm/debian/config/etc/init.d/cloud-early-config b/tools/systemvm/debian/config/etc/init.d/cloud-early-config index 569a253a365..df547ffb5c7 100755 --- a/tools/systemvm/debian/config/etc/init.d/cloud-early-config +++ b/tools/systemvm/debian/config/etc/init.d/cloud-early-config @@ -1,4 +1,4 @@ -##!/bin/sh -e +#!/bin/bash -e ### BEGIN INIT INFO # Provides: cloud-early-config # Required-Start: mountkernfs $local_fs @@ -28,7 +28,7 @@ patch() { if [ -e /dev/xvdd ]; then mkdir -p /media/cdrom mount -o ro /dev/xvdd /media/cdrom - tar xzf /media/cdrom/patch.tgz -C / + #tar xzf /media/cdrom/patch.tgz -C / cat /proc/cmdline > /var/cache/cloud/cmdline /opt/cloud/bin/patchsystemvm.sh umount /media/cdrom @@ -55,7 +55,7 @@ setup_interface() { fi fi - if [[ "$ip" != "0.0.0.0" && "$ip" != "" ]] + if [ "$ip" != "0.0.0.0" -a "$ip" != "" ] then echo "iface $intf inet $bootproto" >> /etc/network/interfaces echo " address $ip " >> /etc/network/interfaces @@ -67,7 +67,8 @@ setup_interface() { } enable_fwding() { - echo $1 > /proc/sys/net/ipv4/ip_forward + echo "enable_fwding = $1" + echo "$1" > /proc/sys/net/ipv4/ip_forward } enable_svc() { @@ -75,15 +76,15 @@ enable_svc() { local enabled=$2 local cfg=/etc/default/${svc} - sed -i 's/ENABLED=.*$/ENABLED=$enabled/g' $cfg + sed -i "s/ENABLED=.*$/ENABLED=$enabled/" $cfg } disable_hvc() { - [ ! -f /proc/xen ] && sed -i 's/^vc/#vc/' /etc/inittab && telinit q + [ ! 
-d /proc/xen ] && sed -i 's/^vc/#vc/' /etc/inittab && telinit q + [ -d /proc/xen ] && sed -i 's/^#vc/vc/' /etc/inittab && telinit q } setup_common() { - disable_hvc init_interfaces $1 $2 $3 setup_interface "0" $ETH0_IP $ETH0_MASK $GW setup_interface "1" $ETH1_IP $ETH1_MASK $GW @@ -105,7 +106,7 @@ setup_common() { echo "nameserver $NS2" >> /etc/dnsmasq-resolv.conf echo "nameserver $NS2" >> /etc/resolv.conf fi - if [[ -n "$MGMTNET" && -n "$LOCAL_GW" ]] + if [ -n "$MGMTNET" -a -n "$LOCAL_GW" ] then ip route add $MGMTNET via $LOCAL_GW dev eth1 fi @@ -180,7 +181,7 @@ setup_secstorage() { cp /etc/iptables/iptables-secstorage /etc/iptables/rules enable_fwding 0 enable_svc haproxy 0 - enable_fwding 0 + enable_svc dnsmasq 0 } setup_console_proxy() { @@ -193,11 +194,12 @@ setup_console_proxy() { cp /etc/iptables/iptables-consoleproxy /etc/iptables/rules enable_fwding 0 enable_svc haproxy 0 - enable_fwding 0 + enable_svc dnsmasq 0 chkconfig apache2 off } start() { + patch case $TYPE in router) [ "$NAME" == "" ] && NAME=router @@ -216,7 +218,6 @@ start() { setup_console_proxy; ;; esac - patch return 0 } @@ -227,7 +228,6 @@ else CMDLINE=$(cat /proc/cmdline) fi -disable_hvc TYPE="unknown" BOOTPROTO="static" diff --git a/tools/systemvm/debian/config/etc/init.d/postinit b/tools/systemvm/debian/config/etc/init.d/postinit index ae17565c50b..d063f077daf 100755 --- a/tools/systemvm/debian/config/etc/init.d/postinit +++ b/tools/systemvm/debian/config/etc/init.d/postinit @@ -1,7 +1,14 @@ -#! 
/bin/bash -# chkconfig: 35 11 90 -# description: pre-boot configuration using boot line parameters -# This file exists in /etc/init.d/ +#!/bin/bash -e +### BEGIN INIT INFO +# Provides: postinit +# Required-Start: mountkernfs $local_fs cloud-early-config +# Required-Stop: $local_fs +# Should-Start: +# Should-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: post-init +### END INIT INFO replace_in_file() { local filename=$1 @@ -26,6 +33,27 @@ setup_console_proxy() { echo "$public_ip $NAME" >> /etc/hosts } +start() { + case $TYPE in + secstorage) + [ "$NAME" == "" ] && NAME=secstorage + setup_secstorage; + ;; + consoleproxy) + [ "$NAME" == "" ] && NAME=consoleproxy + setup_console_proxy; + ;; + esac +} + +stop() { + +} + +status() { + +} + CMDLINE=$(cat /proc/cmdline) TYPE="router" BOOTPROTO="static" @@ -98,13 +126,18 @@ fi ETH1_IP=$(ifconfig eth1|grep 'inet addr:'|cut -d : -f 2|cut -d \ -f 1) ETH2_IP=$(ifconfig eth2|grep 'inet addr:'|cut -d : -f 2|cut -d \ -f 1) -case $TYPE in - secstorage) - [ "$NAME" == "" ] && NAME=secstorage - setup_secstorage; - ;; - consoleproxy) - [ "$NAME" == "" ] && NAME=consoleproxy - setup_console_proxy; - ;; + +case "$1" in + start) start + ;; + stop) stop + ;; + status) status + ;; + restart) stop + start + ;; + *) echo "Usage: $0 {start|stop|status|restart}" + exit 1 + ;; esac diff --git a/tools/systemvm/debian/config/etc/ssh/sshd_config b/tools/systemvm/debian/config/etc/ssh/sshd_config new file mode 100644 index 00000000000..2bcd6e5e580 --- /dev/null +++ b/tools/systemvm/debian/config/etc/ssh/sshd_config @@ -0,0 +1,128 @@ +# $OpenBSD: sshd_config,v 1.75 2007/03/19 01:01:29 djm Exp $ + +# This is the sshd server system-wide configuration file. See +# sshd_config(5) for more information. 
+ +# This sshd was compiled with PATH=/usr/local/bin:/bin:/usr/bin + +# The strategy used for options in the default sshd_config shipped with +# OpenSSH is to specify options with their default value where +# possible, but leave them commented. Uncommented options change a +# default value. + +Port 3922 +#AddressFamily any +#ListenAddress 0.0.0.0 +#ListenAddress :: + +# Disable legacy (protocol version 1) support in the server for new +# installations. In future the default will change to require explicit +# activation of protocol 1 +Protocol 2 + +# HostKey for protocol version 1 +#HostKey /etc/ssh/ssh_host_key +# HostKeys for protocol version 2 +#HostKey /etc/ssh/ssh_host_rsa_key +#HostKey /etc/ssh/ssh_host_dsa_key + +# Lifetime and size of ephemeral version 1 server key +#KeyRegenerationInterval 1h +#ServerKeyBits 768 + +# Logging +# obsoletes QuietMode and FascistLogging +#SyslogFacility AUTH +SyslogFacility AUTHPRIV +#LogLevel INFO + +# Authentication: + +#LoginGraceTime 2m +PermitRootLogin yes +#StrictModes yes +#MaxAuthTries 6 + +#RSAAuthentication yes +#PubkeyAuthentication yes +#AuthorizedKeysFile .ssh/authorized_keys + +# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts +#RhostsRSAAuthentication no +# similar for protocol version 2 +#HostbasedAuthentication no +# Change to yes if you don't trust ~/.ssh/known_hosts for +# RhostsRSAAuthentication and HostbasedAuthentication +#IgnoreUserKnownHosts no +# Don't read the user's ~/.rhosts and ~/.shosts files +#IgnoreRhosts yes + +# To disable tunneled clear text passwords, change to no here! 
+#PasswordAuthentication yes +#PermitEmptyPasswords no +PasswordAuthentication no + +# Change to no to disable s/key passwords +#ChallengeResponseAuthentication yes +ChallengeResponseAuthentication no + +# Kerberos options +#KerberosAuthentication no +#KerberosOrLocalPasswd yes +#KerberosTicketCleanup yes +#KerberosGetAFSToken no + +# GSSAPI options +#GSSAPIAuthentication no +GSSAPIAuthentication no +#GSSAPICleanupCredentials yes +GSSAPICleanupCredentials yes + +# Set this to 'yes' to enable PAM authentication, account processing, +# and session processing. If this is enabled, PAM authentication will +# be allowed through the ChallengeResponseAuthentication and +# PasswordAuthentication. Depending on your PAM configuration, +# PAM authentication via ChallengeResponseAuthentication may bypass +# the setting of "PermitRootLogin without-password". +# If you just want the PAM account and session checks to run without +# PAM authentication, then enable this but set PasswordAuthentication +# and ChallengeResponseAuthentication to 'no'. 
+#UsePAM no +UsePAM yes + +# Accept locale-related environment variables +AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES +AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT +AcceptEnv LC_IDENTIFICATION LC_ALL +#AllowTcpForwarding yes +#GatewayPorts no +#X11Forwarding no +#X11Forwarding yes +#X11DisplayOffset 10 +#X11UseLocalhost yes +#PrintMotd yes +#PrintLastLog yes +#TCPKeepAlive yes +#UseLogin no +#UsePrivilegeSeparation yes +#PermitUserEnvironment no +#Compression delayed +#ClientAliveInterval 0 +#ClientAliveCountMax 3 +#ShowPatchLevel no +UseDNS no +#PidFile /var/run/sshd.pid +#MaxStartups 10 +#PermitTunnel no + +# no default banner path +#Banner /some/path + +# override default of no subsystems +Subsystem sftp /usr/libexec/openssh/sftp-server + +# Example of overriding settings on a per-user basis +#Match User anoncvs +# X11Forwarding no +# AllowTcpForwarding no +# ForceCommand cvs server diff --git a/tools/systemvm/debian/config/etc/sysctl.conf b/tools/systemvm/debian/config/etc/sysctl.conf index ba5cbe6137d..d5fe5d43e8e 100644 --- a/tools/systemvm/debian/config/etc/sysctl.conf +++ b/tools/systemvm/debian/config/etc/sysctl.conf @@ -30,4 +30,4 @@ kernel.core_uses_pid = 1 # Controls the use of TCP syncookies net.ipv4.tcp_syncookies = 1 -net.netfilter.nf_conntrack_max=65536 +net.ipv4.netfilter.ip_conntrack_max=65536 diff --git a/tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh b/tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh index f9476724a61..93f8d54465e 100755 --- a/tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh +++ b/tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh @@ -18,39 +18,35 @@ patch_console_proxy() { consoleproxy_svcs() { chkconfig cloud on chkconfig postinit on - chkconfig domr_webserver off + chkconfig cloud-passwd-srvr off chkconfig haproxy off ; chkconfig dnsmasq off - chkconfig sshd on - chkconfig httpd off - chkconfig nfs off - chkconfig nfslock off - 
chkconfig rpcbind off - chkconfig rpcidmap off + chkconfig ssh on + chkconfig apache2 off + chkconfig nfs-common off + chkconfig portmap off mkdir -p /var/log/cloud } secstorage_svcs() { chkconfig cloud on chkconfig postinit on - chkconfig domr_webserver off + chkconfig cloud-passwd-srvr off chkconfig haproxy off ; chkconfig dnsmasq off - chkconfig sshd on - chkconfig httpd off + chkconfig ssh on + chkconfig apache2 off mkdir -p /var/log/cloud } routing_svcs() { chkconfig cloud off - chkconfig domr_webserver on ; + chkconfig cloud-passwd-srvr on ; chkconfig haproxy on ; chkconfig dnsmasq on - chkconfig sshd on - chkconfig nfs off - chkconfig nfslock off - chkconfig rpcbind off - chkconfig rpcidmap off + chkconfig ssh on + chkconfig nfs-common off + chkconfig portmap off } CMDLINE=$(cat /var/cache/cloud/cmdline) diff --git a/tools/systemvm/debian/config/var/www/html/latest/.htaccess b/tools/systemvm/debian/config/var/www/html/latest/.htaccess new file mode 100644 index 00000000000..038a4c933cf --- /dev/null +++ b/tools/systemvm/debian/config/var/www/html/latest/.htaccess @@ -0,0 +1,5 @@ +Options +FollowSymLinks +RewriteEngine On +#RewriteBase / + +RewriteRule ^user-data$ ../userdata/%{REMOTE_ADDR}/user-data [L,NC,QSA] diff --git a/tools/systemvm/debian/config/var/www/html/userdata/.htaccess b/tools/systemvm/debian/config/var/www/html/userdata/.htaccess new file mode 100644 index 00000000000..5a928f6da25 --- /dev/null +++ b/tools/systemvm/debian/config/var/www/html/userdata/.htaccess @@ -0,0 +1 @@ +Options -Indexes From 2fda7f56994b9feb87725e4de454fdba9f2d9955 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Wed, 1 Sep 2010 22:57:45 -0700 Subject: [PATCH 048/145] More fixes... 
--- .../config/etc/init.d/cloud-early-config | 33 ++++++++++++++++--- .../config/opt/cloud/bin/patchsystemvm.sh | 5 +-- 2 files changed, 31 insertions(+), 7 deletions(-) diff --git a/tools/systemvm/debian/config/etc/init.d/cloud-early-config b/tools/systemvm/debian/config/etc/init.d/cloud-early-config index df547ffb5c7..ff283804151 100755 --- a/tools/systemvm/debian/config/etc/init.d/cloud-early-config +++ b/tools/systemvm/debian/config/etc/init.d/cloud-early-config @@ -25,13 +25,35 @@ EOF } patch() { + local PATCH_MOUNT=/media/cdrom + local patchfile=$PATCH_MOUNT/cloud-scripts.tgz + local md5file=/var/cache/cloud/cloud-scripts-signature + local shouldpatch=false + mkdir -p $PATCH_MOUNT if [ -e /dev/xvdd ]; then - mkdir -p /media/cdrom - mount -o ro /dev/xvdd /media/cdrom - #tar xzf /media/cdrom/patch.tgz -C / + mount -o ro /dev/xvdd $PATCH_MOUNT + local oldmd5= + [ -f ${md5file} ] && oldmd5=$(cat ${md5file}) + local newmd5= + [ -f ${patchfile} ] && newmd5=$(md5sum ${patchfile} | awk '{print $1}') + + if [ "$oldmd5" != "$newmd5" ] && [ -f ${patchfile} ] && [ "$newmd5" != "" ] + then + shouldpatch=true + logger -t cloud "Patching scripts" + tar xzf $patchfile -C ${path} + echo ${newmd5} > ${md5file} + fi cat /proc/cmdline > /var/cache/cloud/cmdline - /opt/cloud/bin/patchsystemvm.sh - umount /media/cdrom + logger -t cloud "Patching cloud service" + /opt/cloud/bin/patchsystemvm.sh $PATCH_MOUNT + umount $PATCH_MOUNT + if [ "$shouldpatch" == "true" ] + then + logger -t cloud "Rebooting system since we patched init scripts" + sleep 2 + reboot + fi fi if [ -f /mnt/cmdline ]; then cat /mnt/cmdline > /var/cache/cloud/cmdline @@ -297,6 +319,7 @@ case "$1" in start) log_action_begin_msg "Executing cloud-early-config" + logger -t cloud "Executing cloud-early-config" if start; then log_action_end_msg $? 
else diff --git a/tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh b/tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh index 93f8d54465e..30104ebf379 100755 --- a/tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh +++ b/tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh @@ -51,6 +51,7 @@ routing_svcs() { CMDLINE=$(cat /var/cache/cloud/cmdline) TYPE="router" +PATCH_MOUNT=$1 for i in $CMDLINE do @@ -66,9 +67,9 @@ for i in $CMDLINE esac done -if [ "$TYPE" == "consoleproxy" ] || [ "$TYPE" == "secstorage" ] && [ -f /media/cdrom/systemvm.zip ] +if [ "$TYPE" == "consoleproxy" ] || [ "$TYPE" == "secstorage" ] && [ -f ${PATCH_MOUNT}/systemvm.zip ] then - patch_console_proxy /media/cdrom/systemvm.zip + patch_console_proxy ${PATCH_MOUNT}/systemvm.zip if [ $? -gt 0 ] then printf "Failed to apply patch systemvm\n" >$logfile From 7065605b5323e91da6ed00745e461ec34799f8c8 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Wed, 1 Sep 2010 22:58:05 -0700 Subject: [PATCH 049/145] More fixes... 
--- .../debian/config/etc/apache2/httpd.conf | 0 .../etc/apache2/sites-available/default | 41 +++++ .../etc/apache2/sites-available/default-ssl | 172 ++++++++++++++++++ .../etc/apache2/sites-enabled/000-default | 41 +++++ 4 files changed, 254 insertions(+) create mode 100644 tools/systemvm/debian/config/etc/apache2/httpd.conf create mode 100644 tools/systemvm/debian/config/etc/apache2/sites-available/default create mode 100644 tools/systemvm/debian/config/etc/apache2/sites-available/default-ssl create mode 100644 tools/systemvm/debian/config/etc/apache2/sites-enabled/000-default diff --git a/tools/systemvm/debian/config/etc/apache2/httpd.conf b/tools/systemvm/debian/config/etc/apache2/httpd.conf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tools/systemvm/debian/config/etc/apache2/sites-available/default b/tools/systemvm/debian/config/etc/apache2/sites-available/default new file mode 100644 index 00000000000..75d4c4059f2 --- /dev/null +++ b/tools/systemvm/debian/config/etc/apache2/sites-available/default @@ -0,0 +1,41 @@ + + ServerAdmin webmaster@localhost + + DocumentRoot /var/www/html + + Options FollowSymLinks + AllowOverride None + + + Options Indexes FollowSymLinks MultiViews + AllowOverride All + Order allow,deny + allow from all + + + ScriptAlias /cgi-bin/ /usr/lib/cgi-bin/ + + AllowOverride None + Options +ExecCGI -MultiViews +SymLinksIfOwnerMatch + Order allow,deny + Allow from all + + + ErrorLog ${APACHE_LOG_DIR}/error.log + + # Possible values include: debug, info, notice, warn, error, crit, + # alert, emerg. 
+ LogLevel warn + + CustomLog ${APACHE_LOG_DIR}/access.log combined + + Alias /doc/ "/usr/share/doc/" + + Options Indexes MultiViews FollowSymLinks + AllowOverride None + Order deny,allow + Deny from all + Allow from 127.0.0.0/255.0.0.0 ::1/128 + + + diff --git a/tools/systemvm/debian/config/etc/apache2/sites-available/default-ssl b/tools/systemvm/debian/config/etc/apache2/sites-available/default-ssl new file mode 100644 index 00000000000..ca44850f2f2 --- /dev/null +++ b/tools/systemvm/debian/config/etc/apache2/sites-available/default-ssl @@ -0,0 +1,172 @@ + + + ServerAdmin webmaster@localhost + + DocumentRoot /var/www/html + + Options FollowSymLinks + AllowOverride None + + + Options Indexes FollowSymLinks MultiViews + AllowOverride all + Order allow,deny + allow from all + + + ScriptAlias /cgi-bin/ /usr/lib/cgi-bin/ + + AllowOverride None + Options +ExecCGI -MultiViews +SymLinksIfOwnerMatch + Order allow,deny + Allow from all + + + ErrorLog ${APACHE_LOG_DIR}/error.log + + # Possible values include: debug, info, notice, warn, error, crit, + # alert, emerg. + LogLevel warn + + CustomLog ${APACHE_LOG_DIR}/ssl_access.log combined + + Alias /doc/ "/usr/share/doc/" + + Options Indexes MultiViews FollowSymLinks + AllowOverride None + Order deny,allow + Deny from all + Allow from 127.0.0.0/255.0.0.0 ::1/128 + + + # SSL Engine Switch: + # Enable/Disable SSL for this virtual host. + SSLEngine on + + # A self-signed (snakeoil) certificate can be created by installing + # the ssl-cert package. See + # /usr/share/doc/apache2.2-common/README.Debian.gz for more info. + # If both key and certificate are stored in the same file, only the + # SSLCertificateFile directive is needed. 
+ SSLCertificateFile /etc/ssl/certs/ssl-cert-snakeoil.pem + SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key + + # Server Certificate Chain: + # Point SSLCertificateChainFile at a file containing the + # concatenation of PEM encoded CA certificates which form the + # certificate chain for the server certificate. Alternatively + # the referenced file can be the same as SSLCertificateFile + # when the CA certificates are directly appended to the server + # certificate for convinience. + #SSLCertificateChainFile /etc/apache2/ssl.crt/server-ca.crt + + # Certificate Authority (CA): + # Set the CA certificate verification path where to find CA + # certificates for client authentication or alternatively one + # huge file containing all of them (file must be PEM encoded) + # Note: Inside SSLCACertificatePath you need hash symlinks + # to point to the certificate files. Use the provided + # Makefile to update the hash symlinks after changes. + #SSLCACertificatePath /etc/ssl/certs/ + #SSLCACertificateFile /etc/apache2/ssl.crt/ca-bundle.crt + + # Certificate Revocation Lists (CRL): + # Set the CA revocation path where to find CA CRLs for client + # authentication or alternatively one huge file containing all + # of them (file must be PEM encoded) + # Note: Inside SSLCARevocationPath you need hash symlinks + # to point to the certificate files. Use the provided + # Makefile to update the hash symlinks after changes. + #SSLCARevocationPath /etc/apache2/ssl.crl/ + #SSLCARevocationFile /etc/apache2/ssl.crl/ca-bundle.crl + + # Client Authentication (Type): + # Client certificate verification type and depth. Types are + # none, optional, require and optional_no_ca. Depth is a + # number which specifies how deeply to verify the certificate + # issuer chain before deciding the certificate is not valid. 
+ #SSLVerifyClient require + #SSLVerifyDepth 10 + + # Access Control: + # With SSLRequire you can do per-directory access control based + # on arbitrary complex boolean expressions containing server + # variable checks and other lookup directives. The syntax is a + # mixture between C and Perl. See the mod_ssl documentation + # for more details. + # + #SSLRequire ( %{SSL_CIPHER} !~ m/^(EXP|NULL)/ \ + # and %{SSL_CLIENT_S_DN_O} eq "Snake Oil, Ltd." \ + # and %{SSL_CLIENT_S_DN_OU} in {"Staff", "CA", "Dev"} \ + # and %{TIME_WDAY} >= 1 and %{TIME_WDAY} <= 5 \ + # and %{TIME_HOUR} >= 8 and %{TIME_HOUR} <= 20 ) \ + # or %{REMOTE_ADDR} =~ m/^192\.76\.162\.[0-9]+$/ + # + + # SSL Engine Options: + # Set various options for the SSL engine. + # o FakeBasicAuth: + # Translate the client X.509 into a Basic Authorisation. This means that + # the standard Auth/DBMAuth methods can be used for access control. The + # user name is the `one line' version of the client's X.509 certificate. + # Note that no password is obtained from the user. Every entry in the user + # file needs this password: `xxj31ZMTZzkVA'. + # o ExportCertData: + # This exports two additional environment variables: SSL_CLIENT_CERT and + # SSL_SERVER_CERT. These contain the PEM-encoded certificates of the + # server (always existing) and the client (only existing when client + # authentication is used). This can be used to import the certificates + # into CGI scripts. + # o StdEnvVars: + # This exports the standard SSL/TLS related `SSL_*' environment variables. + # Per default this exportation is switched off for performance reasons, + # because the extraction step is an expensive operation and is usually + # useless for serving static content. So one usually enables the + # exportation for CGI and SSI requests only. + # o StrictRequire: + # This denies access when "SSLRequireSSL" or "SSLRequire" applied even + # under a "Satisfy any" situation, i.e. 
when it applies access is denied + # and no other module can change it. + # o OptRenegotiate: + # This enables optimized SSL connection renegotiation handling when SSL + # directives are used in per-directory context. + #SSLOptions +FakeBasicAuth +ExportCertData +StrictRequire + + SSLOptions +StdEnvVars + + + SSLOptions +StdEnvVars + + + # SSL Protocol Adjustments: + # The safe and default but still SSL/TLS standard compliant shutdown + # approach is that mod_ssl sends the close notify alert but doesn't wait for + # the close notify alert from client. When you need a different shutdown + # approach you can use one of the following variables: + # o ssl-unclean-shutdown: + # This forces an unclean shutdown when the connection is closed, i.e. no + # SSL close notify alert is send or allowed to received. This violates + # the SSL/TLS standard but is needed for some brain-dead browsers. Use + # this when you receive I/O errors because of the standard approach where + # mod_ssl sends the close notify alert. + # o ssl-accurate-shutdown: + # This forces an accurate shutdown when the connection is closed, i.e. a + # SSL close notify alert is send and mod_ssl waits for the close notify + # alert of the client. This is 100% SSL/TLS standard compliant, but in + # practice often causes hanging connections with brain-dead browsers. Use + # this only for browsers where you know that their SSL implementation + # works correctly. + # Notice: Most problems of broken clients are also related to the HTTP + # keep-alive facility, so you usually additionally want to disable + # keep-alive for those clients, too. Use variable "nokeepalive" for this. + # Similarly, one has to force some clients to use HTTP/1.0 to workaround + # their broken HTTP/1.1 implementation. Use variables "downgrade-1.0" and + # "force-response-1.0" for this. 
+ BrowserMatch "MSIE [2-6]" \ + nokeepalive ssl-unclean-shutdown \ + downgrade-1.0 force-response-1.0 + # MSIE 7 and newer should be able to use keepalive + BrowserMatch "MSIE [17-9]" ssl-unclean-shutdown + + + diff --git a/tools/systemvm/debian/config/etc/apache2/sites-enabled/000-default b/tools/systemvm/debian/config/etc/apache2/sites-enabled/000-default new file mode 100644 index 00000000000..75d4c4059f2 --- /dev/null +++ b/tools/systemvm/debian/config/etc/apache2/sites-enabled/000-default @@ -0,0 +1,41 @@ + + ServerAdmin webmaster@localhost + + DocumentRoot /var/www/html + + Options FollowSymLinks + AllowOverride None + + + Options Indexes FollowSymLinks MultiViews + AllowOverride All + Order allow,deny + allow from all + + + ScriptAlias /cgi-bin/ /usr/lib/cgi-bin/ + + AllowOverride None + Options +ExecCGI -MultiViews +SymLinksIfOwnerMatch + Order allow,deny + Allow from all + + + ErrorLog ${APACHE_LOG_DIR}/error.log + + # Possible values include: debug, info, notice, warn, error, crit, + # alert, emerg. + LogLevel warn + + CustomLog ${APACHE_LOG_DIR}/access.log combined + + Alias /doc/ "/usr/share/doc/" + + Options Indexes MultiViews FollowSymLinks + AllowOverride None + Order deny,allow + Deny from all + Allow from 127.0.0.0/255.0.0.0 ::1/128 + + + From e0335739f8beb16473d5080748c8c485b0f3892f Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Wed, 1 Sep 2010 22:58:36 -0700 Subject: [PATCH 050/145] apache fixes... 
--- tools/systemvm/debian/buildsystemvm.sh | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/tools/systemvm/debian/buildsystemvm.sh b/tools/systemvm/debian/buildsystemvm.sh index 870b8f2b931..f0df06e1a62 100755 --- a/tools/systemvm/debian/buildsystemvm.sh +++ b/tools/systemvm/debian/buildsystemvm.sh @@ -110,7 +110,7 @@ ff02::3 ip6-allhosts EOF cat >> etc/network/interfaces << EOF -auto lo +auto lo eth0 iface lo inet loopback # The primary network interface @@ -324,7 +324,7 @@ packages() { DEBCONF_DB_OVERRIDE=’File{/root/config.dat}’ export DEBIAN_FRONTEND DEBIAN_PRIORITY DEBCONF_DB_OVERRIDE - chroot . apt-get --no-install-recommends -q -y --force-yes install rsyslog chkconfig insserv net-tools ifupdown vim-tiny netbase iptables openssh-server grub e2fsprogs dhcp3-client dnsmasq tcpdump socat wget apache2 python bzip2 sed gawk diff grep gzip less tar telnet xl2tpd traceroute openswan psmisc inetutils-ping arping httping dnsutils + chroot . apt-get --no-install-recommends -q -y --force-yes install rsyslog chkconfig insserv net-tools ifupdown vim-tiny netbase iptables openssh-server grub e2fsprogs dhcp3-client dnsmasq tcpdump socat wget apache2 ssl-cert python bzip2 sed gawk diff grep gzip less tar telnet xl2tpd traceroute openswan psmisc inetutils-ping arping httping dnsutils zip unzip ethtool uuid chroot . apt-get --no-install-recommends -q -y --force-yes install haproxy nfs-common @@ -343,10 +343,18 @@ password() { chroot . echo "root:$PASSWORD" | chroot . chpasswd } +apache2() { + chroot . a2enmod ssl rewrite auth-basic auth-digest + chroot . 
a2ensite default-ssl + cp etc/apache2/sites-available/default etc/apache2/sites-available/default.orig + cp etc/apache2/sites-available/default-ssl etc/apache2/sites-available/default.orig +} + services() { mkdir -p ./var/www/html mkdir -p ./opt/cloud/bin mkdir -p ./var/cache/cloud + mkdir -p ./usr/share/cloud mkdir -p ./usr/local/cloud mkdir -p ./root/.ssh @@ -372,12 +380,17 @@ cleanup() { rm -rf usr/share/locale/[a-d]* rm -rf usr/share/locale/[f-z]* rm -rf usr/share/doc/* - size=$(df | grep $MOUNTPOINT | awk '{print $4}') + size=$(df $MOUNTPOINT | awk '{print $4}' | grep -v Available) dd if=/dev/zero of=$MOUNTPOINT/zeros.img bs=1M count=$((((size-200000)) / 1000)) rm -f $MOUNTPOINT/zeros.img fi } +signature() { + (cd ${scriptdir}/config; tar cvzf ${MOUNTPOINT}/usr/share/cloud/cloud-scripts.tgz *) + md5sum ${MOUNTPOINT}/usr/share/cloud/cloud-scripts.tgz |awk '{print $1}' > ${MOUNTPOINT}/var/cache/cloud/cloud-scripts-signature +} + mkdir -p $IMAGENAME mkdir -p $LOCATION MOUNTPOINT=/mnt/$IMAGENAME/ @@ -438,9 +451,15 @@ password echo "*************CONFIGURING SERVICES********************" services +echo "*************CONFIGURING APACHE********************" +apache2 + echo "*************CLEANING UP********************" cleanup +echo "*************GENERATING SIGNATURE********************" +signature + cd $scriptdir umount $MOUNTPOINT/proc From 5434d570d37dbd8b39322c54fbc7823470ba126e Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Thu, 2 Sep 2010 17:08:14 -0700 Subject: [PATCH 051/145] cloud services fixes... 
--- tools/systemvm/debian/buildsystemvm.sh | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/tools/systemvm/debian/buildsystemvm.sh b/tools/systemvm/debian/buildsystemvm.sh index f0df06e1a62..3ff17f126fe 100755 --- a/tools/systemvm/debian/buildsystemvm.sh +++ b/tools/systemvm/debian/buildsystemvm.sh @@ -114,7 +114,7 @@ auto lo eth0 iface lo inet loopback # The primary network interface -iface eth0 inet dhcp +iface eth0 inet static EOF } @@ -151,6 +151,8 @@ EOF fixgrub() { + kern=$(basename $(ls boot/vmlinuz-*)) + ver=${kern#vmlinuz-} cat > boot/grub/menu.lst << EOF default 0 timeout 2 @@ -160,10 +162,10 @@ color cyan/blue white/blue # kopt=root=LABEL=ROOT ro ## ## End Default Options ## -title Debian GNU/Linux, kernel 2.6.32-5-686-bigmem +title Debian GNU/Linux, kernel $ver root (hd0,0) -kernel /boot/vmlinuz-2.6.32-5-686-bigmem root=LABEL=ROOT ro console=tty0 xencons=ttyS0,115200 console=hvc0 quiet -initrd /boot/initrd.img-2.6.32-5-686-bigmem +kernel /boot/$kern root=LABEL=ROOT ro console=tty0 xencons=ttyS0,115200 console=hvc0 quiet +initrd /boot/initrd.img-$ver ### END DEBIAN AUTOMAGIC KERNELS LIST EOF @@ -186,6 +188,7 @@ EOF } fixacpid() { + mkdir -p etc/acpi/events cat >> etc/acpi/events/power << EOF event=button/power.* action=/usr/local/sbin/power.sh "%e" @@ -198,6 +201,10 @@ EOF } fixiptables() { +cat >> etc/modules << EOF +nf_conntrack +nf_conntrack_ipv4 +EOF cat > etc/init.d/iptables-persistent << EOF #!/bin/sh ### BEGIN INIT INFO @@ -344,10 +351,10 @@ password() { } apache2() { - chroot . a2enmod ssl rewrite auth-basic auth-digest + chroot . a2enmod ssl rewrite auth_basic auth_digest chroot . 
a2ensite default-ssl cp etc/apache2/sites-available/default etc/apache2/sites-available/default.orig - cp etc/apache2/sites-available/default-ssl etc/apache2/sites-available/default.orig + cp etc/apache2/sites-available/default-ssl etc/apache2/sites-available/default-ssl.orig } services() { @@ -387,7 +394,7 @@ cleanup() { } signature() { - (cd ${scriptdir}/config; tar cvzf ${MOUNTPOINT}/usr/share/cloud/cloud-scripts.tgz *) + (cd ${scriptdir}/config; tar czf ${MOUNTPOINT}/usr/share/cloud/cloud-scripts.tgz *) md5sum ${MOUNTPOINT}/usr/share/cloud/cloud-scripts.tgz |awk '{print $1}' > ${MOUNTPOINT}/var/cache/cloud/cloud-scripts-signature } @@ -398,7 +405,7 @@ IMAGELOC=$LOCATION/$IMAGENAME.img scriptdir=$(dirname $PWD/$0) rm -f $IMAGELOC - +begin=$(date +%s) echo "*************INSTALLING BASEIMAGE********************" baseimage @@ -465,4 +472,7 @@ cd $scriptdir umount $MOUNTPOINT/proc umount $MOUNTPOINT/dev umount $MOUNTPOINT +fin=$(date +%s) +t=$((fin-begin)) +echo "Finished building image $IMAGELOC in $t seconds" From 1ddc229e6b2c8b1462011f66bbdc2b5d681533e5 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Thu, 2 Sep 2010 17:08:53 -0700 Subject: [PATCH 052/145] More cloud services fixes... 
--- .../systemvm/debian/config/etc/default/cloud | 2 + .../config/etc/default/cloud-passwd-srvr | 2 + tools/systemvm/debian/config/etc/init.d/cloud | 8 +- .../config/etc/init.d/cloud-early-config | 108 +++++++++++------- .../config/etc/init.d/cloud-passwd-srvr | 52 ++++++++- .../debian/config/etc/init.d/postinit | 4 +- tools/systemvm/debian/config/etc/rc.local | 15 +++ .../debian/config/opt/cloud/bin/passwd_server | 6 +- .../config/opt/cloud/bin/patchsystemvm.sh | 6 + .../config/opt/cloud/bin/serve_password.sh | 18 +-- 10 files changed, 163 insertions(+), 58 deletions(-) create mode 100644 tools/systemvm/debian/config/etc/default/cloud create mode 100644 tools/systemvm/debian/config/etc/default/cloud-passwd-srvr create mode 100755 tools/systemvm/debian/config/etc/rc.local diff --git a/tools/systemvm/debian/config/etc/default/cloud b/tools/systemvm/debian/config/etc/default/cloud new file mode 100644 index 00000000000..6da9d9466df --- /dev/null +++ b/tools/systemvm/debian/config/etc/default/cloud @@ -0,0 +1,2 @@ +#set ENABLED to 1 if you want the init script to start the password server +ENABLED=0 diff --git a/tools/systemvm/debian/config/etc/default/cloud-passwd-srvr b/tools/systemvm/debian/config/etc/default/cloud-passwd-srvr new file mode 100644 index 00000000000..6da9d9466df --- /dev/null +++ b/tools/systemvm/debian/config/etc/default/cloud-passwd-srvr @@ -0,0 +1,2 @@ +#set ENABLED to 1 if you want the init script to start the password server +ENABLED=0 diff --git a/tools/systemvm/debian/config/etc/init.d/cloud b/tools/systemvm/debian/config/etc/init.d/cloud index 09d189a6a0a..c437f77350f 100755 --- a/tools/systemvm/debian/config/etc/init.d/cloud +++ b/tools/systemvm/debian/config/etc/init.d/cloud @@ -1,4 +1,4 @@ -#!/bin/bash -e +#!/bin/bash ### BEGIN INIT INFO # Provides: cloud # Required-Start: mountkernfs $local_fs cloud-early-config @@ -12,12 +12,16 @@ #set -x +ENABLED=0 +[ -e /etc/default/cloud ] && . 
/etc/default/cloud + if [ -f /mnt/cmdline ] then CMDLINE=$(cat /mnt/cmdline) else CMDLINE=$(cat /proc/cmdline) fi + TYPE="router" for i in $CMDLINE do @@ -111,6 +115,8 @@ status() { return 0 } +[ "$ENABLED" != 0 ] || exit 0 + case "$1" in start) start ;; diff --git a/tools/systemvm/debian/config/etc/init.d/cloud-early-config b/tools/systemvm/debian/config/etc/init.d/cloud-early-config index ff283804151..c90297821b9 100755 --- a/tools/systemvm/debian/config/etc/init.d/cloud-early-config +++ b/tools/systemvm/debian/config/etc/init.d/cloud-early-config @@ -1,4 +1,4 @@ -#!/bin/bash -e +#!/bin/bash ### BEGIN INIT INFO # Provides: cloud-early-config # Required-Start: mountkernfs $local_fs @@ -58,6 +58,7 @@ patch() { if [ -f /mnt/cmdline ]; then cat /mnt/cmdline > /var/cache/cloud/cmdline fi + return 0 } setup_interface() { @@ -80,8 +81,11 @@ setup_interface() { if [ "$ip" != "0.0.0.0" -a "$ip" != "" ] then echo "iface $intf inet $bootproto" >> /etc/network/interfaces - echo " address $ip " >> /etc/network/interfaces - echo " netmask $mask" >> /etc/network/interfaces + if [ "$bootproto" == "static" ] + then + echo " address $ip " >> /etc/network/interfaces + echo " netmask $mask" >> /etc/network/interfaces + fi fi ifdown $intf @@ -89,7 +93,7 @@ setup_interface() { } enable_fwding() { - echo "enable_fwding = $1" + logger -t cloud "enable_fwding = $1" echo "$1" > /proc/sys/net/ipv4/ip_forward } @@ -97,6 +101,7 @@ enable_svc() { local svc=$1 local enabled=$2 + logger -t cloud "Enable service ${svc} = $enabled" local cfg=/etc/default/${svc} sed -i "s/ENABLED=.*$/ENABLED=$enabled/" $cfg } @@ -137,34 +142,7 @@ setup_common() { ip route add default via $GW } -setup_router() { - setup_common eth0 eth1 eth2 - [ -z $DHCP_RANGE ] && DHCP_RANGE=$ETH0_IP - if [ -n "$DOMAIN" ] - then - #send domain name to dhcp clients - sed -i s/[#]*dhcp-option=15.*$/dhcp-option=15,\"$DOMAIN\"/ /etc/dnsmasq.conf - #DNS server will append $DOMAIN to local queries - sed -r -i 
s/^[#]?domain=.*$/domain=$DOMAIN/ /etc/dnsmasq.conf - #answer all local domain queries - sed -i -e "s/^[#]*local=.*$/local=\/$DOMAIN\//" /etc/dnsmasq.conf - fi - sed -i -e "s/^dhcp-range=.*$/dhcp-range=$DHCP_RANGE,static/" /etc/dnsmasq.conf - sed -i -e "s/^[#]*listen-address=.*$/listen-address=$ETH0_IP/" /etc/dnsmasq.conf - sed -i /gateway/d /etc/hosts - echo "$ETH0_IP $NAME" >> /etc/hosts - [ -f /etc/httpd/conf/httpd.conf ] && sed -i -e "s/^Listen.*$/Listen $ETH0_IP:80/" /etc/httpd/conf/httpd.conf - [ -f /etc/httpd/conf.d/ssl.conf ] && mv /etc/httpd/conf.d/ssl.conf /etc/httpd/conf.d/ssl.conf.bak - [ -f /etc/ssh/sshd_config ] && sed -i -e "s/^[#]*ListenAddress.*$/ListenAddress $ETH1_IP/" /etc/ssh/sshd_config - - enable_svc dnsmasq 1 - enable_svc haproxy 1 - enable_fwding 1 - cp /etc/iptables/iptables-router /etc/iptables/rules -} - -setup_dhcpsrvr() { - setup_common eth0 eth1 +setup_dnsmasq() { [ -z $DHCP_RANGE ] && DHCP_RANGE=$ETH0_IP [ -z $DOMAIN ] && DOMAIN="cloudnine.internal" if [ -n "$DOMAIN" ] @@ -177,15 +155,44 @@ setup_dhcpsrvr() { sed -i -e "s/^[#]*local=.*$/local=\/$DOMAIN\//" /etc/dnsmasq.conf fi sed -i -e "s/^dhcp-range=.*$/dhcp-range=$DHCP_RANGE,static/" /etc/dnsmasq.conf - sed -i -e "s/^[#]*dhcp-option=option:router.*$/dhcp-option=option:router,$GW/" /etc/dnsmasq.conf - #for now set up ourself as the dns server as well - #echo "dhcp-option=6,$NS1,$NS2" >> /etc/dnsmasq.conf + sed -i -e "s/^[#]*listen-address=.*$/listen-address=$ETH0_IP/" /etc/dnsmasq.conf + +} + +setup_sshd(){ + [ -f /etc/ssh/sshd_config ] && sed -i -e "s/^[#]*ListenAddress.*$/ListenAddress $ETH1_IP/" /etc/ssh/sshd_config +} + +setup_router() { + setup_common eth0 eth1 eth2 + setup_dnsmasq + sed -i /gateway/d /etc/hosts echo "$ETH0_IP $NAME" >> /etc/hosts - [ -f /etc/httpd/conf/httpd.conf ] && sed -i -e "s/^Listen.*$/Listen $ETH0_IP:80/" /etc/httpd/conf/httpd.conf - [ -f /etc/httpd/conf.d/ssl.conf ] && mv /etc/httpd/conf.d/ssl.conf /etc/httpd/conf.d/ssl.conf.bak + setup_sshd + + 
enable_svc dnsmasq 1 + enable_svc haproxy 1 + enable_svc cloud-passwd-srvr 1 + enable_svc cloud 0 + enable_fwding 1 + cp /etc/iptables/iptables-router /etc/iptables/rules +} + +setup_dhcpsrvr() { + setup_common eth0 eth1 + setup_dnsmasq + + sed -i /gateway/d /etc/hosts + echo "$ETH0_IP $NAME" >> /etc/hosts + + setup_sshd + + enable_svc dnsmasq 1 enable_svc haproxy 0 + enable_svc cloud-passwd-srvr 1 + enable_svc cloud 0 enable_fwding 0 cp /etc/iptables/iptables-router /etc/iptables/rules } @@ -196,14 +203,13 @@ setup_secstorage() { public_ip=$ETH2_IP [ "$ETH2_IP" == "0.0.0.0" ] && public_ip=$ETH1_IP echo "$public_ip $NAME" >> /etc/hosts - [ -f /etc/httpd/conf/httpd.conf ] && sed -i -e "s/^Listen.*:80$/Listen $public_ip:80/" /etc/httpd/conf/httpd.conf - [ -f /etc/httpd/conf/httpd.conf ] && sed -i -e "s/^Listen.*:443$/Listen $public_ip:443/" /etc/httpd/conf/httpd.conf - sed 's/ENABLED=.*$/ENABLED=0/g' /etc/default/haproxy cp /etc/iptables/iptables-secstorage /etc/iptables/rules enable_fwding 0 enable_svc haproxy 0 enable_svc dnsmasq 0 + enable_svc cloud-passwd-srvr 0 + enable_svc cloud 1 } setup_console_proxy() { @@ -212,12 +218,23 @@ setup_console_proxy() { [ "$ETH2_IP" == "0.0.0.0" ] && public_ip=$ETH1_IP sed -i /gateway/d /etc/hosts echo "$public_ip $NAME" >> /etc/hosts - sed 's/ENABLED=.*$/ENABLED=0/g' /etc/default/haproxy cp /etc/iptables/iptables-consoleproxy /etc/iptables/rules + enable_fwding 0 enable_svc haproxy 0 enable_svc dnsmasq 0 - chkconfig apache2 off + enable_svc cloud-passwd-srvr 0 + enable_svc cloud 1 +} + +setup_default() { + cat > /etc/network/interfaces << EOF +auto lo eth0 +iface lo inet loopback + +iface eth0 inet dhcp + +EOF } start() { @@ -239,10 +256,15 @@ start() { [ "$NAME" == "" ] && NAME=consoleproxy setup_console_proxy; ;; + unknown) + [ "$NAME" == "" ] && NAME=systemvm + setup_default; + ;; esac return 0 } +disable_hvc if [ -f /mnt/cmdline ] then CMDLINE=$(cat /mnt/cmdline) @@ -335,7 +357,7 @@ stop) force-reload|restart) 
log_warning_msg "Running $0 is deprecated because it may not enable again some interfaces" - log_action_begin_msg "Reconfiguring network interfaces" + log_action_begin_msg "Executing cloud-early-config" if start; then log_action_end_msg $? else diff --git a/tools/systemvm/debian/config/etc/init.d/cloud-passwd-srvr b/tools/systemvm/debian/config/etc/init.d/cloud-passwd-srvr index 52715938ec4..f990e232a41 100755 --- a/tools/systemvm/debian/config/etc/init.d/cloud-passwd-srvr +++ b/tools/systemvm/debian/config/etc/init.d/cloud-passwd-srvr @@ -1,4 +1,4 @@ -#!/bin/sh -e +#!/bin/bash ### BEGIN INIT INFO # Provides: cloud-passwd-srvr # Required-Start: mountkernfs $local_fs cloud-early-config @@ -10,4 +10,52 @@ # Short-Description: Web server that sends passwords to User VMs ### END INIT INFO -bash /opt/cloud/bin/passwd_server& + +ENABLED=0 +[ -e /etc/default/cloud-passwd-srvr ] && . /etc/default/cloud-passwd-srvr + +start() { + [ "$ENABLED" != 0 ] || exit 0 + nohup bash /opt/cloud/bin/passwd_server& +} + +getpid() { + pid=$(ps -ef | grep passwd_server | grep -v grep | awk '{print $2}') + echo $pid +} + +stop_socat() { + spid=$(pidof socat) + [ "$spid" != "" ] && kill -9 $spid && echo "Killed socat (pid=$spid)" + return 0 +} + +stop () { + stop_socat + pid=$(getpid) + [ "$pid" != "" ] && kill -9 $pid && echo "Stopped password server (pid=$pid)" && stop_socat && return 0 + echo "Password server is not running" && return 0 +} + +status () { + pid=$(getpid) + [ "$pid" != "" ] && echo "Password server is running (pid=$pid)" && return 0 + echo "Password server is not running" && return 0 +} + +case "$1" in + start) start + ;; + stop) stop + ;; + status) status + ;; + restart) stop + start + ;; + *) echo "Usage: $0 {start|stop|status|restart}" + exit 1 + ;; +esac + +exit 0 diff --git a/tools/systemvm/debian/config/etc/init.d/postinit b/tools/systemvm/debian/config/etc/init.d/postinit index d063f077daf..f9502408978 100755 --- a/tools/systemvm/debian/config/etc/init.d/postinit +++ 
b/tools/systemvm/debian/config/etc/init.d/postinit @@ -47,11 +47,11 @@ start() { } stop() { - + echo "" } status() { - + echo "" } CMDLINE=$(cat /proc/cmdline) diff --git a/tools/systemvm/debian/config/etc/rc.local b/tools/systemvm/debian/config/etc/rc.local new file mode 100755 index 00000000000..cb434a23526 --- /dev/null +++ b/tools/systemvm/debian/config/etc/rc.local @@ -0,0 +1,15 @@ +#/bin/bash + +[ ! -f /var/cache/cloud/enabled_svcs ] && touch /var/cache/cloud/enabled_svcs +for svc in $(cat /var/cache/cloud/enabled_svcs) +do + logger -t cloud "Starting $svc" + service $svc start +done + +[ ! -f /var/cache/cloud/disabled_svcs ] && touch /var/cache/cloud/disabled_svcs +for svc in $(cat /var/cache/cloud/disabled_svcs) +do + logger -t cloud "Stopping $svc" + service $svc stop +done diff --git a/tools/systemvm/debian/config/opt/cloud/bin/passwd_server b/tools/systemvm/debian/config/opt/cloud/bin/passwd_server index c0326485f70..ee9e531d72e 100755 --- a/tools/systemvm/debian/config/opt/cloud/bin/passwd_server +++ b/tools/systemvm/debian/config/opt/cloud/bin/passwd_server @@ -1,16 +1,18 @@ #!/bin/bash +. /etc/default/cloud-passwd-srvr guestIp=$(ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}') -while true +while [ "$ENABLED" == "1" ] do socat TCP4-LISTEN:8080,reuseaddr,crnl,bind=$guestIp SYSTEM:"/opt/cloud/bin/serve_password.sh \"\$SOCAT_PEERADDR\"" rc=$? if [ $rc -ne 0 ] then - logger "Socat failed with error code $rc. Restarting socat..." + logger -t cloud "Password server failed with error code $rc. Restarting socat..." sleep 3 fi + . 
/etc/default/cloud-passwd-srvr done diff --git a/tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh b/tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh index 30104ebf379..51f0bf1fbe6 100755 --- a/tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh +++ b/tools/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh @@ -25,6 +25,8 @@ consoleproxy_svcs() { chkconfig apache2 off chkconfig nfs-common off chkconfig portmap off + echo "cloud postinit ssh" > /var/cache/cloud/enabled_svcs + echo "cloud-passwd-srvr haproxy dnsmasq apache2 nfs-common portmap" > /var/cache/cloud/disabled_svcs mkdir -p /var/log/cloud } @@ -36,6 +38,8 @@ secstorage_svcs() { chkconfig dnsmasq off chkconfig ssh on chkconfig apache2 off + echo "cloud postinit ssh nfs-common portmap" > /var/cache/cloud/enabled_svcs + echo "cloud-passwd-srvr haproxy dnsmasq" > /var/cache/cloud/disabled_svcs mkdir -p /var/log/cloud } @@ -47,6 +51,8 @@ routing_svcs() { chkconfig ssh on chkconfig nfs-common off chkconfig portmap off + echo "cloud-passwd-srvr ssh dnsmasq haproxy apache2" > /var/cache/cloud/enabled_svcs + echo "cloud nfs-common portmap" > /var/cache/cloud/disabled_svcs } CMDLINE=$(cat /var/cache/cloud/cmdline) diff --git a/tools/systemvm/debian/config/opt/cloud/bin/serve_password.sh b/tools/systemvm/debian/config/opt/cloud/bin/serve_password.sh index d66f6553745..398a5591266 100755 --- a/tools/systemvm/debian/config/opt/cloud/bin/serve_password.sh +++ b/tools/systemvm/debian/config/opt/cloud/bin/serve_password.sh @@ -1,6 +1,8 @@ #!/bin/bash -# set -x +# set -x + +PASSWD_FILE=/var/cache/cloud/passwords #replace a line in a file of the form key=value # $1 filename @@ -26,7 +28,7 @@ get_value() { ip=$1 -logger "serve_password called to service a request for $ip." +logger -t cloud "serve_password called to service a request for $ip." 
while read input do @@ -47,23 +49,23 @@ done if [ "$request" == "send_my_password" ] then - password=$(get_value /root/passwords $ip) + password=$(get_value $PASSWD_FILE $ip) if [ "$password" == "" ] then - logger "send_password_to_domu sent bad_request to $ip." + logger -t cloud "serve_password sent bad_request to $ip." echo "bad_request" else - logger "send_password_to_domu sent a password to $ip." + logger -t cloud "serve_password sent a password to $ip." echo $password fi else if [ "$request" == "saved_password" ] then - replace_in_file /root/passwords $ip "saved_password" - logger "send_password_to_domu sent saved_password to $ip." + replace_in_file $PASSWD_FILE $ip "saved_password" + logger -t cloud "serve_password sent saved_password to $ip." echo "saved_password" else - logger "send_password_to_domu sent bad_request to $ip." + logger -t cloud "serve_password sent bad_request to $ip." echo "bad_request" fi fi From f94299fe0c1e02e530d580b7e27b461e7f07d98f Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Thu, 2 Sep 2010 17:09:54 -0700 Subject: [PATCH 053/145] changes for the new debian-based systemvm --- console-proxy/scripts/config_auth.sh | 5 +++ console-proxy/scripts/config_ssl.sh | 36 +++++++++++++++++- .../xen/resource/CitrixResourceBase.java | 6 +-- tools/systemvm/debian/systemvm.xml | 37 +++++++++++++++++++ 4 files changed, 79 insertions(+), 5 deletions(-) create mode 100644 tools/systemvm/debian/systemvm.xml diff --git a/console-proxy/scripts/config_auth.sh b/console-proxy/scripts/config_auth.sh index 893920d2be2..503c90f1d0a 100755 --- a/console-proxy/scripts/config_auth.sh +++ b/console-proxy/scripts/config_auth.sh @@ -2,7 +2,12 @@ BASE_DIR="/var/www/html/copy/template/" HTACCESS="$BASE_DIR/.htaccess" + PASSWDFILE="/etc/httpd/.htpasswd" +if [ -d /etc/apache2 ] +then + PASSWDFILE="/etc/apache2/.htpasswd" +fi config_htaccess() { mkdir -p $BASE_DIR diff --git a/console-proxy/scripts/config_ssl.sh b/console-proxy/scripts/config_ssl.sh index 
a3be8d32dff..ef59852d69b 100755 --- a/console-proxy/scripts/config_ssl.sh +++ b/console-proxy/scripts/config_ssl.sh @@ -15,6 +15,17 @@ config_httpd_conf() { echo "" >> /etc/httpd/conf/httpd.conf } +config_apache2_conf() { + local ip=$1 + local srvr=$2 + cp -f /etc/apache2/sites-available/default.orig /etc/apache2/sites-available/default + cp -f /etc/apache2/sites-available/default-ssl.orig /etc/apache2/sites-available/default-ssl + sed -i -e "s/VirtualHost.*:80$/VirtualHost $ip:80/" /etc/httpd/conf/httpd.conf + sed -i 's/_default_/$ip/' /etc/apache2/sites-available/default-ssl + sed -i 's/ssl-cert-snakeoil.key/realhostip.key/' /etc/apache2/sites-available/default-ssl + sed -i 's/ssl-cert-snakeoil.pem/realhostip.crt/' /etc/apache2/sites-available/default-ssl +} + copy_certs() { local certdir=$(dirname $0)/certs local mydir=$(dirname $0) @@ -25,16 +36,37 @@ copy_certs() { return 1 } +copy_certs_apache2() { + local certdir=$(dirname $0)/certs + local mydir=$(dirname $0) + if [ -d $certdir ] && [ -f $certdir/realhostip.key ] && [ -f $certdir/realhostip.crt ] ; then + cp $certdir/realhostip.key /etc/ssl/private/ && cp $certdir/realhostip.crt /etc/ssl/certs/ + return $? + fi + return 1 +} + if [ $# -ne 2 ] ; then echo $"Usage: `basename $0` ipaddr servername " exit 0 fi -copy_certs +if [ -d /etc/apache2 ] +then + copy_certs_apache2 +else + copy_certs +fi + if [ $? 
-ne 0 ] then echo "Failed to copy certificates" exit 2 fi -config_httpd_conf $1 $2 +if [ -d /etc/apache2 ] +then + config_apache2_conf $1 $2 +else + config_httpd_conf $1 $2 +fi diff --git a/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java index 22591ae59fd..d79bfffc56c 100644 --- a/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java +++ b/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java @@ -3175,11 +3175,11 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR Ternary mount = mounts.get(0); - Set templates = VM.getByNameLabel(conn, "CentOS 5.3"); + Set templates = VM.getByNameLabel(conn, "Debian Lenny 5.0 (32-bit)"); if (templates.size() == 0) { - templates = VM.getByNameLabel(conn, "CentOS 5.3 (64-bit)"); + templates = VM.getByNameLabel(conn, "Debian Lenny 5.0 (32-bit)"); if (templates.size() == 0) { - String msg = " can not find template CentOS 5.3 "; + String msg = " can not find template Debian Lenny 5.0 (32-bit) "; s_logger.warn(msg); return msg; } diff --git a/tools/systemvm/debian/systemvm.xml b/tools/systemvm/debian/systemvm.xml new file mode 100644 index 00000000000..ce6ecaf6e49 --- /dev/null +++ b/tools/systemvm/debian/systemvm.xml @@ -0,0 +1,37 @@ + + systemvm2 + 1572864 + 1572864 + 1 + + hvm + + + + + + + + destroy + restart + restart + + + /usr/bin/qemu-kvm + + + + + + + + + + + + + + + + + From 53e4db39ed88998828a50d52b72fcfba322ebe0a Mon Sep 17 00:00:00 2001 From: abhishek Date: Thu, 2 Sep 2010 18:09:47 -0700 Subject: [PATCH 054/145] bug 5927: incremental checkin --- .../com/cloud/server/ManagementServer.java | 4 +-- core/src/com/cloud/storage/dao/VolumeDao.java | 1 + .../com/cloud/storage/dao/VolumeDaoImpl.java | 14 ++++++++++ .../cloud/api/commands/DetachVolumeCmd.java | 22 ++++++++++++++-- .../executor/VolumeOperationExecutor.java | 2 +- .../async/executor/VolumeOperationParam.java | 8 +++--- 
.../cloud/server/ManagementServerImpl.java | 8 +++--- server/src/com/cloud/vm/UserVmManager.java | 2 +- .../src/com/cloud/vm/UserVmManagerImpl.java | 26 ++++++++++++++++--- 9 files changed, 70 insertions(+), 17 deletions(-) diff --git a/core/src/com/cloud/server/ManagementServer.java b/core/src/com/cloud/server/ManagementServer.java index ab380004dfe..9bfaf1cddae 100644 --- a/core/src/com/cloud/server/ManagementServer.java +++ b/core/src/com/cloud/server/ManagementServer.java @@ -615,8 +615,8 @@ public interface ManagementServer { * @volumeId * @throws InvalidParameterValueException, InternalErrorException */ - void detachVolumeFromVM(long volumeId, long startEventId) throws InternalErrorException; - long detachVolumeFromVMAsync(long volumeId) throws InvalidParameterValueException; + void detachVolumeFromVM(long volumeId, long startEventId, long deviceId, long instanceId) throws InternalErrorException; + long detachVolumeFromVMAsync(long volumeId, long deviceId, long instanceId) throws InvalidParameterValueException; /** * Attaches an ISO to the virtual CDROM device of the specified VM. Will fail if the VM already has an ISO mounted. 
diff --git a/core/src/com/cloud/storage/dao/VolumeDao.java b/core/src/com/cloud/storage/dao/VolumeDao.java index 39979744aab..3bd43faf9bc 100755 --- a/core/src/com/cloud/storage/dao/VolumeDao.java +++ b/core/src/com/cloud/storage/dao/VolumeDao.java @@ -46,4 +46,5 @@ public interface VolumeDao extends GenericDao { List listRemovedButNotDestroyed(); List findCreatedByInstance(long id); List findByPoolId(long poolId); + List findByInstanceAndDeviceId(long instanceId, long deviceId); } diff --git a/core/src/com/cloud/storage/dao/VolumeDaoImpl.java b/core/src/com/cloud/storage/dao/VolumeDaoImpl.java index b9040983db2..633f21b6982 100755 --- a/core/src/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/core/src/com/cloud/storage/dao/VolumeDaoImpl.java @@ -61,6 +61,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol protected final GenericSearchBuilder ActiveTemplateSearch; protected final SearchBuilder RemovedButNotDestroyedSearch; protected final SearchBuilder PoolIdSearch; + protected final SearchBuilder InstanceAndDeviceIdSearch; protected static final String SELECT_VM_SQL = "SELECT DISTINCT instance_id from volumes v where v.host_id = ? 
and v.mirror_state = ?"; protected static final String SELECT_VM_ID_SQL = "SELECT DISTINCT instance_id from volumes v where v.host_id = ?"; @@ -117,6 +118,14 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol sc.setParameters("instanceId", id); return listActiveBy(sc); } + + @Override + public List findByInstanceAndDeviceId(long instanceId, long deviceId){ + SearchCriteria sc = InstanceAndDeviceIdSearch.create(); + sc.setParameters("instanceId", instanceId); + sc.setParameters("deviceId", deviceId); + return listActiveBy(sc); + } @Override public List findByPoolId(long poolId) { @@ -304,6 +313,11 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol InstanceIdSearch.and("instanceId", InstanceIdSearch.entity().getInstanceId(), SearchCriteria.Op.EQ); InstanceIdSearch.done(); + InstanceAndDeviceIdSearch = createSearchBuilder(); + InstanceAndDeviceIdSearch.and("instanceId", InstanceAndDeviceIdSearch.entity().getInstanceId(), SearchCriteria.Op.EQ); + InstanceAndDeviceIdSearch.and("deviceId", InstanceAndDeviceIdSearch.entity().getDeviceId(), SearchCriteria.Op.EQ); + InstanceAndDeviceIdSearch.done(); + PoolIdSearch = createSearchBuilder(); PoolIdSearch.and("poolId", PoolIdSearch.entity().getPoolId(), SearchCriteria.Op.EQ); PoolIdSearch.done(); diff --git a/server/src/com/cloud/api/commands/DetachVolumeCmd.java b/server/src/com/cloud/api/commands/DetachVolumeCmd.java index d8f69b965d4..c4a8bd0920a 100644 --- a/server/src/com/cloud/api/commands/DetachVolumeCmd.java +++ b/server/src/com/cloud/api/commands/DetachVolumeCmd.java @@ -37,7 +37,9 @@ public class DetachVolumeCmd extends BaseCmd { static { s_properties.add(new Pair(BaseCmd.Properties.ACCOUNT_OBJ, Boolean.FALSE)); - s_properties.add(new Pair(BaseCmd.Properties.ID, Boolean.TRUE)); + s_properties.add(new Pair(BaseCmd.Properties.ID, Boolean.FALSE)); + s_properties.add(new Pair(BaseCmd.Properties.DEVICE_ID, Boolean.FALSE)); + s_properties.add(new Pair(BaseCmd.Properties.VIRTUAL_MACHINE_ID, 
Boolean.FALSE)); } public String getName() { @@ -56,6 +58,22 @@ public class DetachVolumeCmd extends BaseCmd { public List> execute(Map params) { Account account = (Account) params.get(BaseCmd.Properties.ACCOUNT_OBJ.getName()); Long volumeId = (Long) params.get(BaseCmd.Properties.ID.getName()); + Long deviceId = (Long) params.get(BaseCmd.Properties.DEVICE_ID.getName()); + Long instanceId = (Long) params.get(BaseCmd.Properties.VIRTUAL_MACHINE_ID.getName()); + + if((volumeId==null && (deviceId==null && instanceId==null)) || (volumeId!=null && (deviceId!=null || instanceId!=null)) || (volumeId==null && (deviceId==null || instanceId==null))) + { + throw new ServerApiException(BaseCmd.PARAM_ERROR, "Please provide either a volume id, or a tuple(device id, instance id)"); + } + + if(volumeId!=null) + { + deviceId = instanceId = Long.valueOf("0"); + } + else + { + volumeId = Long.valueOf("0");; + } boolean isAdmin; if (account == null) { @@ -82,7 +100,7 @@ public class DetachVolumeCmd extends BaseCmd { } try { - long jobId = getManagementServer().detachVolumeFromVMAsync(volumeId); + long jobId = getManagementServer().detachVolumeFromVMAsync(volumeId,deviceId,instanceId); if (jobId == 0) { s_logger.warn("Unable to schedule async-job for DetachVolume comamnd"); diff --git a/server/src/com/cloud/async/executor/VolumeOperationExecutor.java b/server/src/com/cloud/async/executor/VolumeOperationExecutor.java index 9f32126c568..750bf2f6f9e 100644 --- a/server/src/com/cloud/async/executor/VolumeOperationExecutor.java +++ b/server/src/com/cloud/async/executor/VolumeOperationExecutor.java @@ -86,7 +86,7 @@ public class VolumeOperationExecutor extends BaseAsyncJobExecutor { eventType = EventTypes.EVENT_VOLUME_DETACH; failureDescription = "Failed to detach volume"; - asyncMgr.getExecutorContext().getManagementServer().detachVolumeFromVM(param.getVolumeId(), param.getEventId()); + asyncMgr.getExecutorContext().getManagementServer().detachVolumeFromVM(param.getVolumeId(), 
param.getEventId(),param.getDeviceId(),param.getVmId()); success = true; asyncMgr.completeAsyncJob(getJob().getId(), AsyncJobResult.STATUS_SUCCEEDED, 0, null); } else { diff --git a/server/src/com/cloud/async/executor/VolumeOperationParam.java b/server/src/com/cloud/async/executor/VolumeOperationParam.java index 4ee39e087a0..3fd1e0f50b9 100644 --- a/server/src/com/cloud/async/executor/VolumeOperationParam.java +++ b/server/src/com/cloud/async/executor/VolumeOperationParam.java @@ -40,8 +40,8 @@ public class VolumeOperationParam { // Used for Attach, Detach, and Delete private long volumeId; private long eventId; - private Long deviceId; - + private long deviceId; + public VolumeOperationParam() { } @@ -117,11 +117,11 @@ public class VolumeOperationParam { return eventId; } - public void setDeviceId(Long deviceId) { + public void setDeviceId(long deviceId) { this.deviceId = deviceId; } - public Long getDeviceId() { + public long getDeviceId() { return deviceId; } diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index 053bd0484a4..a86b44987c7 100644 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -1985,12 +1985,12 @@ public class ManagementServerImpl implements ManagementServer { } @Override - public void detachVolumeFromVM(long volumeId, long startEventId) throws InternalErrorException { - _vmMgr.detachVolumeFromVM(volumeId, startEventId); + public void detachVolumeFromVM(long volumeId, long startEventId, long deviceId, long instanceId) throws InternalErrorException { + _vmMgr.detachVolumeFromVM(volumeId, startEventId, deviceId, instanceId); } @Override - public long detachVolumeFromVMAsync(long volumeId) throws InvalidParameterValueException { + public long detachVolumeFromVMAsync(long volumeId, long deviceId, long instanceId) throws InvalidParameterValueException { VolumeVO volume = _volumeDao.findById(volumeId); // 
Check that the volume is a data volume @@ -2022,6 +2022,8 @@ public class ManagementServerImpl implements ManagementServer { param.setAccountId(volume.getAccountId()); param.setOp(VolumeOp.Detach); param.setVolumeId(volumeId); + param.setDeviceId(deviceId); + param.setVmId(instanceId); param.setEventId(eventId); Gson gson = GsonHelper.getBuilder().create(); diff --git a/server/src/com/cloud/vm/UserVmManager.java b/server/src/com/cloud/vm/UserVmManager.java index 4c2a3e98e3d..f4f09bc81eb 100644 --- a/server/src/com/cloud/vm/UserVmManager.java +++ b/server/src/com/cloud/vm/UserVmManager.java @@ -117,7 +117,7 @@ public interface UserVmManager extends Manager, VirtualMachineManager * @param volumeId * @throws InternalErrorException */ - void detachVolumeFromVM(long volumeId, long startEventId) throws InternalErrorException; + void detachVolumeFromVM(long volumeId, long startEventId, long deviceId, long instanceId) throws InternalErrorException; /** * Attaches an ISO to the virtual CDROM device of the specified VM. Will eject any existing virtual CDROM if isoPath is null. 
diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index 66a7e8f335b..1fa47d211b5 100755 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -418,10 +418,28 @@ public class UserVmManagerImpl implements UserVmManager { } @Override - public void detachVolumeFromVM(long volumeId, long startEventId) throws InternalErrorException { - VolumeVO volume = _volsDao.findById(volumeId); + public void detachVolumeFromVM(long volumeId, long startEventId, long deviceId, long instanceId) throws InternalErrorException { + VolumeVO volume = null; - Long vmId = volume.getInstanceId(); + if(volumeId!=0) + { + volume = _volsDao.findById(volumeId); + } + else + { + volume = _volsDao.findByInstanceAndDeviceId(instanceId, deviceId).get(0); + } + + Long vmId = null; + + if(instanceId!=0) + { + vmId = volume.getInstanceId(); + } + else + { + vmId = instanceId; + } if (vmId == null) { return; @@ -454,7 +472,7 @@ public class UserVmManagerImpl implements UserVmManager { Answer answer = null; if (sendCommand) { - AttachVolumeCommand cmd = new AttachVolumeCommand(false, vm.getInstanceName(), volume.getPoolType(), volume.getFolder(), volume.getPath(), volume.getName(), volume.getDeviceId()); + AttachVolumeCommand cmd = new AttachVolumeCommand(false, vm.getInstanceName(), volume.getPoolType(), volume.getFolder(), volume.getPath(), volume.getName(), deviceId!=0 ? 
deviceId : volume.getDeviceId()); try { answer = _agentMgr.send(vm.getHostId(), cmd); From d3e323b9ecba8bbcb14d4719d68ffcb5121f9749 Mon Sep 17 00:00:00 2001 From: abhishek Date: Thu, 2 Sep 2010 19:10:13 -0700 Subject: [PATCH 055/145] bug 5927: incremental checkin --- .../com/cloud/server/ManagementServer.java | 1 + .../cloud/api/commands/DetachVolumeCmd.java | 16 ++++++++++++--- .../async/executor/VolumeOperationParam.java | 6 +++--- .../cloud/server/ManagementServerImpl.java | 20 ++++++++++++++++++- .../src/com/cloud/vm/UserVmManagerImpl.java | 2 +- 5 files changed, 37 insertions(+), 8 deletions(-) diff --git a/core/src/com/cloud/server/ManagementServer.java b/core/src/com/cloud/server/ManagementServer.java index 9bfaf1cddae..12ae65f682a 100644 --- a/core/src/com/cloud/server/ManagementServer.java +++ b/core/src/com/cloud/server/ManagementServer.java @@ -2189,4 +2189,5 @@ public interface ManagementServer { Map listCapabilities(); GuestOSCategoryVO getGuestOsCategory(Long guestOsId); + VolumeVO findVolumeByInstanceAndDeviceId(long instanceId, long deviceId); } diff --git a/server/src/com/cloud/api/commands/DetachVolumeCmd.java b/server/src/com/cloud/api/commands/DetachVolumeCmd.java index c4a8bd0920a..a5ddaeec23e 100644 --- a/server/src/com/cloud/api/commands/DetachVolumeCmd.java +++ b/server/src/com/cloud/api/commands/DetachVolumeCmd.java @@ -60,6 +60,7 @@ public class DetachVolumeCmd extends BaseCmd { Long volumeId = (Long) params.get(BaseCmd.Properties.ID.getName()); Long deviceId = (Long) params.get(BaseCmd.Properties.DEVICE_ID.getName()); Long instanceId = (Long) params.get(BaseCmd.Properties.VIRTUAL_MACHINE_ID.getName()); + VolumeVO volume = null; if((volumeId==null && (deviceId==null && instanceId==null)) || (volumeId!=null && (deviceId!=null || instanceId!=null)) || (volumeId==null && (deviceId==null || instanceId==null))) { @@ -85,9 +86,18 @@ public class DetachVolumeCmd extends BaseCmd { } // Check that the volume ID is valid - VolumeVO volume = 
getManagementServer().findVolumeById(volumeId); - if (volume == null) - throw new ServerApiException(BaseCmd.PARAM_ERROR, "Unable to find volume with ID: " + volumeId); + if(volumeId != 0) + { + volume = getManagementServer().findVolumeById(volumeId); + if (volume == null) + throw new ServerApiException(BaseCmd.PARAM_ERROR, "Unable to find volume with ID: " + volumeId); + } + else + { + volume = getManagementServer().findVolumeByInstanceAndDeviceId(instanceId, deviceId); + if (volume == null) + throw new ServerApiException(BaseCmd.PARAM_ERROR, "Unable to find volume with ID: " + volumeId); + } // If the account is not an admin, check that the volume is owned by the account that was passed in if (!isAdmin) { diff --git a/server/src/com/cloud/async/executor/VolumeOperationParam.java b/server/src/com/cloud/async/executor/VolumeOperationParam.java index 3fd1e0f50b9..b59ddd51f2f 100644 --- a/server/src/com/cloud/async/executor/VolumeOperationParam.java +++ b/server/src/com/cloud/async/executor/VolumeOperationParam.java @@ -40,7 +40,7 @@ public class VolumeOperationParam { // Used for Attach, Detach, and Delete private long volumeId; private long eventId; - private long deviceId; + private Long deviceId; public VolumeOperationParam() { } @@ -117,11 +117,11 @@ public class VolumeOperationParam { return eventId; } - public void setDeviceId(long deviceId) { + public void setDeviceId(Long deviceId) { this.deviceId = deviceId; } - public long getDeviceId() { + public Long getDeviceId() { return deviceId; } diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index a86b44987c7..8284d0c05f4 100644 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -1991,7 +1991,11 @@ public class ManagementServerImpl implements ManagementServer { @Override public long detachVolumeFromVMAsync(long volumeId, long deviceId, long instanceId) throws 
InvalidParameterValueException { - VolumeVO volume = _volumeDao.findById(volumeId); + VolumeVO volume = null; + if(volumeId!=0) + volume = _volumeDao.findById(volumeId); + else + volume = _volumeDao.findByInstanceAndDeviceId(instanceId, deviceId).get(0); // Check that the volume is a data volume if (volume.getVolumeType() != VolumeType.DATADISK) { @@ -5559,6 +5563,20 @@ public class ManagementServerImpl implements ManagementServer { return null; } } + + @Override + public VolumeVO findVolumeByInstanceAndDeviceId(long instanceId, long deviceId) + { + VolumeVO volume = _volumeDao.findByInstanceAndDeviceId(instanceId, deviceId).get(0); + if (volume != null && !volume.getDestroyed() && volume.getRemoved() == null) + { + return volume; + } + else + { + return null; + } + } @Override diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index 1fa47d211b5..c183ba9eceb 100755 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -432,7 +432,7 @@ public class UserVmManagerImpl implements UserVmManager { Long vmId = null; - if(instanceId!=0) + if(instanceId==0) { vmId = volume.getInstanceId(); } From 45c997a0512b998abb9a15e56d219cea9c3cbc61 Mon Sep 17 00:00:00 2001 From: abhishek Date: Fri, 3 Sep 2010 09:40:55 -0700 Subject: [PATCH 056/145] bug 6085: Fixed this issue status 6085: resolved fixed --- server/src/com/cloud/api/commands/ListVolumesCmd.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/com/cloud/api/commands/ListVolumesCmd.java b/server/src/com/cloud/api/commands/ListVolumesCmd.java index 435c4bdeeba..006f1565ca2 100755 --- a/server/src/com/cloud/api/commands/ListVolumesCmd.java +++ b/server/src/com/cloud/api/commands/ListVolumesCmd.java @@ -143,7 +143,7 @@ public class ListVolumesCmd extends BaseCmd{ List volumes = getManagementServer().searchForVolumes(c); - if (volumes == null || volumes.size()==0) { + if (volumes == null) 
{ throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "unable to find volumes"); } From 2e6c22d706c17b2e5556f0903cad2a18169b554a Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Fri, 3 Sep 2010 10:32:11 -0700 Subject: [PATCH 057/145] MySQLdb is required for build --- cloud.spec | 1 + debian/control | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud.spec b/cloud.spec index 24983320b19..ae49260915f 100644 --- a/cloud.spec +++ b/cloud.spec @@ -35,6 +35,7 @@ BuildRequires: jpackage-utils BuildRequires: gcc BuildRequires: glibc-devel BuildRequires: /usr/bin/mkisofs +BuildRequires: MySQL-python %global _premium %(tar jtvmf %{SOURCE0} '*/cloudstack-proprietary/' --occurrence=1 2>/dev/null | wc -l) diff --git a/debian/control b/debian/control index 2dd9c58ad02..91a8186c662 100644 --- a/debian/control +++ b/debian/control @@ -2,7 +2,7 @@ Source: cloud Section: libs Priority: extra Maintainer: Manuel Amador (Rudd-O) -Build-Depends: debhelper (>= 7), openjdk-6-jdk, tomcat6, libws-commons-util-java, libcommons-dbcp-java, libcommons-collections-java, libcommons-httpclient-java, libservlet2.5-java, genisoimage +Build-Depends: debhelper (>= 7), openjdk-6-jdk, tomcat6, libws-commons-util-java, libcommons-dbcp-java, libcommons-collections-java, libcommons-httpclient-java, libservlet2.5-java, genisoimage, python-mysqldb Standards-Version: 3.8.1 Homepage: http://techcenter.cloud.com/software/cloudstack From 2fce7a1c35d45544966675db96ae6df4d97f7eff Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Fri, 3 Sep 2010 10:33:15 -0700 Subject: [PATCH 058/145] new UI - instances tab - implement change group action. 
--- ui/jsp/tab_storage.jsp | 4 +-- ui/new/jsp/tab_instance.jsp | 20 +++++++++++++ ui/new/scripts/cloud.core.instance.js | 42 +++++++++++++++++++++++++-- 3 files changed, 61 insertions(+), 5 deletions(-) diff --git a/ui/jsp/tab_storage.jsp b/ui/jsp/tab_storage.jsp index 92f6b5ae5fb..aaab49adb19 100755 --- a/ui/jsp/tab_storage.jsp +++ b/ui/jsp/tab_storage.jsp @@ -615,7 +615,7 @@ long milliseconds = new Date().getTime();
Name
-
+
Type
@@ -707,7 +707,7 @@ long milliseconds = new Date().getTime();
Name
-
+
Type
diff --git a/ui/new/jsp/tab_instance.jsp b/ui/new/jsp/tab_instance.jsp index 7d12d1a7542..5eb22150ba5 100644 --- a/ui/new/jsp/tab_instance.jsp +++ b/ui/new/jsp/tab_instance.jsp @@ -676,4 +676,24 @@
+
+ + + \ No newline at end of file diff --git a/ui/new/scripts/cloud.core.instance.js b/ui/new/scripts/cloud.core.instance.js index c4d59a654b0..1de7e8eb0d1 100755 --- a/ui/new/scripts/cloud.core.instance.js +++ b/ui/new/scripts/cloud.core.instance.js @@ -79,7 +79,12 @@ function clickInstanceGroupHeader($arrowIcon) { asyncJobResponse: "changeserviceforvirtualmachineresponse", dialogBeforeActionFn : doChangeService, afterActionSeccessFn: setMidmenuItemVm - } + }, + "Change Group": { + isAsyncJob: false, + dialogBeforeActionFn : doChangeGroup, + afterActionSeccessFn: setMidmenuItemVm + } } function doAttachISO($t, selectedItemIds, listAPIMap) { @@ -231,8 +236,33 @@ function clickInstanceGroupHeader($arrowIcon) { $(this).dialog("close"); } }).dialog("open"); - } - + } + + function doChangeGroup($t, selectedItemIds, listAPIMap) { + $("#dialog_change_group") + .dialog('option', 'buttons', { + "Confirm": function() { + var thisDialog = $(this); + thisDialog.dialog("close"); + + // validate values + var isValid = true; + isValid &= validateString("Group", thisDialog.find("#change_group_name"), thisDialog.find("#change_group_name_errormsg"), true); //group name is optional + if (!isValid) return; + + for(var id in selectedItemIds) { + var $midMenuItem = selectedItemIds[id]; + var jsonObj = $midMenuItem.data("jsonObj"); + var group = trim(thisDialog.find("#change_group_name").val()); + var apiCommand = "command=updateVirtualMachine&id="+id+"&group="+encodeURIComponent(group); + doAction(id, $t, apiCommand, listAPIMap); + } + }, + "Cancel": function() { + $(this).dialog("close"); + } + }).dialog("open"); + } function updateVirtualMachineStateInRightPanel(state) { if(state == "Running") @@ -447,6 +477,12 @@ function clickInstanceGroupHeader($arrowIcon) { zIndex: 2000 })); + activateDialog($("#dialog_change_group").dialog({ + autoOpen: false, + modal: true, + zIndex: 2000 + })); + //***** VM Wizard (begin) 
****************************************************************************** $vmPopup = $("#vm_popup"); var $serviceOfferingTemplate = $("#vm_popup_service_offering_template"); From d7981cad6de5ada29789f42f1c2b38afc9389cb8 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Fri, 3 Sep 2010 10:44:12 -0700 Subject: [PATCH 059/145] Remove prepsystemvm.sh from agent scripts package in Debian/Ubuntu. This was missing in commit 4289dd79ecdfd3523837352771ad5e46da591b21 --- debian/cloud-agent-scripts.install | 1 - 1 file changed, 1 deletion(-) diff --git a/debian/cloud-agent-scripts.install b/debian/cloud-agent-scripts.install index 5c448a8c15d..e825669bf7b 100644 --- a/debian/cloud-agent-scripts.install +++ b/debian/cloud-agent-scripts.install @@ -13,7 +13,6 @@ /usr/lib/cloud/agent/scripts/vm/hypervisor/xenserver/make_migratable.sh /usr/lib/cloud/agent/scripts/vm/hypervisor/xenserver/network_info.sh /usr/lib/cloud/agent/scripts/vm/hypervisor/xenserver/networkUsage.sh -/usr/lib/cloud/agent/scripts/vm/hypervisor/xenserver/prepsystemvm.sh /usr/lib/cloud/agent/scripts/vm/hypervisor/xenserver/setup_iscsi.sh /usr/lib/cloud/agent/scripts/vm/hypervisor/xenserver/setupxenserver.sh /usr/lib/cloud/agent/scripts/vm/hypervisor/xenserver/vmops From d15e3899bb7e5daef667cd7111aa3fcf320302fc Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Fri, 3 Sep 2010 11:02:29 -0700 Subject: [PATCH 060/145] new UI - implement enable HA action, implement disable HA action. 
--- ui/new/scripts/cloud.core.instance.js | 56 ++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/ui/new/scripts/cloud.core.instance.js b/ui/new/scripts/cloud.core.instance.js index 1de7e8eb0d1..4fea2cf728f 100755 --- a/ui/new/scripts/cloud.core.instance.js +++ b/ui/new/scripts/cloud.core.instance.js @@ -84,7 +84,17 @@ function clickInstanceGroupHeader($arrowIcon) { isAsyncJob: false, dialogBeforeActionFn : doChangeGroup, afterActionSeccessFn: setMidmenuItemVm - } + }, + "Enable HA": { + isAsyncJob: false, + dialogBeforeActionFn : doEnableHA, + afterActionSeccessFn: setMidmenuItemVm + }, + "Disable HA": { + isAsyncJob: false, + dialogBeforeActionFn : doDisableHA, + afterActionSeccessFn: setMidmenuItemVm + } } function doAttachISO($t, selectedItemIds, listAPIMap) { @@ -264,6 +274,50 @@ function clickInstanceGroupHeader($arrowIcon) { }).dialog("open"); } + function doEnableHA($t, selectedItemIds, listAPIMap) { + var message = "

Please confirm you want to enable HA for your virtual machine. Once HA is enabled, your Virtual Instance will be automatically restarted in the event it is detected to have failed.

"; + + $("#dialog_confirmation") + .html(message) + .dialog('option', 'buttons', { + "Confirm": function() { + $(this).dialog("close"); + for(var id in selectedItemIds) { + var $midMenuItem = selectedItemIds[id]; + var jsonObj = $midMenuItem.data("jsonObj"); + var apiCommand = "command=updateVirtualMachine&id="+id+"&haenable=true"; + doAction(id, $t, apiCommand, listAPIMap); + } + }, + "Cancel": function() { + $(this).dialog("close"); + } + }).dialog("open"); + } + + function doDisableHA($t, selectedItemIds, listAPIMap) { + var message = "

Please confirm you want to disable HA for your virtual machine. Once HA is disabled, your Virtual Instance will no longer be automatically restarted in the event of a failure.

"; + + $("#dialog_confirmation") + .html(message) + .dialog('option', 'buttons', { + "Confirm": function() { + $(this).dialog("close"); + for(var id in selectedItemIds) { + var $midMenuItem = selectedItemIds[id]; + var jsonObj = $midMenuItem.data("jsonObj"); + var apiCommand = "command=updateVirtualMachine&id="+id+"&haenable=false"; + doAction(id, $t, apiCommand, listAPIMap); + } + }, + "Cancel": function() { + $(this).dialog("close"); + } + }).dialog("open"); + } + + + function updateVirtualMachineStateInRightPanel(state) { if(state == "Running") $rightPanelContent.find("#state").text(state).removeClass("red gray").addClass("green"); From bb6a66030a7f7bcc83b274c0c8adea6aa1737684 Mon Sep 17 00:00:00 2001 From: abhishek Date: Fri, 3 Sep 2010 11:22:43 -0700 Subject: [PATCH 061/145] bug 5924: incremental checkin --- server/src/com/cloud/api/BaseCmd.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/com/cloud/api/BaseCmd.java b/server/src/com/cloud/api/BaseCmd.java index 90c2397f43a..da730ebceb7 100644 --- a/server/src/com/cloud/api/BaseCmd.java +++ b/server/src/com/cloud/api/BaseCmd.java @@ -309,6 +309,8 @@ public abstract class BaseCmd { RESOURCE_TYPE("resourcetype", BaseCmd.TYPE_INT, "resourcetype"), RESPONSE_TYPE("response",BaseCmd.TYPE_STRING,"response"), ROOT_DISK_OFFERING_ID("rootdiskofferingid", BaseCmd.TYPE_LONG, "rootDiskOfferingId"), + ROOT_DEVICE_ID("rootdeviceid", BaseCmd.TYPE_LONG, "rootDeviceId"), + ROOT_DEVICE_TYPE("rootdevicetype", BaseCmd.TYPE_STRING, "rootDeviceType"), RULE_ID("ruleid", BaseCmd.TYPE_LONG, "ruleId"), RUNNING_VMS("runningvms", BaseCmd.TYPE_LONG, "runningvms"), SCHEDULE("schedule", BaseCmd.TYPE_STRING, "schedule"), From 1ca7b1fd1fde8ce59ca91b9cf6344dfe91d7e320 Mon Sep 17 00:00:00 2001 From: abhishek Date: Fri, 3 Sep 2010 11:23:28 -0700 Subject: [PATCH 062/145] bug 5924: conforming the api to the ec2 implementation status 5924: resolved fixed --- core/src/com/cloud/server/ManagementServer.java | 1 + 
server/src/com/cloud/api/commands/ListVMsCmd.java | 10 ++++++++++ server/src/com/cloud/server/ManagementServerImpl.java | 7 +++++++ 3 files changed, 18 insertions(+) diff --git a/core/src/com/cloud/server/ManagementServer.java b/core/src/com/cloud/server/ManagementServer.java index 12ae65f682a..f1d126c64f6 100644 --- a/core/src/com/cloud/server/ManagementServer.java +++ b/core/src/com/cloud/server/ManagementServer.java @@ -2190,4 +2190,5 @@ public interface ManagementServer { Map listCapabilities(); GuestOSCategoryVO getGuestOsCategory(Long guestOsId); VolumeVO findVolumeByInstanceAndDeviceId(long instanceId, long deviceId); + VolumeVO getRootVolume(Long instanceId); } diff --git a/server/src/com/cloud/api/commands/ListVMsCmd.java b/server/src/com/cloud/api/commands/ListVMsCmd.java index 61ff9c46d96..7f52e7f66b9 100644 --- a/server/src/com/cloud/api/commands/ListVMsCmd.java +++ b/server/src/com/cloud/api/commands/ListVMsCmd.java @@ -34,7 +34,10 @@ import com.cloud.host.HostVO; import com.cloud.server.Criteria; import com.cloud.service.ServiceOfferingVO; import com.cloud.storage.GuestOSCategoryVO; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VolumeVO; import com.cloud.user.Account; import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; @@ -277,6 +280,13 @@ public class ListVMsCmd extends BaseCmd { //network groups vmData.add(new Pair(BaseCmd.Properties.NETWORK_GROUP_LIST.getName(), getManagementServer().getNetworkGroupsNamesForVm(vmInstance.getId()))); + //root device related + VolumeVO rootVolume = getManagementServer().findRootVolume(vmInstance.getId()); + vmData.add(new Pair(BaseCmd.Properties.ROOT_DEVICE_ID.getName(), rootVolume.getDeviceId())); + + StoragePoolVO storagePool = getManagementServer().findPoolById(rootVolume.getPoolId()); + vmData.add(new Pair(BaseCmd.Properties.ROOT_DEVICE_TYPE.getName(), storagePool.getPoolType().toString())); + vmTag[i++] = 
vmData; } List> returnTags = new ArrayList>(); diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index 8284d0c05f4..b5edcee0d07 100644 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -198,6 +198,7 @@ import com.cloud.storage.StorageStats; import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.Volume; import com.cloud.storage.VolumeStats; import com.cloud.storage.VolumeVO; import com.cloud.storage.Snapshot.SnapshotType; @@ -8676,5 +8677,11 @@ public class ManagementServerImpl implements ManagementServer { { return _guestOSCategoryDao.findById(guestOsId); } + + @Override + public VolumeVO getRootVolume(Long instanceId) + { + return _volumeDao.findByInstanceAndType(instanceId, Volume.VolumeType.ROOT).get(0); + } } From 44e7298ef783da899023d39405ed38617d7c8ab6 Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Fri, 3 Sep 2010 11:29:34 -0700 Subject: [PATCH 063/145] new UI - clicking instance group on left menu will load instances belonging to this group in middle menu. 
--- ui/new/scripts/cloud.core.instance.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ui/new/scripts/cloud.core.instance.js b/ui/new/scripts/cloud.core.instance.js index 4fea2cf728f..d1dc3781820 100755 --- a/ui/new/scripts/cloud.core.instance.js +++ b/ui/new/scripts/cloud.core.instance.js @@ -450,6 +450,11 @@ function clickInstanceGroupHeader($arrowIcon) { var instances = json.listvirtualmachinesresponse.virtualmachine; for(var i=0; i Date: Fri, 3 Sep 2010 13:51:04 -0700 Subject: [PATCH 064/145] bug 6084: fixing some more api response parameters status 6084: resolved fixed --- core/src/com/cloud/server/ManagementServer.java | 2 +- server/src/com/cloud/api/BaseCmd.java | 1 + server/src/com/cloud/api/commands/ListVMsCmd.java | 8 +++++--- server/src/com/cloud/server/ManagementServerImpl.java | 4 ++-- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/core/src/com/cloud/server/ManagementServer.java b/core/src/com/cloud/server/ManagementServer.java index f1d126c64f6..c737d029f6c 100644 --- a/core/src/com/cloud/server/ManagementServer.java +++ b/core/src/com/cloud/server/ManagementServer.java @@ -2188,7 +2188,7 @@ public interface ManagementServer { boolean checkIfMaintenable(long hostId); Map listCapabilities(); - GuestOSCategoryVO getGuestOsCategory(Long guestOsId); + GuestOSVO getGuestOs(Long guestOsId); VolumeVO findVolumeByInstanceAndDeviceId(long instanceId, long deviceId); VolumeVO getRootVolume(Long instanceId); } diff --git a/server/src/com/cloud/api/BaseCmd.java b/server/src/com/cloud/api/BaseCmd.java index da730ebceb7..c84a46c862e 100644 --- a/server/src/com/cloud/api/BaseCmd.java +++ b/server/src/com/cloud/api/BaseCmd.java @@ -199,6 +199,7 @@ public abstract class BaseCmd { GROUP("group", BaseCmd.TYPE_STRING, "group"), GROUP_ID("group", BaseCmd.TYPE_LONG, "groupId"), GROUP_IDS("groupids", BaseCmd.TYPE_STRING, "groupIds"), + GUEST_OS_ID("guestosid", BaseCmd.TYPE_LONG, "guestOsId"), HA_ENABLE("haenable", BaseCmd.TYPE_BOOLEAN, "haEnable"), 
HAS_CHILD("haschild", BaseCmd.TYPE_BOOLEAN, "haschild"), HOST_ID("hostid", BaseCmd.TYPE_LONG, "hostId"), diff --git a/server/src/com/cloud/api/commands/ListVMsCmd.java b/server/src/com/cloud/api/commands/ListVMsCmd.java index 7f52e7f66b9..add2a51445d 100644 --- a/server/src/com/cloud/api/commands/ListVMsCmd.java +++ b/server/src/com/cloud/api/commands/ListVMsCmd.java @@ -34,6 +34,7 @@ import com.cloud.host.HostVO; import com.cloud.server.Criteria; import com.cloud.service.ServiceOfferingVO; import com.cloud.storage.GuestOSCategoryVO; +import com.cloud.storage.GuestOSVO; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateVO; @@ -272,10 +273,11 @@ public class ListVMsCmd extends BaseCmd { long networkKbWrite = (long)vmStats.getNetworkWriteKBs(); vmData.add(new Pair(BaseCmd.Properties.NETWORK_KB_WRITE.getName(), networkKbWrite)); } + vmData.add(new Pair(BaseCmd.Properties.GUEST_OS_ID.getName(), vmInstance.getGuestOSId())); - GuestOSCategoryVO guestOsCategory = getManagementServer().getGuestOsCategory(vmInstance.getGuestOSId()); - if(guestOsCategory!=null) - vmData.add(new Pair(BaseCmd.Properties.OS_TYPE_ID.getName(),guestOsCategory.getId())); + GuestOSVO guestOs = getManagementServer().getGuestOs(vmInstance.getGuestOSId()); + if(guestOs!=null) + vmData.add(new Pair(BaseCmd.Properties.OS_TYPE_ID.getName(),guestOs.getCategoryId())); //network groups vmData.add(new Pair(BaseCmd.Properties.NETWORK_GROUP_LIST.getName(), getManagementServer().getNetworkGroupsNamesForVm(vmInstance.getId()))); diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index b5edcee0d07..edd8c3758c0 100644 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -8673,9 +8673,9 @@ public class ManagementServerImpl implements ManagementServer { } @Override - public GuestOSCategoryVO getGuestOsCategory(Long 
guestOsId) + public GuestOSVO getGuestOs(Long guestOsId) { - return _guestOSCategoryDao.findById(guestOsId); + return _guestOSDao.findById(guestOsId); } @Override From d62c44ad1130d00425dc2f1c4ac75e3e2b38583c Mon Sep 17 00:00:00 2001 From: abhishek Date: Fri, 3 Sep 2010 14:38:22 -0700 Subject: [PATCH 065/145] bug 5147: some code cleanup, and more incremental checkin --- .../api/commands/PreparePrimaryStorageForMaintenanceCmd.java | 1 - 1 file changed, 1 deletion(-) diff --git a/server/src/com/cloud/api/commands/PreparePrimaryStorageForMaintenanceCmd.java b/server/src/com/cloud/api/commands/PreparePrimaryStorageForMaintenanceCmd.java index cfd3d013553..52e3455064a 100644 --- a/server/src/com/cloud/api/commands/PreparePrimaryStorageForMaintenanceCmd.java +++ b/server/src/com/cloud/api/commands/PreparePrimaryStorageForMaintenanceCmd.java @@ -27,7 +27,6 @@ import org.apache.log4j.Logger; import com.cloud.api.BaseCmd; import com.cloud.api.ServerApiException; import com.cloud.exception.InvalidParameterValueException; -import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.storage.StoragePoolVO; import com.cloud.user.Account; From 6c3a4eb2e742362ca9ebc1fb0d283270410ac78f Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Fri, 3 Sep 2010 15:22:44 -0700 Subject: [PATCH 066/145] new UI - clicking events in middle panel will load detail to right panel. 
--- ui/new/index.jsp | 2 ++ ui/new/jsp/tab_instance.jsp | 2 +- ui/new/scripts/cloud.core.init.js | 30 ++++++++++++++++++++++++------ 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/ui/new/index.jsp b/ui/new/index.jsp index 5b6c9526172..759b77129ce 100644 --- a/ui/new/index.jsp +++ b/ui/new/index.jsp @@ -31,6 +31,8 @@ + + Cloud.com CloudStack diff --git a/ui/new/jsp/tab_instance.jsp b/ui/new/jsp/tab_instance.jsp index 5eb22150ba5..88818e56b29 100644 --- a/ui/new/jsp/tab_instance.jsp +++ b/ui/new/jsp/tab_instance.jsp @@ -1,5 +1,5 @@ <%@ page import="java.util.*" %> diff --git a/ui/new/scripts/cloud.core.init.js b/ui/new/scripts/cloud.core.init.js index 1a9beb66132..2652cfd2ce7 100755 --- a/ui/new/scripts/cloud.core.init.js +++ b/ui/new/scripts/cloud.core.init.js @@ -54,9 +54,25 @@ $(document).ready(function() { return false; }); + + function setMidmenuItem(jsonObj, $midmenuItem1, toRightPanelFn) { + $midmenuItem1.attr("id", ("midmenuItem_"+jsonObj.id)); + $midmenuItem1.data("id", jsonObj.id); + $midmenuItem1.data("jsonObj", jsonObj); + $midmenuItem1.data("toRightPanelFn", toRightPanelFn); + +// $midmenuItem1.bind("click", function(event) { +// var $t = $(this); +// toRightPanelFn($t); +// return false; +// }); + } + var $midmenuItem = $("#midmenu_item"); - function listMidMenuItems(leftmenuId, apiName, jsonResponse1, jsonResponse2, descriptionProperty) { + function listMidMenuItems(leftmenuId, apiName, jsonResponse1, jsonResponse2, descriptionProperty, rightPanelJSP, toRightPanelFn) { $("#"+leftmenuId).bind("click", function(event) { + $("#right_panel").load(rightPanelJSP); + $.ajax({ cache: false, data: createURL("command="+apiName+"&response=json"), @@ -68,18 +84,20 @@ $(document).ready(function() { for(var i=0; i Date: Fri, 3 Sep 2010 15:25:34 -0700 Subject: [PATCH 067/145] new UI - add jsp file and js file for event detail panel. 
--- ui/new/jsp/tab_event.jsp | 96 ++++++++++++++++++++++++++++++ ui/new/scripts/cloud.core.event.js | 11 ++++ 2 files changed, 107 insertions(+) create mode 100644 ui/new/jsp/tab_event.jsp create mode 100644 ui/new/scripts/cloud.core.event.js diff --git a/ui/new/jsp/tab_event.jsp b/ui/new/jsp/tab_event.jsp new file mode 100644 index 00000000000..4c33b31018e --- /dev/null +++ b/ui/new/jsp/tab_event.jsp @@ -0,0 +1,96 @@ + + +<%@ page import="java.util.*" %> +<%@ page import="com.cloud.utils.*" %> + +<% + + Locale browserLocale = request.getLocale(); + CloudResourceBundle t = CloudResourceBundle.getBundle("resources/resource", browserLocale); +%> + + +
+ +
+
+ <%=t.t("Details")%>
+
+
+
+
+
+ <%=t.t("Initiated.By")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Owner.Account")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Type")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Level")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Description")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("State")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Date")%>:
+
+
+
+
+
+
+
+
+ \ No newline at end of file diff --git a/ui/new/scripts/cloud.core.event.js b/ui/new/scripts/cloud.core.event.js new file mode 100644 index 00000000000..ca70c3b3096 --- /dev/null +++ b/ui/new/scripts/cloud.core.event.js @@ -0,0 +1,11 @@ +function loadEventToRigntPanelFn($rightPanelContent) { + var jsonObj = $rightPanelContent.data("jsonObj"); + var $rightPanelContent = $("#right_panel_content"); + $rightPanelContent.find("#username").text(jsonObj.username); + $rightPanelContent.find("#account").text(jsonObj.account); + $rightPanelContent.find("#type").text(jsonObj.type); + $rightPanelContent.find("#level").text(jsonObj.level); + $rightPanelContent.find("#description").text(jsonObj.description); + $rightPanelContent.find("#state").text(jsonObj.state); + setDateField(jsonObj.created, $rightPanelContent.find("#created")); +} \ No newline at end of file From 1ce6b5baa2c59f096cdeed7cbe9480d14de25b40 Mon Sep 17 00:00:00 2001 From: anthony Date: Fri, 3 Sep 2010 16:42:21 -0700 Subject: [PATCH 068/145] need to execute addPoolToHost for each host to associate them with storage pool --- server/src/com/cloud/storage/StorageManagerImpl.java | 1 - 1 file changed, 1 deletion(-) diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index 0e4aa33ef06..f6d9aa72de0 100644 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -1390,7 +1390,6 @@ public class StorageManagerImpl implements StorageManager { boolean success = addPoolToHost(h.getId(), pool); if (success) { poolHosts.add(h); - break; } } From ef219ae69a4cda6477e323b60ef117c7b66504c6 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Fri, 3 Sep 2010 12:22:55 -0700 Subject: [PATCH 069/145] squelch mysqldb spurious warnings --- setup/bindir/cloud-setup-databases.in | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/setup/bindir/cloud-setup-databases.in 
b/setup/bindir/cloud-setup-databases.in index 067ff7b2156..e12a4c99d48 100755 --- a/setup/bindir/cloud-setup-databases.in +++ b/setup/bindir/cloud-setup-databases.in @@ -11,6 +11,10 @@ from optparse import OptionParser import commands import MySQLdb +# squelch mysqldb spurious warnings +import warnings +warnings.simplefilter('ignore') + # ---- This snippet of code adds the sources path and the waf configured PYTHONDIR to the Python path ---- # ---- We do this so cloud_utils can be looked up in the following order: # ---- 1) Sources directory From 1aaa380a60fa9d2b58aa1f5c97f06732ea781493 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Fri, 3 Sep 2010 16:11:19 -0700 Subject: [PATCH 070/145] Split out user management as a waf tool --- tools/waf/usermgmt.py | 120 ++++++++++++++++++++++++++++++++++++++++++ wscript | 51 +----------------- wscript_build | 2 + wscript_configure | 1 + 4 files changed, 124 insertions(+), 50 deletions(-) create mode 100644 tools/waf/usermgmt.py diff --git a/tools/waf/usermgmt.py b/tools/waf/usermgmt.py new file mode 100644 index 00000000000..a140d17ebdc --- /dev/null +++ b/tools/waf/usermgmt.py @@ -0,0 +1,120 @@ +import Utils, Build +from TaskGen import feature, before +from Configure import ConfigurationError +import Options +import Task +import os + +def detect(conf): + if Options.platform == 'win32': return + path_list = ["/usr/local/sbin","/usr/sbin","/sbin"] + os.environ.get('PATH','').split(os.pathsep) + conf.find_program("useradd",var='USERADD',mandatory=True,path_list=path_list) + conf.find_program("userdel",var='USERDEL',mandatory=True,path_list=path_list) + +def set_options(opt): + if Options.platform == 'win32': return + og = opt.get_option_group('--force') + og.add_option('--nochown', + action = 'store_true', + help = 'do not create or remove user accounts or change file ownership on installed files', + default = False, + dest = 'NOUSERMGMT') + +def _subst_add_destdir(x,bld): + a = "${DESTDIR}" + x + a = 
a.replace("${DESTDIR}",Options.options.destdir) + a = Utils.subst_vars(a,bld.env) + if a.startswith("//"): a = a[1:] + return a +Build.BuildContext.subst_add_destdir = staticmethod(_subst_add_destdir) + +def _setownership(ctx,path,owner,group,mode=None): + if Options.platform == 'win32': return + if not hasattr(os,"getuid"): return + if os.getuid() != 0: return + if Options.options.NOUSERMGMT: return + + import pwd + import grp + import stat + from os import chown as _chown, chmod as _chmod + + def f(bld,path,owner,group,mode): + + try: uid = pwd.getpwnam(owner).pw_uid + except KeyError,e: + raise Utils.WafError("Before using setownership() you have to create the user with bld.createuser(username...)") + try: gid = grp.getgrnam(group).gr_gid + except KeyError,e: + raise Utils.WafError("Before using setownership() you have to create the user with bld.createuser(username...)") + + path = bld.subst_add_destdir(path,bld) + current_uid,current_gid = os.stat(path).st_uid,os.stat(path).st_gid + if current_uid != uid: + Utils.pprint("GREEN","* setting owner of %s to UID %s"%(path,uid)) + _chown(path,uid,current_gid) + current_uid = uid + if current_gid != gid: + Utils.pprint("GREEN","* setting group of %s to GID %s"%(path,gid)) + _chown(path,current_uid,gid) + current_gid = gid + if mode is not None: + current_mode = stat.S_IMODE(os.stat(path).st_mode) + if current_mode != mode: + Utils.pprint("GREEN","* adjusting permissions on %s to mode %o"%(path,mode)) + _chmod(path,mode) + current_mode = mode + + if ctx.is_install > 0: + ctx.add_post_fun(lambda ctx: f(ctx,path,owner,group,mode)) +Build.BuildContext.setownership = _setownership + +def _createuser(ctx,user,homedir,shell): + if Options.platform == 'win32': return + if not hasattr(os,"getuid"): return + if os.getuid() != 0: return + if Options.options.NOUSERMGMT: return + + def f(ctx,user,homedir,shell): + import pwd + try: + pwd.getpwnam(user).pw_uid + user_exists = True + except KeyError,e: + user_exists = False + if 
user_exists: return + + Utils.pprint("GREEN","* creating user %s"%user) + cmd = [ + ctx.env.USERADD, + '-M', + '-r', + '-s',shell, + '-d',homedir, + user, + ] + ret = Utils.exec_command(cmd) + if ret: raise Utils.WafError("Failed to run command %s"%cmd) + + def g(ctx,user,homedir,shell): + import pwd + try: + pwd.getpwnam(user).pw_uid + user_exists = True + except KeyError,e: + user_exists = False + if not user_exists: return + + Utils.pprint("GREEN","* removing user %s"%user) + cmd = [ + ctx.env.USERDEL, + user, + ] + ret = Utils.exec_command(cmd) + if ret: raise Utils.WafError("Failed to run command %s"%cmd) + + if ctx.is_install > 0: + ctx.add_pre_fun(lambda ctx: f(ctx,user,homedir,shell)) + elif ctx.is_install < 0: + ctx.add_pre_fun(lambda ctx: g(ctx,user,homedir,shell)) +Build.BuildContext.createuser = _createuser \ No newline at end of file diff --git a/wscript b/wscript index 2a039423637..3df0ab91573 100644 --- a/wscript +++ b/wscript @@ -199,14 +199,6 @@ def _getbuildnumber(): # FIXME implement for git return rev Utils.getbuildnumber = _getbuildnumber -def _subst_add_destdir(x,bld): - a = "${DESTDIR}" + x - a = a.replace("${DESTDIR}",Options.options.destdir) - a = Utils.subst_vars(a,bld.env) - if a.startswith("//"): a = a[1:] - return a -Build.BuildContext.subst_add_destdir = staticmethod(_subst_add_destdir) - def mkdir_p(directory): if not _isdir(directory): Utils.pprint("GREEN","Creating directory %s and necessary parents"%directory) @@ -360,48 +352,12 @@ def _substitute(self,listoffiles,install_to=None,cwd=None,dict=None,name=None,** if install_to is not None: self.install_as("%s/%s"%(install_to,inst), tgt, **kwargs) Build.BuildContext.substitute = _substitute -def _setownership(ctx,path,owner,group,mode=None): - def f(bld,path,owner,group,mode): - dochown = not Options.options.NOCHOWN \ - and hasattr(os,"getuid") and os.getuid() == 0 \ - and _chown \ - and _chmod \ - and pwd \ - and grp \ - and stat - if not dochown: return - - try: uid = 
pwd.getpwnam(owner).pw_uid - except KeyError,e: raise Utils.WafError("If installing as root, please either create a %s user or use the --nochown parameter of waf install to install the files as root"%owner) - try: gid = grp.getgrnam(group).gr_gid - except KeyError,e: raise Utils.WafError("If installing as root, please either create a %s group or use the --nochown parameter of waf install to install the files as root"%group) - - path = _subst_add_destdir(path,bld) - current_uid,current_gid = os.stat(path).st_uid,os.stat(path).st_gid - if current_uid != uid: - Utils.pprint("GREEN","* setting owner of %s to UID %s"%(path,uid)) - _chown(path,uid,current_gid) - current_uid = uid - if current_gid != gid: - Utils.pprint("GREEN","* setting group of %s to GID %s"%(path,gid)) - _chown(path,current_uid,gid) - current_gid = gid - if mode is not None: - current_mode = stat.S_IMODE(os.stat(path).st_mode) - if current_mode != mode: - Utils.pprint("GREEN","* adjusting permissions on %s to mode %o"%(path,mode)) - _chmod(path,mode) - current_mode = mode - - if Options.is_install: - ctx.add_post_fun(lambda ctx: f(ctx,path,owner,group,mode)) -Build.BuildContext.setownership = _setownership - def set_options(opt): """Register command line options""" opt.tool_options('gnu_dirs') opt.tool_options('tar',tooldir='tools/waf') opt.tool_options('mkisofs',tooldir='tools/waf') + opt.tool_options('usermgmt',tooldir='tools/waf') if platform.system() not in ['Windows',"Darwin"]: opt.tool_options('compiler_cc') opt.tool_options('python') opt.tool_options('tomcat',tooldir='tools/waf') @@ -439,11 +395,6 @@ def set_options(opt): default = False, dest = 'NODEPCHECK') inst_dir = opt.get_option_group('--force') # get the group that contains the force - inst_dir.add_option('--nochown', - action='store_true', - help = 'skip chown and chmod upon install (skipped on Windows or by non-root users by default)', - default = False, - dest = 'NOCHOWN') inst_dir.add_option('--preserve-config', action='store_true', 
help = 'do not install configuration files', diff --git a/wscript_build b/wscript_build index d6da48c6934..39aa0c7ec67 100644 --- a/wscript_build +++ b/wscript_build @@ -287,6 +287,8 @@ if buildpremium: # =================== Empty directory / symlink creation on install target ==================== +bld.createuser(bld.env.MSUSER,bld.env.MSENVIRON,'/bin/sh') + # 7. make log and cache dirs (this actually runs first) if bld.env.DISTRO in 'Windows Mac': pass else: diff --git a/wscript_configure b/wscript_configure index 07eaffceddb..068abb776b9 100644 --- a/wscript_configure +++ b/wscript_configure @@ -112,6 +112,7 @@ conf.check_message_2("%s"%conf.env.PREFIX,"GREEN") conf.check_tool('misc') conf.check_tool("gnu_dirs") conf.check_tool('tar') +conf.check_tool('usermgmt') try: conf.check_tool('mkisofs') except Configure.ConfigurationError,e: raise Configure.ConfigurationError, "The program genisoimage (or mkisofs) could not be found.\nOn Linux: ./waf installrpmdeps or ./waf installdebdeps according to your distro's package format.\nOn Windows: Use cygwin to install the mkisofs package, then ensure that the program is in your PATH." From 6fb45ce3c66334c1fc071b41bfc04af445ef30ba Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Fri, 3 Sep 2010 17:03:47 -0700 Subject: [PATCH 071/145] Move SSH key pair generation into the management server, make it on-demand rather than upon package installation. 
--- cloud.spec | 1 - debian/cloud-client.postinst | 2 -- .../cloud/server/ConfigurationServerImpl.java | 18 +++++++++++------- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/cloud.spec b/cloud.spec index ae49260915f..fb147f8b5e6 100644 --- a/cloud.spec +++ b/cloud.spec @@ -373,7 +373,6 @@ if [ "$1" == "1" ] ; then /sbin/chkconfig --add %{name}-management > /dev/null 2>&1 || true /sbin/chkconfig --level 345 %{name}-management on > /dev/null 2>&1 || true fi -test -f %{_sharedstatedir}/%{name}/management/.ssh/id_rsa || su - %{name} -c 'yes "" 2>/dev/null | ssh-keygen -t rsa -q -N ""' < /dev/null diff --git a/debian/cloud-client.postinst b/debian/cloud-client.postinst index ce3ebc3da6d..af731f19be7 100644 --- a/debian/cloud-client.postinst +++ b/debian/cloud-client.postinst @@ -17,8 +17,6 @@ case "$1" in chgrp cloud $i done - test -f /var/lib/cloud/management/.ssh/id_rsa || su - cloud -c 'yes "" | ssh-keygen -t rsa -q -N ""' < /dev/null - for i in /etc/cloud/management/db.properties do chmod 0640 $i diff --git a/server/src/com/cloud/server/ConfigurationServerImpl.java b/server/src/com/cloud/server/ConfigurationServerImpl.java index 8a16cc25108..63ff6a811df 100644 --- a/server/src/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/com/cloud/server/ConfigurationServerImpl.java @@ -420,10 +420,12 @@ public class ConfigurationServerImpl implements ConfigurationServer { String homeDir = Script.runSimpleBashScript("echo ~"); if (homeDir == "~") { - s_logger.warn("No home directory was detected. Trouble with SSH keys ahead."); - return; + s_logger.error("No home directory was detected. Set the HOME environment variable to point to your user profile or home directory."); + throw new RuntimeException("No home directory was detected. 
Set the HOME environment variable to point to your user profile or home directory."); } + String keygenOutput = Script.runSimpleBashScript("if [ -f ~/.ssh/id_rsa ] ; then true ; else yes '' | ssh-keygen -t rsa -q -O no-pty ; fi"); + File privkeyfile = new File(homeDir + "/.ssh/id_rsa"); File pubkeyfile = new File(homeDir + "/.ssh/id_rsa.pub"); byte[] arr1 = new byte[4094]; // configuration table column value size @@ -431,8 +433,8 @@ public class ConfigurationServerImpl implements ConfigurationServer { new DataInputStream(new FileInputStream(privkeyfile)).readFully(arr1); } catch (EOFException e) { } catch (Exception e) { - s_logger.warn("Cannot read the private key file",e); - return; + s_logger.error("Cannot read the private key file",e); + throw new RuntimeException("Cannot read the private key file"); } String privateKey = new String(arr1).trim(); byte[] arr2 = new byte[4094]; // configuration table column value size @@ -441,7 +443,7 @@ public class ConfigurationServerImpl implements ConfigurationServer { } catch (EOFException e) { } catch (Exception e) { s_logger.warn("Cannot read the public key file",e); - return; + throw new RuntimeException("Cannot read the public key file"); } String publicKey = new String(arr2).trim(); @@ -458,7 +460,8 @@ public class ConfigurationServerImpl implements ConfigurationServer { s_logger.debug("Private key inserted into database"); } } catch (SQLException ex) { - s_logger.warn("SQL of the private key failed",ex); + s_logger.error("SQL of the private key failed",ex); + throw new RuntimeException("SQL of the private key failed"); } try { @@ -468,7 +471,8 @@ public class ConfigurationServerImpl implements ConfigurationServer { s_logger.debug("Public key inserted into database"); } } catch (SQLException ex) { - s_logger.warn("SQL of the public key failed",ex); + s_logger.error("SQL of the public key failed",ex); + throw new RuntimeException("SQL of the public key failed"); } } } From 2886a6dd3a8d364ad1cdfefc1d959cf7e54299f8 Mon Sep 
17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Fri, 3 Sep 2010 17:08:27 -0700 Subject: [PATCH 072/145] Remove obsolete documentation files --- HACKING | 652 --------------------------------------------------- INSTALL | 155 ------------ README | 52 ---- cloud.spec | 86 ------- debian/rules | 2 +- 5 files changed, 1 insertion(+), 946 deletions(-) delete mode 100644 HACKING delete mode 100644 INSTALL delete mode 100644 README diff --git a/HACKING b/HACKING deleted file mode 100644 index b6a16c3ef5e..00000000000 --- a/HACKING +++ /dev/null @@ -1,652 +0,0 @@ ---------------------------------------------------------------------- -THE QUICK GUIDE TO CLOUDSTACK DEVELOPMENT ---------------------------------------------------------------------- - - -=== Overview of the development lifecycle === - -To hack on a CloudStack component, you will generally: - -1. Configure the source code: - ./waf configure --prefix=/home/youruser/cloudstack - (see below, "./waf configure") - -2. Build and install the CloudStack - ./waf install - (see below, "./waf install") - -3. Set the CloudStack component up - (see below, "Running the CloudStack components from source") - -4. Run the CloudStack component - (see below, "Running the CloudStack components from source") - -5. Modify the source code - -6. Build and install the CloudStack again - ./waf install --preserve-config - (see below, "./waf install") - -7. GOTO 4 - - -=== What is this waf thing in my development lifecycle? === - -waf is a self-contained, advanced build system written by Thomas Nagy, -in the spirit of SCons or the GNU autotools suite. - -* To run waf on Linux / Mac: ./waf [...commands...] -* To run waf on Windows: waf.bat [...commands...] - -./waf --help should be your first discovery point to find out both the -configure-time options and the different processes that you can run -using waf. - - -=== What do the different waf commands above do? === - -1. 
./waf configure --prefix=/some/path - - You run this command *once*, in preparation to building, or every - time you need to change a configure-time variable. - - This runs configure() in wscript, which takes care of setting the - variables and options that waf will use for compilation and - installation, including the installation directory (PREFIX). - - For convenience reasons, if you forget to run configure, waf - will proceed with some default configuration options. By - default, PREFIX is /usr/local, but you can set it e.g. to - /home/youruser/cloudstack if you plan to do a non-root - install. Be ware that you can later install the stack as a - regular user, but most components need to *run* as root. - - ./waf showconfig displays the values of the configure-time options - -2. ./waf - - You run this command to trigger compilation of the modified files. - - This runs the contents of wscript_build, which takes care of - discovering and describing what needs to be built, which - build products / sources need to be installed, and where. - -3. ./waf install - - You run this command when you want to install the CloudStack. - - If you are going to install for production, you should run this - process as root. If, conversely, you only want to install the - stack as your own user and in a directory that you have write - permission, it's fine to run waf install as your own user. - - This runs the contents of wscript_build, with an option variable - Options.is_install = True. When this variable is set, waf will - install the files described in wscript_build. For convenience - reasons, when you run install, any files that need to be recompiled - will also be recompiled prior to installation. - - -------------------- - - WARNING: each time you do ./waf install, the configuration files - in the installation directory are *overwritten*. - - There are, however, two ways to get around this: - - a) ./waf install has an option --preserve-config. 
If you pass - this option when installing, configuration files are never - overwritten. - - This option is useful when you have modified source files and - you need to deploy them on a system that already has the - CloudStack installed and configured, but you do *not* want to - overwrite the existing configuration of the CloudStack. - - If, however, you have reconfigured and rebuilt the source - since the last time you did ./waf install, then you are - advised to replace the configuration files and set the - components up again, because some configuration files - in the source use identifiers that may have changed during - the last ./waf configure. So, if this is your case, check - out the next way: - - b) Every configuration file can be overridden in the source - without touching the original. - - - Look for said config file X (or X.in) in the source, then - - create an override/ folder in the folder that contains X, then - - place a file named X (or X.in) inside override/, then - - put the desired contents inside X (or X.in) - - Now, every time you run ./waf install, the file that will be - installed is path/to/override/X.in, instead of /path/to/X.in. - - This option is useful if you are developing the CloudStack - and constantly reinstalling it. It guarantees that every - time you install the CloudStack, the installation will have - the correct configuration and will be ready to run. - - -=== Running the CloudStack components from source (for debugging / coding) === - -It is not technically possible to run the CloudStack components from -the source. That, however, is fine -- each component can be run -independently from the install directory: - -- Management Server - - 1) Execute ./waf install as your current user (or as root if the - installation path is only writable by root). - - WARNING: if any CloudStack configuration files have been - already configured / altered, they will be *overwritten* by this - process. 
Append --preserve-config to ./waf install to prevent this - from happening. Or resort to the override method discussed - above (search for "override" in this document). - - 2) If you haven't done so yet, set up the management server database: - - - either run ./waf deploydb_kvm, or - - run $BINDIR/cloud-setup-databases - - 3) Execute ./waf run as your current user (or as root if the - installation path is only writable by root). Alternatively, - you can use ./waf debug and this will run with debugging enabled. - - -- Agent (Linux-only): - - 1) Execute ./waf install as your current user (or as root if the - installation path is only writable by root). - - WARNING: if any CloudStack configuration files have been - already configured / altered, they will be *overwritten* by this - process. Append --preserve-config to ./waf install to prevent this - from happening. Or resort to the override method discussed - above (search for "override" in this document). - - 2) If you haven't done so yet, set the Agent up: - - - run $BINDIR/cloud-setup-agent - - 3) Execute ./waf run_agent as root - - this will launch sudo and require your root password unless you have - set sudo up not to ask for it - - -- Console Proxy (Linux-only): - - 1) Execute ./waf install as your current user (or as root if the - installation path is only writable by root). - - WARNING: if any CloudStack configuration files have been - already configured / altered, they will be *overwritten* by this - process. Append --preserve-config to ./waf install to prevent this - from happening. Or resort to the override method discussed - above (search for "override" in this document). 
- - 2) If you haven't done so yet, set the Console Proxy up: - - - run $BINDIR/cloud-setup-console-proxy - - 3) Execute ./waf run_console_proxy - - this will launch sudo and require your root password unless you have - set sudo up not to ask for it - - ---------------------------------------------------------------------- -BUILD SYSTEM TIPS ---------------------------------------------------------------------- - - -=== Integrating compilation and execution of each component into Eclipse === - -To run the Management Server from Eclipse, set up an External Tool of the -Program variety. Put the path to the waf binary in the Location of the -window, and the source directory as Working Directory. Then specify -"install --preserve-config run" as arguments (without the quotes). You can -now use the Run button in Eclipse to execute the Management Server directly -from Eclipse. You can replace run with debug if you want to run the -Management Server with the Debugging Proxy turned on. - -To run the Agent or Console Proxy from Eclipse, set up an External Tool of -the Program variety just like in the Management Server case. In there, -however, specify "install --preserve-config run_agent" or -"install --preserve-config run_console_proxy" as arguments instead. -Remember that you need to set sudo up to not ask you for a password and not -require a TTY, otherwise sudo -- implicitly called by waf run_agent or -waf run_console_proxy -- will refuse to work. - - -=== Building targets selectively === - -You can find out the targets of the build system: - -./waf list_targets - -If you want to run a specific task generator, - -./waf build --targets=patchsubst - -should run just that one (and whatever targets are required to build that -one, of course). 
- - -=== Common targets === - -* ./waf configure: you must always run configure once, and provide it with - the target installation paths for when you run install later - o --help: will show you all the configure options - o --no-dep-check: will skip dependency checks for java packages - needed to compile (saves 20 seconds when redoing the configure) - o --with-db-user, --with-db-pw, --with-db-host: informs the build - system of the MySQL configuration needed to set up the management - server upon install, and to do deploydb - -* ./waf build: will compile any source files (and, on some projects, will - also perform any variable substitutions on any .in files such as the - MANIFEST files). Build outputs will be in /artifacts/default. - -* ./waf install: will compile if not compiled yet, then execute an install - of the built targets. I had to write a significantly large amount of code - (that is, couple tens of lines of code) to make install work. - -* ./waf run: will run the management server in the foreground - -* ./waf debug: will run the management server in the foreground, and open - port 8787 to connect with the debugger (see the Run / debug options of - waf --help to change that port) - -* ./waf deploydb: deploys the database using the MySQL configuration supplied - with the configuration options when you did ./waf configure. RUN WAF BUILD - FIRST AT LEAST ONCE. - -* ./waf dist: create a source tarball. These tarballs will be distributed - independently on our Web site, and will form the source release of the - Cloud Stack. It is a self-contained release that can be ./waf built and - ./waf installed everywhere. 
- -* ./waf clean: remove known build products - -* ./waf distclean: remove the artifacts/ directory altogether - -* ./waf uninstall: uninstall all installed files - -* ./waf rpm: build RPM packages - o if the build fails because the system lacks dependencies from our - other modules, waf will attempt to install RPMs from the repos, - then try the build - o it will place the built packages in artifacts/rpmbuild/ - -* ./waf deb: build Debian packages - o if the build fails because the system lacks dependencies from our - other modules, waf will attempt to install DEBs from the repos, - then try the build - o it will place the built packages in artifacts/debbuild/ - -* ./waf uninstallrpms: removes all Cloud.com RPMs from a system (but not - logfiles or modified config files) - -* ./waf viewrpmdeps: displays RPM dependencies declared in the RPM specfile - -* ./waf installrpmdeps: runs Yum to install the packages required to build - the CloudStack - -* ./waf uninstalldebs: removes all Cloud.com DEBs from a system (AND logfiles - AND modified config files) -* ./waf viewdebdeps: displays DEB dependencies declared in the project - debian/control file - -* ./waf installdebdeps: runs aptitude to install the packages required to - build our software - - -=== Overriding certain source files === - -Earlier in this document we explored overriding configuration files. -Overrides are not limited to configuration files. - -If you want to provide your own server-setup.xml or SQL files in client/setup: - - * create a directory override inside the client/setup folder - * place your file that should override a file in client/setup there - -There's also override support in client/tomcatconf and agent/conf. - - -=== Environment substitutions === - -Any file named "something.in" has its tokens (@SOMETOKEN@) automatically -substituted for the corresponding build environment variable. 
The build -environment variables are generally constructed at configure time and -controllable by the --command-line-parameters to waf configure, and should -be available as a list of variables inside the file -artifacts/c4che/build.default.py. - - -=== The prerelease mechanism === - -The prerelease mechanism (--prerelease=BRANCHNAME) allows developers and -builders to build packages with pre-release Release tags. The Release tags -are constructed in such a way that both the build number and the branch name -is included, so developers can push these packages to repositories and upgrade -them using yum or aptitude without having to delete packages manually and -install packages manually every time a new build is done. Any package built -with the prerelease mechanism gets a standard X.Y.Z version number -- and, -due to the way that the prerelease Release tags are concocted, always upgrades -any older prerelease package already present on any system. The prerelease -mechanism must never be used to create packages that are intended to be -released as stable software to the general public. - -Relevant documentation: - - http://www.debian.org/doc/debian-policy/ch-controlfields.html#s-f-Version - http://fedoraproject.org/wiki/PackageNamingGuidelines#Pre-Release_packages - -Everything comes together on the build server in the following way: - - -=== SCCS info === - -When building a source distribution (waf dist), or RPM/DEB distributions -(waf deb / waf rpm), waf will automatically detect the relevant source code -control information if the git command is present on the machine where waf -is run, and it will write the information to a file called sccs-info inside -the source tarball / install it into /usr/share/doc/cloud*/sccs-info when -installing the packages. 
- -If this source code conrol information cannot be calculated, then the old -sccs-info file is preserved across dist runs if it exists, and if it did -not exist before, the fact that the source could not be properly tracked -down to a repository is noted in the file. - - -=== Debugging the build system === - -Almost all targets have names. waf build -vvvvv --zones=task will give you -the task names that you can use in --targets. - - ---------------------------------------------------------------------- -UNDERSTANDING THE BUILD SYSTEM ---------------------------------------------------------------------- - - -=== Documentation for the build system === - -The first and foremost reference material: - -- http://freehackers.org/~tnagy/wafbook/index.html - -Examples - -- http://code.google.com/p/waf/wiki/CodeSnippets -- http://code.google.com/p/waf/w/list - -FAQ - -- http://code.google.com/p/waf/wiki/FAQ - - -=== Why waf === - -The CloudStack uses waf to build itself. waf is a relative newcomer -to the build system world; it borrows concepts from SCons and -other later-generation build systems: - -- waf is very flexible and rich; unlike other build systems, it covers - the entire life cycle, from compilation to installation to - uninstallation. it also supports dist (create source tarball), - distcheck (check that the source tarball compiles and installs), - autoconf-like checks for dependencies at compilation time, - and more. - -- waf is self-contained. A single file, distributed with the project, - enables everything to be built, with only a dependency on Python, - which is freely available and shipped in all Linux computers. - -- waf also supports building projects written in multiple languages - (in the case of the CloudStack, we build from C, Java and Python). - -- since waf is written in Python, the entire library of the Python - language is available to use in the build process. - - -=== Hacking on the build system: what are these wscript files? === - -1. 
wscript: contains most commands you can run from within waf -2. wscript_configure: contains the process that discovers the software - on the system and configures the build to fit that -2. wscript_build: contains a manifest of *what* is built and installed - -Refer to the waf book for general information on waf: - http://freehackers.org/~tnagy/wafbook/index.html - - -=== What happens when waf runs === - -When you run waf, this happens behind the scenes: - -- When you run waf for the first time, it unpacks itself to a hidden - directory .waf-1.X.Y.MD5SUM, including the main program and all - the Python libraries it provides and needs. - -- Immediately after unpacking itself, waf reads the wscript file - at the root of the source directory. After parsing this file and - loading the functions defined here, it reads wscript_build and - generates a function build() based on it. - -- After loading the build scripts as explained above, waf calls - the functions you specified in the command line. - -So, for example, ./waf configure build install will: - -* call configure() from wscript, -* call build() loaded from the contents of wscript_build, -* call build() once more but with Options.is_install = True. - -As part of build(), waf invokes ant to build the Java portion of our -stack. - - -=== How and why we use ant within waf === - -By now, you have probably noticed that we do, indeed, ship ant -build files in the CloudStack. During the build process, waf calls -ant directly to build the Java portions of our stack, and it uses -the resulting JAR files to perform the installation. - -The reason we do this rather than use the native waf capabilities -for building Java projects is simple: by using ant, we can leverage -the support built-in for ant in Eclipse and many other IDEs. Another -reason to do this is because Java developers are familiar with ant, -so adding a new JAR file or modifying what gets built into the -existing JAR files is facilitated for Java developers. 
- -If you add to the ant build files a new ant target that uses the -compile-java macro, waf will automatically pick it up, along with its -depends= and JAR name attributes. In general, all you need to do is -add the produced JAR name to the packaging manifests (cloud.spec and -debian/{name-of-package}.install). - - ---------------------------------------------------------------------- -FOR ANT USERS ---------------------------------------------------------------------- - - -If you are using Ant directly instead of using waf, these instructions apply to you: - -in this document, the example instructions are based on local source repository rooted at c:\root. You are free to locate it to anywhere you'd like to. -3.1 Setup developer build type - - 1) Go to c:\cloud\java\build directory - - 2) Copy file build-cloud.properties.template to file build-cloud.properties, then modify some of the parameters to match your local setup. The template properties file should have content as - - debug=true - debuglevel=lines,vars,source - tomcat.home=$TOMCAT_HOME --> change to your local Tomcat root directory such as c:/apache-tomcat-6.0.18 - debug.jvmarg=-Xrunjdwp:transport=dt_socket,address=8787,server=y,suspend=n - deprecation=off - build.type=developer - target.compat.version=1.5 - source.compat.version=1.5 - branding.name=default - - 3) Make sure the following Environment variables and Path are set: - -set enviroment variables: -CATALINA_HOME: -JAVA_HOME: -CLOUD_HOME: -MYSQL_HOME: - -update the path to include - -MYSQL_HOME\bin - - 4) Clone a full directory tree of C:\cloud\java\build\deploy\production to C:\cloud\java\build\deploy\developer - - You can use Windows Explorer to copy the directory tree over. Please note, during your daily development process, whenever you see updates in C:\cloud\java\build\deploy\production, be sure to sync it into C:\cloud\java\build\deploy\developer. 
-3.2 Common build instructions - -After you have setup the build type, you are ready to perform build and run Management Server alone locally. - -cd java -python waf configure build install - -More at Build system. - -Will install the management server and its requisites to the appropriate place (your Tomcat instance on Windows, /usr/local on Linux). It will also install the agent to /usr/local/cloud/agent (this will change in the future). -4. Database and Server deployment - -After a successful management server build (database deployment scripts use some of the artifacts from build process), you can use database deployment script to deploy and initialize the database. You can find the deployment scripts in C:/cloud/java/build/deploy/db. deploy-db.sh is used to create, populate your DB instance. Please take a look at content of deploy-db.sh for more details - -Before you run the scripts, you should edit C:/cloud/java/build/deploy/developer/db/server-setup-dev.xml to allocate Public and Private IP ranges for your development setup. Ensure that the ranges you pick are unallocated to others. - -Customized VM templates to be populated are in C:/cloud/java/build/deploy/developer/db/templates-dev.sql Edit this file to customize the templates to your needs. - -Deploy the DB by running - -./deploy-db.sh ../developer/db/server-setup-dev.xml ../developer/db/templates-dev.xml -4.1. Management Server Deployment - -ant build-server - -Build Management Server - -ant deploy-server - -Deploy Management Server software to Tomcat environment - -ant debug - -Start Management Server in debug mode. The JVM debug options can be found in cloud-build.properties - -ant run - -Start Management Server in normal mode. - -5. 
Agent deployment - -After a successful build process, you should be able to find build artifacts at distribution directory, in this example case, for developer build type, the artifacts locate at c:\cloud\java\dist\developer, particularly, if you have run - -ant package-agent build command, you should see the agent software be packaged in a single file named agent.zip under c:\cloud\java\dist\developer, together with the agent deployment script deploy-agent.sh. -5.1 Agent Type - -Agent software can be deployed and configured to serve with different roles at run time. In current implementation, there are 3 types of agent configuration, respectively called as Computing Server, Routing Server and Storage Server. - - * When agent software is configured to run as Computing server, it is responsible to host user VMs. Agent software should be running in Xen Dom0 system on computer server machine. - - * When agent software is configured to run as Routing Server, it is responsible to host routing VMs for user virtual network and console proxy system VMs. Routing server serves as the bridge to outside network, the machine that agent software is running should have at least two network interfaces, one towards outside network, one participates the internal VMOps management network. Like computer server, agent software on routing server should also be running in Xen Dom0 system. - - * When agent software is configured to run as Storage server, it is responsible to provide storage service for all VMs. The storage service is based on ZFS running on a Solaris system, agent software on storage server is therefore running under Solaris (actually a Solaris VM), Dom0 systems on computing server and routing server can access the storage service through iScsi initiator. The storage volume will be eventually mounted on Dom0 system and make available to DomU VMs through our agent software. 
- -5.2 Resource sharing - -All developers can share the same set of agent server machines for development, to make this possible, the concept of instance appears in various places - - * VM names. VM names are structual names, it contains a instance section that can identify VMs from different VMOps cloud instances. VMOps cloud instance name is configured in server configuration parameter AgentManager/instance.name - * iScsi initiator mount point. For Computing servers and Routing servers, the mount point can distinguish the mounted DomU VM images from different agent deployments. The mount location can be specified in agent.properties file with a name-value pair named mount.parent - * iScsi target allocation point. For storage servers, this allocation point can distinguish the storage allocation from different storage agent deployments. The allocation point can be specified in agent.properties file with a name-value pair named parent - -5.4 Deploy agent software - -Before running the deployment scripts, first copy the build artifacts agent.zip and deploy-agent.sh to your personal development directory on agent server machines. By our current convention, you can create your personal development directory that usually locates at /root/your name. In following example, the agent package and deployment scripts are copied to test0.lab.vmops.com and the deployment script file has been marked as executible. 
- - On build machine, - - scp agent.zip root@test0:/root/your name - - scp deploy-agent.sh root@test0:/root/your name - - On agent server machine - -chmod +x deploy-agent.sh -5.4.1 Deploy agent on computing server - -deploy-agent.sh -d /root//agent -h -t computing -m expert -5.4.2 Deploy agent on routing server - -deploy-agent.sh -d /root//agent -h -t routing -m expert -5.4.3 Deploy agent on storage server - -deploy-agent.sh -d /root//agent -h -t storage -m expert -5.5 Configure agent - -After you have deployed the agent software, you should configure the agent by editing the agent.properties file under /root//agent/conf directory on each of the Routing, Computing and Storage servers. Add/Edit following properties. The rest are defaults that get populated by the agent at runtime. - workers=3 - host= - port=8250 - pod= - zone= - instance= - developer=true - -Following is a sample agent.properties file for Routing server - - workers=3 - id=1 - port=8250 - pod=RC - storage=comstar - zone=RC - type=routing - private.network.nic=xenbr0 - instance=RC - public.network.nic=xenbr1 - developer=true - host=192.168.1.138 -5.5 Running agent - -Edit /root//agent/conf/log4j-cloud.xml to update the location of logs to somewhere under /root/ - -Once you have deployed and configured the agent software, you are ready to launch it. Under the agent root directory (in our example, /root//agent. there is a scrip file named run.sh, you can use it to launch the agent. - -Launch agent in detached background process - -nohup ./run.sh & - -Launch agent in interactive mode - -./run.sh - -Launch agent in debug mode, for example, following command makes JVM listen at TCP port 8787 - -./run.sh -Xrunjdwp:transport=dt_socket,address=8787,server=y,suspend=n - -If agent is launched in debug mode, you may use Eclipse IDE to remotely debug it, please note, when you are sharing agent server machine with others, choose a TCP port that is not in use by someone else. 
- -Please also note that, run.sh also searches for /etc/cloud directory for agent.properties, make sure it uses the correct agent.properties file! -5.5. Stopping the Agents - -the pid of the agent process is in /var/run/agent..pid - -To Stop the agent: - -kill - - \ No newline at end of file diff --git a/INSTALL b/INSTALL deleted file mode 100644 index bcf10e20b23..00000000000 --- a/INSTALL +++ /dev/null @@ -1,155 +0,0 @@ ---------------------------------------------------------------------- -TABLE OF CONTENTS ---------------------------------------------------------------------- - - -1. Really quick start: building and installing a production stack -2. Post-install: setting the CloudStack components up -3. Installation paths: where the stack is installed on your system -4. Uninstalling the CloudStack from your system - - ---------------------------------------------------------------------- -REALLY QUICK START: BUILDING AND INSTALLING A PRODUCTION STACK ---------------------------------------------------------------------- - - -You have two options. Choose one: - -a) Building distribution packages from the source and installing them -b) Building from the source and installing directly from there - - -=== I want to build and install distribution packages === - -This is the recommended way to run your CloudStack cloud. The -advantages are that dependencies are taken care of automatically -for you, and you can verify the integrity of the installed files -using your system's package manager. - -1. As root, install the build dependencies. - - a) Fedora / CentOS: ./waf installrpmdeps - - b) Ubuntu: ./waf installdebdeps - -2. As a non-root user, build the CloudStack packages. - - a) Fedora / CentOS: ./waf rpm - - b) Ubuntu: ./waf deb - -3. As root, install the CloudStack packages. - You can choose which components to install on your system. 
- - a) Fedora / CentOS: the installable RPMs are in artifacts/rpmbuild - install as root: rpm -ivh artifacts/rpmbuild/RPMS/{x86_64,noarch,i386}/*.rpm - - b) Ubuntu: the installable DEBs are in artifacts/debbuild - install as root: dpkg -i artifacts/debbuild/*.deb - -4. Configure and start the components you intend to run. - Consult the Installation Guide to find out how to - configure each component, and "Installation paths" for information - on where programs, initscripts and config files are installed. - - -=== I want to build and install directly from the source === - -This is the recommended way to run your CloudStack cloud if you -intend to modify the source, if you intend to port the CloudStack to -another distribution, or if you intend to run the CloudStack on a -distribution for which packages are not built. - -1. As root, install the build dependencies. - See below for a list. - -2. As non-root, configure the build. - See below to discover configuration options. - - ./waf configure - -3. As non-root, build the CloudStack. - To learn more, see "Quick guide to developing, building and - installing from source" below. - - ./waf build - -4. As root, install the runtime dependencies. - See below for a list. - -5. As root, Install the CloudStack - - ./waf install - -6. Configure and start the components you intend to run. - Consult the Installation Guide to find out how to - configure each component, and "Installation paths" for information - on where to find programs, initscripts and config files mentioned - in the Installation Guide (paths may vary). - - -=== Dependencies of the CloudStack === - -- Build dependencies: - - 1. FIXME DEPENDENCIES LIST THEM HERE - -- Runtime dependencies: - - 2. 
FIXME DEPENDENCIES LIST THEM HERE - - ---------------------------------------------------------------------- -INSTALLATION PATHS: WHERE THE STACK IS INSTALLED ON YOUR SYSTEM ---------------------------------------------------------------------- - - -The CloudStack build system installs files on a variety of paths, each -one of which is selectable when building from source. - -- $PREFIX: - the default prefix where the entire stack is installed - defaults to /usr/local on source builds - defaults to /usr on package builds - -- $SYSCONFDIR/cloud: - - the prefix for CloudStack configuration files - defaults to $PREFIX/etc/cloud on source builds - defaults to /etc/cloud on package builds - -- $SYSCONFDIR/init.d: - the prefix for CloudStack initscripts - defaults to $PREFIX/etc/init.d on source builds - defaults to /etc/init.d on package builds - -- $BINDIR: - the CloudStack installs programs there - defaults to $PREFIX/bin on source builds - defaults to /usr/bin on package builds - -- $LIBEXECDIR: - the CloudStack installs service runners there - defaults to $PREFIX/libexec on source builds - defaults to /usr/libexec on package builds (/usr/bin on Ubuntu) - - ---------------------------------------------------------------------- -UNINSTALLING THE CLOUDSTACK FROM YOUR SYSTEM ---------------------------------------------------------------------- - - -- If you installed the CloudStack using packages, use your operating - system package manager to remove the CloudStack packages. - - a) Fedora / CentOS: the installable RPMs are in artifacts/rpmbuild - as root: rpm -qa | grep ^cloud- | xargs rpm -e - - b) Ubuntu: the installable DEBs are in artifacts/debbuild - aptitude purge '~ncloud' - -- If you installed from a source tree: - - ./waf uninstall - diff --git a/README b/README deleted file mode 100644 index b0478ff475f..00000000000 --- a/README +++ /dev/null @@ -1,52 +0,0 @@ -Hello, and thanks for downloading the Cloud.com CloudStack™! 
The -Cloud.com CloudStack™ is Open Source Software that allows -organizations to build Infrastructure as a Service (Iaas) clouds. -Working with server, storage, and networking equipment of your -choice, the CloudStack provides a turn-key software stack that -dramatically simplifies the process of deploying and managing a -cloud. - - ---------------------------------------------------------------------- -HOW TO INSTALL THE CLOUDSTACK ---------------------------------------------------------------------- - - -Please refer to the document INSTALL distributed with the source. - - ---------------------------------------------------------------------- -HOW TO HACK ON THE CLOUDSTACK ---------------------------------------------------------------------- - - -Please refer to the document HACKING distributed with the source. - - ---------------------------------------------------------------------- -BE PART OF THE CLOUD.COM COMMUNITY! ---------------------------------------------------------------------- - - -We are more than happy to have you ask us questions, hack our source -code, and receive your contributions. - -* Our forums are available at http://cloud.com/community . -* If you would like to modify / extend / hack on the CloudStack source, - refer to the file HACKING for more information. -* If you find bugs, please log on to http://bugs.cloud.com/ and file - a report. -* If you have patches to send us get in touch with us at info@cloud.com - or file them as attachments in our bug tracker above. - - ---------------------------------------------------------------------- -Cloud.com's contact information is: - -20400 Stevens Creek Blvd -Suite 390 -Cupertino, CA 95014 -Tel: +1 (888) 384-0962 - -This software is OSI certified Open Source Software. OSI Certified is a -certification mark of the Open Source Initiative. 
diff --git a/cloud.spec b/cloud.spec index fb147f8b5e6..4b85ee7bde3 100644 --- a/cloud.spec +++ b/cloud.spec @@ -456,30 +456,17 @@ fi %doc %{_docdir}/%{name}-%{version}/sccs-info %doc %{_docdir}/%{name}-%{version}/version-info %doc %{_docdir}/%{name}-%{version}/configure-info -%doc README -%doc INSTALL -%doc HACKING %doc README.html %doc debian/copyright %files client-ui %defattr(0644,root,root,0755) %{_datadir}/%{name}/management/webapps/client/* -%doc README -%doc INSTALL -%doc HACKING -%doc README.html -%doc debian/copyright %files server %defattr(0644,root,root,0755) %{_javadir}/%{name}-server.jar %{_sysconfdir}/%{name}/server/* -%doc README -%doc INSTALL -%doc HACKING -%doc README.html -%doc debian/copyright %files agent-scripts %defattr(-,root,root,-) @@ -497,20 +484,10 @@ fi %endif %{_libdir}/%{name}/agent/vms/systemvm.zip %{_libdir}/%{name}/agent/vms/systemvm.iso -%doc README -%doc INSTALL -%doc HACKING -%doc README.html -%doc debian/copyright %files daemonize %defattr(-,root,root,-) %attr(755,root,root) %{_bindir}/%{name}-daemonize -%doc README -%doc INSTALL -%doc HACKING -%doc README.html -%doc debian/copyright %files deps %defattr(0644,root,root,0755) @@ -531,39 +508,20 @@ fi %{_javadir}/%{name}-xenserver-5.5.0-1.jar %{_javadir}/%{name}-xmlrpc-common-3.*.jar %{_javadir}/%{name}-xmlrpc-client-3.*.jar -%doc README -%doc INSTALL -%doc HACKING -%doc README.html -%doc debian/copyright %files core %defattr(0644,root,root,0755) %{_javadir}/%{name}-core.jar -%doc README -%doc INSTALL -%doc HACKING -%doc debian/copyright %files vnet %defattr(0644,root,root,0755) %attr(0755,root,root) %{_sbindir}/%{name}-vnetd %attr(0755,root,root) %{_sbindir}/%{name}-vn %attr(0755,root,root) %{_initrddir}/%{name}-vnetd -%doc README -%doc INSTALL -%doc HACKING -%doc README.html -%doc debian/copyright %files python %defattr(0644,root,root,0755) %{_prefix}/lib*/python*/site-packages/%{name}* -%doc README -%doc INSTALL -%doc HACKING -%doc README.html -%doc debian/copyright %files 
setup %attr(0755,root,root) %{_bindir}/%{name}-setup-databases @@ -584,11 +542,6 @@ fi %{_datadir}/%{name}/setup/schema-level.sql %{_datadir}/%{name}/setup/schema-21to22.sql %{_datadir}/%{name}/setup/data-21to22.sql -%doc README -%doc INSTALL -%doc HACKING -%doc README.html -%doc debian/copyright %files client %defattr(0644,root,root,0755) @@ -628,19 +581,10 @@ fi %dir %attr(770,root,%{name}) %{_localstatedir}/cache/%{name}/management/temp %dir %attr(770,root,%{name}) %{_localstatedir}/log/%{name}/management %dir %attr(770,root,%{name}) %{_localstatedir}/log/%{name}/agent -%doc README -%doc INSTALL -%doc HACKING -%doc README.html -%doc debian/copyright %files agent-libs %defattr(0644,root,root,0755) %{_javadir}/%{name}-agent.jar -%doc README -%doc INSTALL -%doc HACKING -%doc debian/copyright %files agent %defattr(0644,root,root,0755) @@ -656,11 +600,6 @@ fi %{_libdir}/%{name}/agent/images %attr(0755,root,root) %{_bindir}/%{name}-setup-agent %dir %attr(770,root,root) %{_localstatedir}/log/%{name}/agent -%doc README -%doc INSTALL -%doc HACKING -%doc README.html -%doc debian/copyright %files console-proxy %defattr(0644,root,root,0755) @@ -673,11 +612,6 @@ fi %{_libdir}/%{name}/console-proxy/* %attr(0755,root,root) %{_bindir}/%{name}-setup-console-proxy %dir %attr(770,root,root) %{_localstatedir}/log/%{name}/console-proxy -%doc README -%doc INSTALL -%doc HACKING -%doc README.html -%doc debian/copyright %if %{_premium} @@ -688,20 +622,10 @@ fi %{_sharedstatedir}/%{name}/test/* %{_libdir}/%{name}/test/* %{_sysconfdir}/%{name}/test/* -%doc README -%doc INSTALL -%doc HACKING -%doc README.html -%doc debian/copyright %files premium-deps %defattr(0644,root,root,0755) %{_javadir}/%{name}-premium/*.jar -%doc README -%doc INSTALL -%doc HACKING -%doc README.html -%doc debian/copyright %files premium %defattr(0644,root,root,0755) @@ -721,11 +645,6 @@ fi %{_libdir}/%{name}/agent/scripts/vm/hypervisor/xenserver/xenheartbeat.sh 
%{_libdir}/%{name}/agent/scripts/vm/hypervisor/xenserver/xenserver56/patch-premium %{_libdir}/%{name}/agent/scripts/vm/hypervisor/xenserver/xs_cleanup.sh -%doc README -%doc INSTALL -%doc HACKING -%doc README.html -%doc debian/copyright %files usage %defattr(0644,root,root,0755) @@ -736,11 +655,6 @@ fi %{_sysconfdir}/%{name}/usage/usage-components.xml %config(noreplace) %{_sysconfdir}/%{name}/usage/log4j-%{name}_usage.xml %config(noreplace) %attr(640,root,%{name}) %{_sysconfdir}/%{name}/usage/db.properties -%doc README -%doc INSTALL -%doc HACKING -%doc README.html -%doc debian/copyright %endif diff --git a/debian/rules b/debian/rules index c99b62b85a7..4f0fa109a82 100755 --- a/debian/rules +++ b/debian/rules @@ -91,7 +91,7 @@ binary-common: dh_testdir dh_testroot dh_installchangelogs - dh_installdocs -A README INSTALL HACKING README.html + dh_installdocs -A README.html # dh_installexamples # dh_installmenu # dh_installdebconf From 61dce9f31a8117e2896da09cb12e1cd703b79ce9 Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Fri, 3 Sep 2010 17:13:42 -0700 Subject: [PATCH 073/145] new UI - implement account detail panel. 
--- ui/new/index.jsp | 2 + ui/new/jsp/tab_account.jsp | 106 +++++++++++++++++++++++++++ ui/new/scripts/cloud.core.account.js | 12 +++ ui/new/scripts/cloud.core.init.js | 7 +- 4 files changed, 121 insertions(+), 6 deletions(-) create mode 100644 ui/new/jsp/tab_account.jsp create mode 100644 ui/new/scripts/cloud.core.account.js diff --git a/ui/new/index.jsp b/ui/new/index.jsp index 759b77129ce..29da536dc88 100644 --- a/ui/new/index.jsp +++ b/ui/new/index.jsp @@ -34,6 +34,8 @@ + + Cloud.com CloudStack diff --git a/ui/new/jsp/tab_account.jsp b/ui/new/jsp/tab_account.jsp new file mode 100644 index 00000000000..a4b8c117648 --- /dev/null +++ b/ui/new/jsp/tab_account.jsp @@ -0,0 +1,106 @@ + + +<%@ page import="java.util.*" %> +<%@ page import="com.cloud.utils.*" %> + +<% + + Locale browserLocale = request.getLocale(); + CloudResourceBundle t = CloudResourceBundle.getBundle("resources/resource", browserLocale); +%> + + +
+ +
+
+ <%=t.t("Details")%>
+
+
+
+
+
+ <%=t.t("Role")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Account")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Domain")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("VMs")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("IPs")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Bytes.Received")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Bytes.Sent")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("State")%>:
+
+
+
+
+
+
+
+
+ \ No newline at end of file diff --git a/ui/new/scripts/cloud.core.account.js b/ui/new/scripts/cloud.core.account.js new file mode 100644 index 00000000000..ed4475d3af1 --- /dev/null +++ b/ui/new/scripts/cloud.core.account.js @@ -0,0 +1,12 @@ +function loadAccountToRigntPanelFn($rightPanelContent) { + var jsonObj = $rightPanelContent.data("jsonObj"); + var $rightPanelContent = $("#right_panel_content"); + $rightPanelContent.find("#role").text(toRole(jsonObj.accounttype)); + $rightPanelContent.find("#account").text(jsonObj.name); + $rightPanelContent.find("#domain").text(jsonObj.domain); + $rightPanelContent.find("#vm_total").text(jsonObj.vmtotal); + $rightPanelContent.find("#ip_total").text(jsonObj.iptotal); + $rightPanelContent.find("#bytes_received").text(jsonObj.receivedbytes); + $rightPanelContent.find("#bytes_sent").text(jsonObj.sentbytes); + $rightPanelContent.find("#state").text(jsonObj.state); +} \ No newline at end of file diff --git a/ui/new/scripts/cloud.core.init.js b/ui/new/scripts/cloud.core.init.js index 2652cfd2ce7..8cc8478731e 100755 --- a/ui/new/scripts/cloud.core.init.js +++ b/ui/new/scripts/cloud.core.init.js @@ -60,12 +60,6 @@ $(document).ready(function() { $midmenuItem1.data("id", jsonObj.id); $midmenuItem1.data("jsonObj", jsonObj); $midmenuItem1.data("toRightPanelFn", toRightPanelFn); - -// $midmenuItem1.bind("click", function(event) { -// var $t = $(this); -// toRightPanelFn($t); -// return false; -// }); } var $midmenuItem = $("#midmenu_item"); @@ -98,6 +92,7 @@ $(document).ready(function() { } listMidMenuItems("leftmenu_event", "listEvents", "listeventsresponse", "event", "description", "jsp/tab_event.jsp", loadEventToRigntPanelFn); //listMidMenuItems("leftmenu_alert", "listAlerts", "listalertsresponse", "alert", "description", loadAlertToRightPanel); + listMidMenuItems("leftmenu_account", "listAccounts", "listaccountsresponse", "account", "name", "jsp/tab_account.jsp", loadAccountToRigntPanelFn); From 
0364a6b47bee033c0ef847f9d617e57a84f9a082 Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Fri, 3 Sep 2010 18:01:12 -0700 Subject: [PATCH 074/145] new UI - implement volume detail panel. --- ui/new/index.jsp | 2 + ui/new/jsp/tab_volume.jsp | 136 ++++++++++++++++++++++++++++ ui/new/scripts/cloud.core.init.js | 2 +- ui/new/scripts/cloud.core.volume.js | 23 +++++ 4 files changed, 162 insertions(+), 1 deletion(-) create mode 100644 ui/new/jsp/tab_volume.jsp create mode 100644 ui/new/scripts/cloud.core.volume.js diff --git a/ui/new/index.jsp b/ui/new/index.jsp index 29da536dc88..3008a154f28 100644 --- a/ui/new/index.jsp +++ b/ui/new/index.jsp @@ -35,6 +35,8 @@ + + Cloud.com CloudStack diff --git a/ui/new/jsp/tab_volume.jsp b/ui/new/jsp/tab_volume.jsp new file mode 100644 index 00000000000..4d0b9e84d36 --- /dev/null +++ b/ui/new/jsp/tab_volume.jsp @@ -0,0 +1,136 @@ + + +<%@ page import="java.util.*" %> +<%@ page import="com.cloud.utils.*" %> + +<% + + Locale browserLocale = request.getLocale(); + CloudResourceBundle t = CloudResourceBundle.getBundle("resources/resource", browserLocale); +%> + + +
+ +
+
+ <%=t.t("Details")%>
+
+
+
+
+
+ <%=t.t("ID")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Name")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Type")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Zone")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Instance.Name")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Device.ID")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Size")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("State")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Created")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Storage")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Account")%>:
+
+
+
+
+
+
+
+
+ \ No newline at end of file diff --git a/ui/new/scripts/cloud.core.init.js b/ui/new/scripts/cloud.core.init.js index 8cc8478731e..4a2398e6448 100755 --- a/ui/new/scripts/cloud.core.init.js +++ b/ui/new/scripts/cloud.core.init.js @@ -93,7 +93,7 @@ $(document).ready(function() { listMidMenuItems("leftmenu_event", "listEvents", "listeventsresponse", "event", "description", "jsp/tab_event.jsp", loadEventToRigntPanelFn); //listMidMenuItems("leftmenu_alert", "listAlerts", "listalertsresponse", "alert", "description", loadAlertToRightPanel); listMidMenuItems("leftmenu_account", "listAccounts", "listaccountsresponse", "account", "name", "jsp/tab_account.jsp", loadAccountToRigntPanelFn); - + listMidMenuItems("leftmenu_volume", "listVolumes", "listvolumesresponse", "volume", "name", "jsp/tab_volume.jsp", loadVolumeToRigntPanelFn); diff --git a/ui/new/scripts/cloud.core.volume.js b/ui/new/scripts/cloud.core.volume.js new file mode 100644 index 00000000000..1a5493427af --- /dev/null +++ b/ui/new/scripts/cloud.core.volume.js @@ -0,0 +1,23 @@ +function loadVolumeToRigntPanelFn($rightPanelContent) { + var jsonObj = $rightPanelContent.data("jsonObj"); + + var $rightPanelContent = $("#right_panel_content"); + + $rightPanelContent.find("#id").text(jsonObj.id); + $rightPanelContent.find("#name").text(jsonObj.name); + $rightPanelContent.find("#zonename").text(jsonObj.zonename); + $rightPanelContent.find("#device_id").text(jsonObj.deviceid); + $rightPanelContent.find("#state").text(jsonObj.state); + $rightPanelContent.find("#storage").text(jsonObj.storage); + $rightPanelContent.find("#account").text(jsonObj.account); + + $rightPanelContent.find("#type").text(noNull(jsonObj.type) + " (" + noNull(jsonObj.storagetype) + " storage)"); + $rightPanelContent.find("#size").text((jsonObj.size == "0") ? 
"" : convertBytes(jsonObj.size)); + + if (jsonObj.virtualmachineid == null) + $rightPanelContent.find("#vm_name").text("detached"); + else + $rightPanelContent.find("#vm_name").text(getVmName(jsonObj.vmname, jsonObj.vmdisplayname) + " (" + jsonObj.vmstate + ")"); + + setDateField(jsonObj.created, $rightPanelContent.find("#created")); +} \ No newline at end of file From e7127054caca8874597f4b77e3a5b8662ed663b2 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Fri, 3 Sep 2010 17:17:44 -0700 Subject: [PATCH 075/145] Platforms: use Waf builtin platform detection so the build scripts will work correctly with Jython --- tools/waf/tar.py | 3 ++- wscript | 3 +-- wscript_configure | 5 ++--- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/tools/waf/tar.py b/tools/waf/tar.py index a6948d3a18c..bf9a91b7521 100644 --- a/tools/waf/tar.py +++ b/tools/waf/tar.py @@ -1,4 +1,5 @@ import Utils +import Options import tarfile from TaskGen import feature, before import Task @@ -33,7 +34,7 @@ def tar_up(task): ti.mode = 0755 ti.size = os.path.getsize(src) openmode = 'r' - if sys.platform == 'win32': openmode = openmode + 'b' + if Options.platform == 'win32': openmode = openmode + 'b' f = file(src,openmode) z.addfile(ti,fileobj=f) f.close() diff --git a/wscript b/wscript index 3df0ab91573..ce086c90f5c 100644 --- a/wscript +++ b/wscript @@ -9,7 +9,6 @@ APPNAME = 'cloud' import shutil,os import email,time import optparse -import platform import Utils,Node,Options,Logs,Scripting,Environment,Build,Configure from subprocess import Popen as _Popen,PIPE import os @@ -358,9 +357,9 @@ def set_options(opt): opt.tool_options('tar',tooldir='tools/waf') opt.tool_options('mkisofs',tooldir='tools/waf') opt.tool_options('usermgmt',tooldir='tools/waf') - if platform.system() not in ['Windows',"Darwin"]: opt.tool_options('compiler_cc') opt.tool_options('python') opt.tool_options('tomcat',tooldir='tools/waf') + if Options.platform not in ['darwin',"win32"]: 
opt.tool_options('compiler_cc') inst_dir = opt.get_option_group('--bindir') # get the group that contains bindir inst_dir.add_option('--javadir', # add javadir to the group that contains bindir diff --git a/wscript_configure b/wscript_configure index 068abb776b9..107d650de4f 100644 --- a/wscript_configure +++ b/wscript_configure @@ -4,7 +4,6 @@ - detects Tomcat (on Windows) - detects or configures directories according to command-line options""" -import platform import Utils,Node,Options,Logs,Scripting,Environment,Build,Configure from os import unlink as _unlink, makedirs as _makedirs, getcwd as _getcwd, chdir as _chdir try: from os import getuid as _getuid @@ -91,8 +90,8 @@ hard_deps = [ conf.check_message_1('Detecting distribution') -if platform.system() == 'Windows': conf.env.DISTRO = "Windows" -elif platform.system() == 'Darwin': conf.env.DISTRO = "Mac" +if Options.platform == 'win32': conf.env.DISTRO = "Windows" +elif Options.platform == 'darwin': conf.env.DISTRO = "Mac" elif _exists("/etc/network"): conf.env.DISTRO = "Ubuntu" elif _exists("/etc/fedora-release"): conf.env.DISTRO = "Fedora" elif _exists("/etc/centos-release") or _exists("/etc/redhat-release"): conf.env.DISTRO = "CentOS" From fd43b5aee8229f89bae5460afdfbc8bcf558e03f Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Fri, 3 Sep 2010 17:51:39 -0700 Subject: [PATCH 076/145] Split out JAVADIR detection to a separate file, and make non-Linux configure much more robust --- tools/waf/javadir.py | 22 ++++++++ tools/waf/usermgmt.py | 8 ++- wscript | 34 ++++++------ wscript_configure | 117 ++++++++++++++++++------------------------ 4 files changed, 92 insertions(+), 89 deletions(-) create mode 100644 tools/waf/javadir.py diff --git a/tools/waf/javadir.py b/tools/waf/javadir.py new file mode 100644 index 00000000000..9c71a64c80c --- /dev/null +++ b/tools/waf/javadir.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python + +import Options, Utils +import os + +def detect(conf): + 
conf.check_message_1('Detecting JAVADIR') + javadir = getattr(Options.options, 'JAVADIR', '') + if javadir: + conf.env.JAVADIR = javadir + conf.check_message_2("%s (forced through --javadir)"%conf.env.JAVADIR,"GREEN") + else: + conf.env.JAVADIR = os.path.join(conf.env.DATADIR,'java') + conf.check_message_2("%s (using default ${DATADIR}/java directory)"%conf.env.JAVADIR,"GREEN") + +def set_options(opt): + inst_dir = opt.get_option_group('--datadir') # get the group that contains bindir + if not inst_dir: raise Utils.WafError, "DATADIR not set. Did you load the gnu_dirs tool options with opt.tool_options('gnu_dirs') before running opt.tool_options('javadir')?" + inst_dir.add_option('--javadir', # add javadir to the group that contains bindir + help = 'Java class and jar files [Default: ${DATADIR}/java]', + default = '', + dest = 'JAVADIR') diff --git a/tools/waf/usermgmt.py b/tools/waf/usermgmt.py index a140d17ebdc..65fd889d330 100644 --- a/tools/waf/usermgmt.py +++ b/tools/waf/usermgmt.py @@ -6,13 +6,15 @@ import Task import os def detect(conf): - if Options.platform == 'win32': return + if Options.platform == 'win32': raise Utils.WafError('the usermgmt tool only works on Linux') + if Options.platform == 'darwin': raise Utils.WafError('the usermgmt tool only works on Linux') path_list = ["/usr/local/sbin","/usr/sbin","/sbin"] + os.environ.get('PATH','').split(os.pathsep) conf.find_program("useradd",var='USERADD',mandatory=True,path_list=path_list) conf.find_program("userdel",var='USERDEL',mandatory=True,path_list=path_list) def set_options(opt): - if Options.platform == 'win32': return + if Options.platform == 'win32': raise Utils.WafError('the usermgmt tool only works on Linux') + if Options.platform == 'darwin': raise Utils.WafError('the usermgmt tool only works on Linux') og = opt.get_option_group('--force') og.add_option('--nochown', action = 'store_true', @@ -30,6 +32,7 @@ Build.BuildContext.subst_add_destdir = staticmethod(_subst_add_destdir) def 
_setownership(ctx,path,owner,group,mode=None): if Options.platform == 'win32': return + if Options.platform == 'darwin': return if not hasattr(os,"getuid"): return if os.getuid() != 0: return if Options.options.NOUSERMGMT: return @@ -71,6 +74,7 @@ Build.BuildContext.setownership = _setownership def _createuser(ctx,user,homedir,shell): if Options.platform == 'win32': return + if Options.platform == 'darwin': return if not hasattr(os,"getuid"): return if os.getuid() != 0: return if Options.options.NOUSERMGMT: return diff --git a/wscript b/wscript index ce086c90f5c..823a4ae8390 100644 --- a/wscript +++ b/wscript @@ -354,28 +354,24 @@ Build.BuildContext.substitute = _substitute def set_options(opt): """Register command line options""" opt.tool_options('gnu_dirs') + opt.tool_options('python') opt.tool_options('tar',tooldir='tools/waf') opt.tool_options('mkisofs',tooldir='tools/waf') - opt.tool_options('usermgmt',tooldir='tools/waf') - opt.tool_options('python') + if Options.platform not in ['darwin','win32']: opt.tool_options('usermgmt',tooldir='tools/waf') + if Options.platform not in ['darwin','win32']: opt.tool_options('javadir',tooldir='tools/waf') opt.tool_options('tomcat',tooldir='tools/waf') if Options.platform not in ['darwin',"win32"]: opt.tool_options('compiler_cc') - inst_dir = opt.get_option_group('--bindir') # get the group that contains bindir - inst_dir.add_option('--javadir', # add javadir to the group that contains bindir - help = 'Java class and jar files [Default: ${DATADIR}/java]', - default = '', - dest = 'JAVADIR') - inst_dir = opt.get_option_group('--srcdir') # get the group that contains the srcdir - inst_dir.add_option('--with-db-host', # add javadir to the group that contains bindir + inst_dir = opt.get_option_group('--srcdir') + inst_dir.add_option('--with-db-host', help = 'Database host to use for waf deploydb [Default: 127.0.0.1]', default = '127.0.0.1', dest = 'DBHOST') - inst_dir.add_option('--with-db-user', # add javadir to the group that 
contains bindir + inst_dir.add_option('--with-db-user', help = 'Database user to use for waf deploydb [Default: root]', default = 'root', dest = 'DBUSER') - inst_dir.add_option('--with-db-pw', # add javadir to the group that contains bindir + inst_dir.add_option('--with-db-pw', help = 'Database password to use for waf deploydb [Default: ""]', default = '', dest = 'DBPW') @@ -393,7 +389,7 @@ def set_options(opt): help = 'does ---no-dep-check', default = False, dest = 'NODEPCHECK') - inst_dir = opt.get_option_group('--force') # get the group that contains the force + inst_dir = opt.get_option_group('--force') inst_dir.add_option('--preserve-config', action='store_true', help = 'do not install configuration files', @@ -402,7 +398,7 @@ def set_options(opt): debugopts = optparse.OptionGroup(opt.parser,'run/debug options') opt.add_option_group(debugopts) - debugopts.add_option('--debug-port', # add javadir to the group that contains bindir + debugopts.add_option('--debug-port', help = 'Port on which the debugger will listen when running waf debug [Default: 8787]', default = '8787', dest = 'DEBUGPORT') @@ -419,15 +415,15 @@ def set_options(opt): rpmopts = optparse.OptionGroup(opt.parser,'RPM/DEB build options') opt.add_option_group(rpmopts) - rpmopts.add_option('--build-number', # add javadir to the group that contains bindir + rpmopts.add_option('--build-number', help = 'Build number [Default: SVN revision number for builds from checkouts, or empty for builds from source releases]', default = '', dest = 'BUILDNUMBER') - rpmopts.add_option('--prerelease', # add javadir to the group that contains bindir + rpmopts.add_option('--prerelease', help = 'Branch name to append to the release number (if specified, alter release number to be a prerelease); this option requires --build-number=X [Default: nothing]', default = '', dest = 'PRERELEASE') - rpmopts.add_option('--skip-dist', # add javadir to the group that contains bindir + rpmopts.add_option('--skip-dist', 
action='store_true', help = 'Normally, dist() is called during package build. This makes the package build assume that a distribution tarball has already been made, and use that. This option is also valid during distcheck and dist.', default = False, @@ -435,7 +431,7 @@ def set_options(opt): distopts = optparse.OptionGroup(opt.parser,'dist options') opt.add_option_group(distopts) - distopts.add_option('--oss', # add javadir to the group that contains bindir + distopts.add_option('--oss', help = 'Only include open source components', action = 'store_true', default = False, @@ -757,14 +753,14 @@ def debug(ctx): @throws_command_errors def run_agent(args): - """runs the management server""" + """runs the agent""" # FIXME: make this use the run/debug options conf = _getbuildcontext() if not _exists(_join(conf.env.LIBEXECDIR,"agent-runner")): Scripting.install(conf) _check_call("sudo",[_join(conf.env.LIBEXECDIR,"agent-runner")]) @throws_command_errors def run_console_proxy(args): - """runs the management server""" + """runs the console proxy""" # FIXME: make this use the run/debug options conf = _getbuildcontext() if not _exists(_join(conf.env.LIBEXECDIR,"console-proxy-runner")): Scripting.install(conf) _check_call("sudo",[_join(conf.env.LIBEXECDIR,"console-proxy-runner")]) diff --git a/wscript_configure b/wscript_configure index 107d650de4f..f70cf61fa46 100644 --- a/wscript_configure +++ b/wscript_configure @@ -89,6 +89,8 @@ hard_deps = [ ] +conf.env.VERSION = Utils.g_module.VERSION + conf.check_message_1('Detecting distribution') if Options.platform == 'win32': conf.env.DISTRO = "Windows" elif Options.platform == 'darwin': conf.env.DISTRO = "Mac" @@ -111,7 +113,6 @@ conf.check_message_2("%s"%conf.env.PREFIX,"GREEN") conf.check_tool('misc') conf.check_tool("gnu_dirs") conf.check_tool('tar') -conf.check_tool('usermgmt') try: conf.check_tool('mkisofs') except Configure.ConfigurationError,e: raise Configure.ConfigurationError, "The program genisoimage (or mkisofs) could 
not be found.\nOn Linux: ./waf installrpmdeps or ./waf installdebdeps according to your distro's package format.\nOn Windows: Use cygwin to install the mkisofs package, then ensure that the program is in your PATH." @@ -124,36 +125,10 @@ except ImportError,e: raise Configure.ConfigurationError, "The Python MySQLdb module could not be found.\nOn Linux: ./waf installrpmdeps or ./waf installdebdeps according to your distro's package format.\nOn Windows: Install MySQL 5.1 on your machine, then install the Python MySQLdb module for Python %s.\nThe module for Python 2.6 / win32 / MySQL 5.1 is available here: http://soemin.googlecode.com/files/MySQL-python-1.2.3c1.win32-py2.6.exe"%conf.env.PYTHON_VERSION conf.check_message_2('MySQLdb','GREEN') -if conf.env.DISTRO not in ["Windows","Mac"]: - conf.check_tool('compiler_cc') - conf.check_cc(lib='pthread') - conf.check_cc(lib='dl') - -# waf uses slashes somewhere along the line in some paths. we fix them on windows. -if conf.env.DISTRO in ['Windows']: - for pth in [ x for x in conf.env.get_merged_dict().keys() if x.endswith("DIR") ]: - conf.env[pth] = conf.env[pth].replace("/","\\") - -for a in "DBHOST DBUSER DBPW DBDIR".split(): - conf.env[a] = getattr(Options.options, a, '') - try: conf.check_tool("tomcat") except Configure.ConfigurationError,e: conf.fatal("Tomcat directory %r not found. 
Either install Tomcat using ./waf installrpmdeps or ./waf installdebdeps, or manually install Tomcat to a directory in your system and set the environment variable TOMCAT_HOME to point to it."%conf.env.TOMCATHOME) -conf.check_message_1('Determining management server user name') -msuser = getattr(Options.options, 'MSUSER', '') -if msuser: - conf.env.MSUSER = msuser - conf.check_message_2("%s (forced through --tomcat-user)"%conf.env.MSUSER,"GREEN") -else: - if conf.env.DISTRO in ['Windows','Mac']: - conf.env.MSUSER = 'root' - conf.check_message_2("%s (not used on Windows or Mac)"%conf.env.MSUSER,"GREEN") - else: - conf.env.MSUSER = conf.env.PACKAGE - conf.check_message_2("%s (Linux default)"%conf.env.MSUSER,"GREEN") - conf.env.AGENTPATH = _join(conf.env.PACKAGE,"agent") conf.env.CPPATH = _join(conf.env.PACKAGE,"console-proxy") conf.env.MSPATH = _join(conf.env.PACKAGE,"management") @@ -172,34 +147,63 @@ else: conf.env.MSLOGDIR = _join(conf.env.LOCALSTATEDIR,"log",conf.env.MSPATH) conf.env.MSMNTDIR = _join(conf.env.SHAREDSTATEDIR,conf.env.PACKAGE,"mnt") -conf.env.PIDDIR = _join(conf.env.LOCALSTATEDIR,"run") -conf.env.LOCKDIR = _join(conf.env.LOCALSTATEDIR,"lock","subsys") - -conf.check_message_1('Detecting JAVADIR') -javadir = getattr(Options.options, 'JAVADIR', '') -if javadir: - conf.env.JAVADIR = javadir - conf.check_message_2("%s (forced through --javadir)"%conf.env.JAVADIR,"GREEN") -elif conf.env.DISTRO in ['Windows','Mac']: - conf.env.JAVADIR = _join(conf.env['TOMCATHOME'],'lib') - conf.check_message_2("%s (using Tomcat's lib/ directory)"%conf.env.JAVADIR,"GREEN") -else: - conf.env.JAVADIR = _join(conf.env.DATADIR,'java') - conf.check_message_2("%s (using default ${DATADIR}/java directory)"%conf.env.JAVADIR,"GREEN") - -if conf.env.DISTRO in ["Windows","Mac"]: +if conf.env.DISTRO in ['Windows','Mac']: conf.env.PREMIUMJAVADIR = conf.env.JAVADIR conf.env.PLUGINJAVADIR = conf.env.JAVADIR conf.env.SYSTEMJAVADIR = conf.env.JAVADIR -else: + # waf uses slashes somewhere 
along the line in some paths. we fix them on windows. + for pth in [ x for x in conf.env.get_merged_dict().keys() if x.endswith("DIR") ]: + conf.env[pth] = conf.env[pth].replace("/","\\") +else: + conf.check_tool('compiler_cc') + conf.check_cc(lib='pthread') + conf.check_cc(lib='dl') + conf.check_tool('usermgmt') + conf.check_message_1('Determining management server user name') + msuser = getattr(Options.options, 'MSUSER', '') + if msuser: + conf.env.MSUSER = msuser + conf.check_message_2("%s (forced through --tomcat-user)"%conf.env.MSUSER,"GREEN") + else: + conf.env.MSUSER = conf.env.PACKAGE + conf.check_message_2("%s (Linux default)"%conf.env.MSUSER,"GREEN") + conf.check_tool("javadir") conf.env.PREMIUMJAVADIR = _join(conf.env.JAVADIR,"%s-premium"%conf.env.PACKAGE) conf.env.PLUGINJAVADIR = _join(conf.env.JAVADIR,"%s-plugins"%conf.env.PACKAGE) conf.env.SYSTEMJAVADIR = "/usr/share/java" +conf.check_message_1('Database info for developer setup') +for a in "DBHOST DBUSER DBPW".split(): conf.env[a] = getattr(Options.options, a, '') +conf.check_message_2("user: %r, password: %r, host: %r"%(conf.env.DBUSER,conf.env.DBPW,conf.env.DBHOST),'GREEN') + in_javadir = lambda name: _join(conf.env.JAVADIR,_basename(name)) # $PREFIX/share/java in_system_javadir = lambda name: _join(conf.env.SYSTEMJAVADIR,name) # /usr/share/java in_premiumjavadir = lambda name: _join(conf.env.PREMIUMJAVADIR,name) # $PREFIX/share/java/cloud-premium +conf.env.AGENTLIBDIR = Utils.subst_vars(_join("${LIBDIR}","${AGENTPATH}"),conf.env) +conf.env.AGENTSYSCONFDIR = Utils.subst_vars(_join("${SYSCONFDIR}","${AGENTPATH}"),conf.env) +conf.env.AGENTLOGDIR = Utils.subst_vars(_join("${LOCALSTATEDIR}","log","${AGENTPATH}"),conf.env) + +conf.env.USAGELOGDIR = Utils.subst_vars(_join("${LOCALSTATEDIR}","log","${USAGEPATH}"),conf.env) +conf.env.USAGESYSCONFDIR = Utils.subst_vars(_join("${SYSCONFDIR}","${USAGEPATH}"),conf.env) + +conf.env.CPLIBDIR = Utils.subst_vars(_join("${LIBDIR}","${CPPATH}"),conf.env) 
+conf.env.CPSYSCONFDIR = Utils.subst_vars(_join("${SYSCONFDIR}","${CPPATH}"),conf.env) +conf.env.CPLOGDIR = Utils.subst_vars(_join("${LOCALSTATEDIR}","log","${CPPATH}"),conf.env) + +conf.env.MSLOG = _join(conf.env.MSLOGDIR,"management-server.log") +conf.env.APISERVERLOG = _join(conf.env.MSLOGDIR,"api-server.log") +conf.env.AGENTLOG = _join(conf.env.AGENTLOGDIR,"agent.log") +conf.env.USAGELOG = _join(conf.env.USAGELOGDIR,"usage.log") +conf.env.CPLOG = _join(conf.env.CPLOGDIR,"console-proxy.log") + +conf.env.SETUPDATADIR = Utils.subst_vars(_join("${DATADIR}","${SETUPPATH}"),conf.env) + +conf.env.SERVERSYSCONFDIR = Utils.subst_vars(_join("${SYSCONFDIR}","${SERVERPATH}"),conf.env) +conf.env.PIDDIR = _join(conf.env.LOCALSTATEDIR,"run") +conf.env.LOCKDIR = _join(conf.env.LOCALSTATEDIR,"lock","subsys") + + conf.check_message_1('Building classpaths') # == Here we build the run-time classpaths == @@ -250,30 +254,7 @@ compilecp+= _glob(_join(conf.env.TOMCATHOME,'lib',"*.jar")) conf.env.CLASSPATH = pathsep.join(compilecp) conf.check_message_2('Done','GREEN') -conf.env.VERSION = Utils.g_module.VERSION - -conf.env.AGENTLIBDIR = Utils.subst_vars(_join("${LIBDIR}","${AGENTPATH}"),conf.env) -conf.env.AGENTSYSCONFDIR = Utils.subst_vars(_join("${SYSCONFDIR}","${AGENTPATH}"),conf.env) -conf.env.AGENTLOGDIR = Utils.subst_vars(_join("${LOCALSTATEDIR}","log","${AGENTPATH}"),conf.env) - -conf.env.USAGELOGDIR = Utils.subst_vars(_join("${LOCALSTATEDIR}","log","${USAGEPATH}"),conf.env) -conf.env.USAGESYSCONFDIR = Utils.subst_vars(_join("${SYSCONFDIR}","${USAGEPATH}"),conf.env) - -conf.env.CPLIBDIR = Utils.subst_vars(_join("${LIBDIR}","${CPPATH}"),conf.env) -conf.env.CPSYSCONFDIR = Utils.subst_vars(_join("${SYSCONFDIR}","${CPPATH}"),conf.env) -conf.env.CPLOGDIR = Utils.subst_vars(_join("${LOCALSTATEDIR}","log","${CPPATH}"),conf.env) - -conf.env.MSLOG = _join(conf.env.MSLOGDIR,"management-server.log") -conf.env.APISERVERLOG = _join(conf.env.MSLOGDIR,"api-server.log") -conf.env.AGENTLOG = 
_join(conf.env.AGENTLOGDIR,"agent.log") -conf.env.USAGELOG = _join(conf.env.USAGELOGDIR,"usage.log") -conf.env.CPLOG = _join(conf.env.CPLOGDIR,"console-proxy.log") - -conf.env.SETUPDATADIR = Utils.subst_vars(_join("${DATADIR}","${SETUPPATH}"),conf.env) - -conf.env.SERVERSYSCONFDIR = Utils.subst_vars(_join("${SYSCONFDIR}","${SERVERPATH}"),conf.env) - - # log4j config and property config files require backslash escapes on Windows +# log4j config and property config files require backslash escapes on Windows if conf.env.DISTRO in ["Windows"]: for log in "MSLOG APISERVERLOG AGENTLIBDIR USAGELOG AGENTLOG".split(): conf.env[log] = conf.env[log].replace("\\","\\\\") From de7aa486444253b62d6733b661e39dc603208d0f Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Fri, 3 Sep 2010 18:04:41 -0700 Subject: [PATCH 077/145] Move database setup configuration together, skip empty path fixups on Windows and put together with gnu_dirs tool --- wscript_configure | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/wscript_configure b/wscript_configure index f70cf61fa46..c0255a6c621 100644 --- a/wscript_configure +++ b/wscript_configure @@ -111,19 +111,32 @@ if Options.options.prefix == Options.default_prefix: conf.check_message_2("%s"%conf.env.PREFIX,"GREEN") conf.check_tool('misc') + conf.check_tool("gnu_dirs") +if conf.env.distro == 'Windows': + # waf uses slashes somewhere along the line in some paths. we fix them on windows. 
+ for pth in [ x for x in conf.env.get_merged_dict().keys() if x.endswith("DIR") ]: + if not pth: continue + if not conf.env[pth]: continue + conf.env[pth] = conf.env[pth].replace("/","\\") + conf.check_tool('tar') try: conf.check_tool('mkisofs') except Configure.ConfigurationError,e: raise Configure.ConfigurationError, "The program genisoimage (or mkisofs) could not be found.\nOn Linux: ./waf installrpmdeps or ./waf installdebdeps according to your distro's package format.\nOn Windows: Use cygwin to install the mkisofs package, then ensure that the program is in your PATH." conf.check_tool('java') + conf.check_tool("python") conf.check_python_version((2,4,0)) + conf.check_message_1('Detecting Python MySQL module') try: import MySQLdb except ImportError,e: raise Configure.ConfigurationError, "The Python MySQLdb module could not be found.\nOn Linux: ./waf installrpmdeps or ./waf installdebdeps according to your distro's package format.\nOn Windows: Install MySQL 5.1 on your machine, then install the Python MySQLdb module for Python %s.\nThe module for Python 2.6 / win32 / MySQL 5.1 is available here: http://soemin.googlecode.com/files/MySQL-python-1.2.3c1.win32-py2.6.exe"%conf.env.PYTHON_VERSION conf.check_message_2('MySQLdb','GREEN') +conf.check_message_1('Database info for developer setup') +for a in "DBHOST DBUSER DBPW".split(): conf.env[a] = getattr(Options.options, a, '') +conf.check_message_2("user: %r, password: %r, host: %r"%(conf.env.DBUSER,conf.env.DBPW,conf.env.DBHOST),'GREEN') try: conf.check_tool("tomcat") except Configure.ConfigurationError,e: @@ -151,9 +164,6 @@ if conf.env.DISTRO in ['Windows','Mac']: conf.env.PREMIUMJAVADIR = conf.env.JAVADIR conf.env.PLUGINJAVADIR = conf.env.JAVADIR conf.env.SYSTEMJAVADIR = conf.env.JAVADIR - # waf uses slashes somewhere along the line in some paths. we fix them on windows. 
- for pth in [ x for x in conf.env.get_merged_dict().keys() if x.endswith("DIR") ]: - conf.env[pth] = conf.env[pth].replace("/","\\") else: conf.check_tool('compiler_cc') conf.check_cc(lib='pthread') @@ -172,10 +182,6 @@ else: conf.env.PLUGINJAVADIR = _join(conf.env.JAVADIR,"%s-plugins"%conf.env.PACKAGE) conf.env.SYSTEMJAVADIR = "/usr/share/java" -conf.check_message_1('Database info for developer setup') -for a in "DBHOST DBUSER DBPW".split(): conf.env[a] = getattr(Options.options, a, '') -conf.check_message_2("user: %r, password: %r, host: %r"%(conf.env.DBUSER,conf.env.DBPW,conf.env.DBHOST),'GREEN') - in_javadir = lambda name: _join(conf.env.JAVADIR,_basename(name)) # $PREFIX/share/java in_system_javadir = lambda name: _join(conf.env.SYSTEMJAVADIR,name) # /usr/share/java in_premiumjavadir = lambda name: _join(conf.env.PREMIUMJAVADIR,name) # $PREFIX/share/java/cloud-premium From 02830a4f2c1f234198e3a2d8634a44457a6eb26a Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Fri, 3 Sep 2010 20:11:44 -0500 Subject: [PATCH 078/145] Make JAVADIR on Windows point to Tomcat/lib, and fix the lowercase distro below gnu_dirs that was preventing path fixup --- wscript_configure | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/wscript_configure b/wscript_configure index c0255a6c621..42ff5d6d9b5 100644 --- a/wscript_configure +++ b/wscript_configure @@ -113,7 +113,7 @@ conf.check_message_2("%s"%conf.env.PREFIX,"GREEN") conf.check_tool('misc') conf.check_tool("gnu_dirs") -if conf.env.distro == 'Windows': +if conf.env.DISTRO == 'Windows': # waf uses slashes somewhere along the line in some paths. we fix them on windows. 
for pth in [ x for x in conf.env.get_merged_dict().keys() if x.endswith("DIR") ]: if not pth: continue @@ -154,17 +154,15 @@ if conf.env.DISTRO in ['Windows','Mac']: conf.env.MSCONF = _join(conf.env.TOMCATHOME,"conf") conf.env.MSLOGDIR = _join(conf.env.TOMCATHOME,"logs") conf.env.MSMNTDIR = _join(conf.env.TOMCATHOME,"mnt") + conf.env.JAVADIR = _join(conf.env.MSENVIRON,"lib") + conf.env.PREMIUMJAVADIR = conf.env.JAVADIR + conf.env.PLUGINJAVADIR = conf.env.JAVADIR + conf.env.SYSTEMJAVADIR = conf.env.JAVADIR else: conf.env.MSENVIRON = _join(conf.env.DATADIR,conf.env.MSPATH) conf.env.MSCONF = _join(conf.env.SYSCONFDIR,conf.env.MSPATH) conf.env.MSLOGDIR = _join(conf.env.LOCALSTATEDIR,"log",conf.env.MSPATH) conf.env.MSMNTDIR = _join(conf.env.SHAREDSTATEDIR,conf.env.PACKAGE,"mnt") - -if conf.env.DISTRO in ['Windows','Mac']: - conf.env.PREMIUMJAVADIR = conf.env.JAVADIR - conf.env.PLUGINJAVADIR = conf.env.JAVADIR - conf.env.SYSTEMJAVADIR = conf.env.JAVADIR -else: conf.check_tool('compiler_cc') conf.check_cc(lib='pthread') conf.check_cc(lib='dl') From c050d0717b591e01407606217baccae601bcc15a Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Fri, 3 Sep 2010 18:24:43 -0700 Subject: [PATCH 079/145] new UI - implement alert detail panel. 
--- ui/new/index.jsp | 2 ++ ui/new/jsp/tab_alert.jsp | 56 ++++++++++++++++++++++++++++++ ui/new/jsp/tab_event.jsp | 2 +- ui/new/scripts/cloud.core.alert.js | 10 ++++++ ui/new/scripts/cloud.core.init.js | 2 +- 5 files changed, 70 insertions(+), 2 deletions(-) create mode 100644 ui/new/jsp/tab_alert.jsp create mode 100644 ui/new/scripts/cloud.core.alert.js diff --git a/ui/new/index.jsp b/ui/new/index.jsp index 3008a154f28..93fa2b84844 100644 --- a/ui/new/index.jsp +++ b/ui/new/index.jsp @@ -33,6 +33,8 @@ + + diff --git a/ui/new/jsp/tab_alert.jsp b/ui/new/jsp/tab_alert.jsp new file mode 100644 index 00000000000..5d388aa4b7c --- /dev/null +++ b/ui/new/jsp/tab_alert.jsp @@ -0,0 +1,56 @@ + + +<%@ page import="java.util.*" %> +<%@ page import="com.cloud.utils.*" %> + +<% + + Locale browserLocale = request.getLocale(); + CloudResourceBundle t = CloudResourceBundle.getBundle("resources/resource", browserLocale); +%> + + +
+ +
+
+ <%=t.t("Details")%>
+
+
+
+
+
+ <%=t.t("Type")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Description")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Sent")%>:
+
+
+
+
+
+
+
+
+ \ No newline at end of file diff --git a/ui/new/jsp/tab_event.jsp b/ui/new/jsp/tab_event.jsp index 4c33b31018e..58cd0b49f57 100644 --- a/ui/new/jsp/tab_event.jsp +++ b/ui/new/jsp/tab_event.jsp @@ -93,4 +93,4 @@
- \ No newline at end of file + \ No newline at end of file diff --git a/ui/new/scripts/cloud.core.alert.js b/ui/new/scripts/cloud.core.alert.js new file mode 100644 index 00000000000..76d488b0991 --- /dev/null +++ b/ui/new/scripts/cloud.core.alert.js @@ -0,0 +1,10 @@ +function loadAlertToRigntPanelFn($rightPanelContent) { + var jsonObj = $rightPanelContent.data("jsonObj"); + + var $rightPanelContent = $("#right_panel_content"); + + $rightPanelContent.find("#type").text(jsonObj.type); + $rightPanelContent.find("#description").text(jsonObj.description); + + setDateField(jsonObj.sent, $rightPanelContent.find("#sent")); +} \ No newline at end of file diff --git a/ui/new/scripts/cloud.core.init.js b/ui/new/scripts/cloud.core.init.js index 4a2398e6448..b6203176cef 100755 --- a/ui/new/scripts/cloud.core.init.js +++ b/ui/new/scripts/cloud.core.init.js @@ -91,7 +91,7 @@ $(document).ready(function() { }); } listMidMenuItems("leftmenu_event", "listEvents", "listeventsresponse", "event", "description", "jsp/tab_event.jsp", loadEventToRigntPanelFn); - //listMidMenuItems("leftmenu_alert", "listAlerts", "listalertsresponse", "alert", "description", loadAlertToRightPanel); + listMidMenuItems("leftmenu_alert", "listAlerts", "listalertsresponse", "alert", "description", "jsp/tab_alert.jsp", loadAlertToRigntPanelFn); listMidMenuItems("leftmenu_account", "listAccounts", "listaccountsresponse", "account", "name", "jsp/tab_account.jsp", loadAccountToRigntPanelFn); listMidMenuItems("leftmenu_volume", "listVolumes", "listvolumesresponse", "volume", "name", "jsp/tab_volume.jsp", loadVolumeToRigntPanelFn); From 39cd43ff582923c2fb6615d15825a3d697ce499a Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Fri, 3 Sep 2010 18:50:46 -0700 Subject: [PATCH 080/145] new UI - implement snapshot detail panel. 
--- ui/new/index.jsp | 2 + ui/new/jsp/tab_alert.jsp | 2 +- ui/new/jsp/tab_snapshot.jsp | 96 +++++++++++++++++++++++++++ ui/new/scripts/cloud.core.init.js | 1 + ui/new/scripts/cloud.core.snapshot.js | 14 ++++ 5 files changed, 114 insertions(+), 1 deletion(-) create mode 100644 ui/new/jsp/tab_snapshot.jsp create mode 100644 ui/new/scripts/cloud.core.snapshot.js diff --git a/ui/new/index.jsp b/ui/new/index.jsp index 93fa2b84844..98d89948978 100644 --- a/ui/new/index.jsp +++ b/ui/new/index.jsp @@ -39,6 +39,8 @@ + + Cloud.com CloudStack diff --git a/ui/new/jsp/tab_alert.jsp b/ui/new/jsp/tab_alert.jsp index 5d388aa4b7c..bcaef0bf2d1 100644 --- a/ui/new/jsp/tab_alert.jsp +++ b/ui/new/jsp/tab_alert.jsp @@ -1,5 +1,5 @@ <%@ page import="java.util.*" %> diff --git a/ui/new/jsp/tab_snapshot.jsp b/ui/new/jsp/tab_snapshot.jsp new file mode 100644 index 00000000000..ea5aa04fede --- /dev/null +++ b/ui/new/jsp/tab_snapshot.jsp @@ -0,0 +1,96 @@ + + +<%@ page import="java.util.*" %> +<%@ page import="com.cloud.utils.*" %> + +<% + + Locale browserLocale = request.getLocale(); + CloudResourceBundle t = CloudResourceBundle.getBundle("resources/resource", browserLocale); +%> + + +
+ +
+
+ <%=t.t("Details")%>
+
+
+
+
+
+ <%=t.t("ID")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Name")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Volume")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Interval.Type")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Created")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Account")%>:
+
+
+
+
+
+
+
+
+
+ <%=t.t("Domain")%>:
+
+
+
+
+
+
+
+
+ \ No newline at end of file diff --git a/ui/new/scripts/cloud.core.init.js b/ui/new/scripts/cloud.core.init.js index b6203176cef..c3f9a1ffb10 100755 --- a/ui/new/scripts/cloud.core.init.js +++ b/ui/new/scripts/cloud.core.init.js @@ -94,6 +94,7 @@ $(document).ready(function() { listMidMenuItems("leftmenu_alert", "listAlerts", "listalertsresponse", "alert", "description", "jsp/tab_alert.jsp", loadAlertToRigntPanelFn); listMidMenuItems("leftmenu_account", "listAccounts", "listaccountsresponse", "account", "name", "jsp/tab_account.jsp", loadAccountToRigntPanelFn); listMidMenuItems("leftmenu_volume", "listVolumes", "listvolumesresponse", "volume", "name", "jsp/tab_volume.jsp", loadVolumeToRigntPanelFn); + listMidMenuItems("leftmenu_snapshot", "listSnapshots", "listsnapshotsresponse", "snapshot", "name", "jsp/tab_snapshot.jsp", loadSnapshotToRigntPanelFn); diff --git a/ui/new/scripts/cloud.core.snapshot.js b/ui/new/scripts/cloud.core.snapshot.js new file mode 100644 index 00000000000..279d38b0d95 --- /dev/null +++ b/ui/new/scripts/cloud.core.snapshot.js @@ -0,0 +1,14 @@ +function loadSnapshotToRigntPanelFn($rightPanelContent) { + var jsonObj = $rightPanelContent.data("jsonObj"); + + var $rightPanelContent = $("#right_panel_content"); + + $rightPanelContent.find("#id").text(jsonObj.id); + $rightPanelContent.find("#name").text(jsonObj.name); + $rightPanelContent.find("#volume_name").text(jsonObj.volumename); + $rightPanelContent.find("#interval_type").text(jsonObj.intervaltype); + $rightPanelContent.find("#account").text(jsonObj.account); + $rightPanelContent.find("#domain").text(jsonObj.domain); + + setDateField(jsonObj.created, $rightPanelContent.find("#created")); +} \ No newline at end of file From 8577624e858c4a9db7f4b9aa29d4f851a4a79857 Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Fri, 3 Sep 2010 19:10:53 -0700 Subject: [PATCH 081/145] Add dependencies to documentation --- README.html | 87 +++++++++++++++++++++++++++++++++++++++++++++-------- 1 
file changed, 75 insertions(+), 12 deletions(-) diff --git a/README.html b/README.html index 5a38a434bd9..ae6d2e6e882 100644 --- a/README.html +++ b/README.html @@ -702,11 +702,11 @@ Once this command is done, the packages will be built in the directory {{{artifa # As a non-root user, run the command {{{./waf deb}}} in the source directory. Once this command is done, the packages will be built in the directory {{{artifacts/debbuild}}}.
-
+
!Obtain the source for the CloudStack
 If you aren't reading this from a local copy of the source code, see [[Obtaining the source]].
-!Prepare your development environment
-See [[Preparing your development environment]].
+!Prepare your environment
+See [[Preparing your environment]].
 !Configure the build on the builder machine
 As non-root, run the command {{{./waf configure}}}.  See [[waf configure]] to discover configuration options for that command.
 !Build the CloudStack on the builder machine
@@ -737,11 +737,74 @@ See the files in the {{{debian/}}} folder.
The Cloud.com CloudStack is an open source software product that enables the deployment, management, and configuration of multi-tier and multi-tenant infrastructure cloud services by enterprises and service providers.
-
-
Not done yet!
+
+
Prior to building the CloudStack, you need to install the following software packages in your system:
+# Sun Java 1.6
+## You must install the Java Development Kit with {{{javac}}}, not just the Java Runtime Environment
+## The commands {{{java}}} and {{{javac}}} must be found in your {{{PATH}}}
+# Apache Tomcat
+## If you are using the official Apache binary distribution, set the environment variable {{{TOMCAT_HOME}}} to point to the Apache Tomcat directory
+# MySQL
+## At the very minimum, you need to have the client and libraries installed
+## If your development machine is also going to be the database server, you need to have the server installed and running as well
+# Python 2.6
+## Ensure that the {{{python}}} command is in your {{{PATH}}}
+## Do ''not'' install Cygwin Python!
+# The MySQLdb module for Python 2.6
+## If you use Windows, you can find a [[pre-built package here|http://soemin.googlecode.com/files/MySQL-python-1.2.3c1.win32-py2.6.exe]]
+# bash
+# coreutils
+!Installing the dependencies on Windows
+Some of the packages in the above list are only available on Windows through Cygwin.  If that is your case, install them using Cygwin and remember to include the Cygwin {{{bin/}}} directory in your PATH.
+
+Do not install Cygwin Python!  Use the Python for Windows official installer instead.
+!Additional dependencies for Linux development environments
+# GCC (only needed on Linux)
+# glibc-devel / glibc-dev
+# The Java packages (usually available in your distribution):
+## commons-collections
+## commons-dbcp
+## commons-logging
+## commons-logging-api
+## commons-pool
+## commons-httpclient
+## ws-commons-util
-
-
Not done yet!
+
+
The following software / programs must be correctly installed in the machines where you will run a CloudStack component.  This list is by no means complete yet, but it will be soon.
+!Dependencies common to all components of the CloudStack
+# bash
+# coreutils
+# useradd
+# userdel
+# Sun Java 1.6
+## You must install the Java Development Kit with {{{javac}}}, not just the Java Runtime Environment
+## The commands {{{java}}} and {{{javac}}} must be found in your {{{PATH}}}
+# Python 2.6
+## Ensure that the {{{python}}} command is in your {{{PATH}}}
+## Do ''not'' install Cygwin Python!
+# The Java packages (usually available in your distribution):
+## commons-collections
+## commons-dbcp
+## commons-logging
+## commons-logging-api
+## commons-pool
+## commons-httpclient
+## ws-commons-util
+!!Installing the dependencies on Windows
+Some of the packages in the above list are only available on Windows through Cygwin.  If that is your case, install them using Cygwin and remember to include the Cygwin {{{bin/}}} directory in your PATH.
+
+Do not install Cygwin Python!  Use the Python for Windows official installer instead.
+!Management Server-specific dependencies
+# Apache Tomcat
+## If you are using the official Apache binary distribution, set the environment variable {{{TOMCAT_HOME}}} to point to the Apache Tomcat directory
+# MySQL
+## At the very minimum, you need to have the client and libraries installed
+## If you will be running the Management Server in the same machine that will run the database server, you need to have the server installed and running as well
+# The MySQLdb module for Python 2.6
+## If you use Windows, you can find a [[pre-built package here|http://soemin.googlecode.com/files/MySQL-python-1.2.3c1.win32-py2.6.exe]] 
+# openssh-clients (provides the ssh-keygen command)
+# mkisofs (provides the genisoimage command)
To support incremental migration from one version to another without having to redeploy the database, the CloudStack supports an incremental schema migration mechanism for the database.
@@ -774,10 +837,10 @@ Otherwise, ''end-user migration will fail catastrophically''.
Not done yet!
-
+
Start here if you want to learn the essentials to extend, modify and enhance the CloudStack.  This assumes that you've already familiarized yourself with CloudStack concepts, installation and configuration using the [[Getting started|Welcome]] instructions.
 * [[Obtain the source|Obtaining the source]]
-* [[Prepare your environment|Preparing your development environment]]
+* [[Prepare your environment|Preparing your environment]]
 * [[Get acquainted with the development lifecycle|Your development lifecycle]]
 * [[Familiarize yourself with our development conventions|Development conventions]]
 Extra developer information:
@@ -924,7 +987,7 @@ This will create a folder called {{{cloudstack-oss}}} in your current folder.
 !Browsing the source code online
 You can browse the CloudStack source code through [[our CGit Web interface|http://git.cloud.com/cloudstack-oss]].
-
+
!Install the build dependencies on the machine where you will compile the CloudStack
 !!Fedora / CentOS
 The command [[waf installrpmdeps]] issued from the source tree gets it done.
@@ -1172,8 +1235,8 @@ Cloud.com's contact information is:
 !Legal information
 //Unless otherwise specified// by Cloud.com, Inc., or in the sources themselves, [[this software is OSI certified Open Source Software distributed under the GNU General Public License, version 3|License statement]].  OSI Certified is a certification mark of the Open Source Initiative.  The software powering this documentation is """BSD-licensed""" and obtained from [[TiddlyWiki.com|http://tiddlywiki.com/]].
-
-
This is the typical lifecycle that you would follow when hacking on a CloudStack component, assuming that your [[development environment has been set up|Preparing your development environment]]:
+
+
This is the typical lifecycle that you would follow when hacking on a CloudStack component, assuming that your [[development environment has been set up|Preparing your environment]]:
 # [[Configure|waf configure]] the source code<br>{{{./waf configure}}}
 # [[Build|waf build]] and [[install|waf install]] the CloudStack
 ## {{{./waf install}}}

From 576e4f163b697809e927c9c6b02990a7f52fb0c0 Mon Sep 17 00:00:00 2001
From: "Manuel Amador (Rudd-O)" 
Date: Fri, 3 Sep 2010 19:39:20 -0700
Subject: [PATCH 082/145] Simplified build-and-run-from-source instructions,
 and removed autolinking for CamelCase words on the handbook.

---
 README.html | 176 +++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 139 insertions(+), 37 deletions(-)

diff --git a/README.html b/README.html
index ae6d2e6e882..8212176103e 100644
--- a/README.html
+++ b/README.html
@@ -512,6 +512,13 @@ Also see [[AdvancedOptions]]
+
+
|''Type:''|file|
+|''URL:''|http://tiddlyvault.tiddlyspot.com/#%5B%5BDisableWikiLinksPlugin%20(TiddlyTools)%5D%5D|
+|''Workspace:''|(default)|
+
+This tiddler was automatically created to record the details of this server
+
---------------------------------------------------------------------
 FOR ANT USERS
@@ -702,21 +709,18 @@ Once this command is done, the packages will be built in the directory {{{artifa
 # As a non-root user, run the command {{{./waf deb}}} in the source directory.
 Once this command is done, the packages will be built in the directory {{{artifacts/debbuild}}}.
-
-
!Obtain the source for the CloudStack
+
+
You need to do the following steps on each machine that will run a CloudStack component.
+!Obtain the source for the CloudStack
 If you aren't reading this from a local copy of the source code, see [[Obtaining the source]].
 !Prepare your environment
 See [[Preparing your environment]].
-!Configure the build on the builder machine
+!Configure the build
 As non-root, run the command {{{./waf configure}}}.  See [[waf configure]] to discover configuration options for that command.
-!Build the CloudStack on the builder machine
+!Build the CloudStack
 As non-root, run the command {{{./waf build}}}.  See [[waf build]] for an explanation.
-!Install the CloudStack on the target systems
-On each machine where you intend to run a CloudStack component:
-# upload the entire source code tree after compilation, //ensuring that the source ends up in the same path as the machine in which you compiled it//,
-## {{{rsync}}} is [[usually very handy|Using rsync to quickly transport the source tree to another machine]] for this
-# in that newly uploaded directory of the target machine, run the command {{{./waf install}}} //as root//.
-Consult [[waf install]] for information on installation.
+!Install the CloudStack +Run the command {{{./waf install}}} //as root//. Consult [[waf install]] for information on installation.
!Changing the [[configuration|waf configure]] process
@@ -737,8 +741,8 @@ See the files in the {{{debian/}}} folder.
The Cloud.com CloudStack is an open source software product that enables the deployment, management, and configuration of multi-tier and multi-tenant infrastructure cloud services by enterprises and service providers.
-
-
Prior to building the CloudStack, you need to install the following software packages in your system:
+
+
Prior to building the CloudStack, you need to install the following software packages in your system.
 # Sun Java 1.6
 ## You must install the Java Development Kit with {{{javac}}}, not just the Java Runtime Environment
 ## The commands {{{java}}} and {{{javac}}} must be found in your {{{PATH}}}
@@ -752,12 +756,9 @@ See the files in the {{{debian/}}} folder.
## Do ''not'' install Cygwin Python! # The MySQLdb module for Python 2.6 ## If you use Windows, you can find a [[pre-built package here|http://soemin.googlecode.com/files/MySQL-python-1.2.3c1.win32-py2.6.exe]] -# bash -# coreutils -!Installing the dependencies on Windows -Some of the packages in the above list are only available on Windows through Cygwin. If that is your case, install them using Cygwin and remember to include the Cygwin {{{bin/}}} directory in your PATH. - -Do not install Cygwin Python! Use the Python for Windows official installer instead. +# The Bourne-again shell (also known as bash) +# GNU coreutils +''Note for Windows users'': Some of the packages in the above list are only available on Windows through Cygwin. If that is your case, install them using Cygwin and remember to include the Cygwin {{{bin/}}} directory in your PATH. Under no circumstances install Cygwin Python! Use the Python for Windows official installer instead. !Additional dependencies for Linux development environments # GCC (only needed on Linux) # glibc-devel / glibc-dev @@ -768,15 +769,17 @@ Do not install Cygwin Python! Use the Python for Windows official installer ins ## commons-logging-api ## commons-pool ## commons-httpclient -## ws-commons-util
+## ws-commons-util +# useradd +# userdel
-
+
The following software / programs must be correctly installed in the machines where you will run a CloudStack component.  This list is by no means complete yet, but it will be soon.
-!Dependencies common to all components of the CloudStack
+
+''Note for Windows users'':  Some of the packages in the lists below are only available on Windows through Cygwin.  If that is your case, install them using Cygwin and remember to include the Cygwin {{{bin/}}} directory in your PATH.  Under no circumstances install Cygwin Python!  Use the Python for Windows official installer instead.
+!Run-time dependencies common to all components of the CloudStack
 # bash
 # coreutils
-# useradd
-# userdel
 # Sun Java 1.6
 ## You must install the Java Development Kit with {{{javac}}}, not just the Java Runtime Environment
 ## The commands {{{java}}} and {{{javac}}} must be found in your {{{PATH}}}
@@ -791,10 +794,6 @@ Do not install Cygwin Python!  Use the Python for Windows official installer ins
 ## commons-pool
 ## commons-httpclient
 ## ws-commons-util
-!!Installing the dependencies on Windows
-Some of the packages in the above list are only available on Windows through Cygwin.  If that is your case, install them using Cygwin and remember to include the Cygwin {{{bin/}}} directory in your PATH.
-
-Do not install Cygwin Python!  Use the Python for Windows official installer instead.
 !Management Server-specific dependencies
 # Apache Tomcat
 ## If you are using the official Apache binary distribution, set the environment variable {{{TOMCAT_HOME}}} to point to the Apache Tomcat directory
@@ -834,6 +833,108 @@ Otherwise, ''end-user migration will fail catastrophically''.
#[[Source layout guide]]
+
+
/***
+|Name|DisableWikiLinksPlugin|
+|Source|http://www.TiddlyTools.com/#DisableWikiLinksPlugin|
+|Version|1.6.0|
+|Author|Eric Shulman|
+|License|http://www.TiddlyTools.com/#LegalStatements|
+|~CoreVersion|2.1|
+|Type|plugin|
+|Description|selectively disable TiddlyWiki's automatic ~WikiWord linking behavior|
+This plugin allows you to disable TiddlyWiki's automatic ~WikiWord linking behavior, so that WikiWords embedded in tiddler content will be rendered as regular text, instead of being automatically converted to tiddler links.  To create a tiddler link when automatic linking is disabled, you must enclose the link text within {{{[[...]]}}}.
+!!!!!Usage
+<<<
+You can block automatic WikiWord linking behavior for any specific tiddler by ''tagging it with<<tag excludeWikiWords>>'' (see configuration below) or, check a plugin option to disable automatic WikiWord links to non-existing tiddler titles, while still linking WikiWords that correspond to existing tiddlers titles or shadow tiddler titles.  You can also block specific selected WikiWords from being automatically linked by listing them in [[DisableWikiLinksList]] (see configuration below), separated by whitespace.  This tiddler is optional and, when present, causes the listed words to always be excluded, even if automatic linking of other WikiWords is being permitted.  
+
+Note: WikiWords contained in default ''shadow'' tiddlers will be automatically linked unless you select an additional checkbox option that lets you disable these automatic links as well, though this is not recommended, since it can make it more difficult to access some TiddlyWiki standard default content (such as AdvancedOptions or SideBarTabs).
+<<<
+!!!!!Configuration
+<<<
+<<option chkDisableWikiLinks>> Disable ALL automatic WikiWord tiddler links
+<<option chkAllowLinksFromShadowTiddlers>> ... except for WikiWords //contained in// shadow tiddlers
+<<option chkDisableNonExistingWikiLinks>> Disable automatic WikiWord links for non-existing tiddlers
+Disable automatic WikiWord links for words listed in: <<option txtDisableWikiLinksList>>
+Disable automatic WikiWord links for tiddlers tagged with: <<option txtDisableWikiLinksTag>>
+<<<
+!!!!!Revisions
+<<<
+2008.07.22 [1.6.0] hijack tiddler changed() method to filter disabled wiki words from internal links[] array (so they won't appear in the missing tiddlers list)
+2007.06.09 [1.5.0] added configurable txtDisableWikiLinksTag (default value: "excludeWikiWords") to allow selective disabling of automatic WikiWord links for any tiddler tagged with that value.
+2006.12.31 [1.4.0] in formatter, test for chkDisableNonExistingWikiLinks
+2006.12.09 [1.3.0] in formatter, test for excluded wiki words specified in DisableWikiLinksList
+2006.12.09 [1.2.2] fix logic in autoLinkWikiWords() (was allowing links TO shadow tiddlers, even when chkDisableWikiLinks is TRUE).  
+2006.12.09 [1.2.1] revised logic for handling links in shadow content
+2006.12.08 [1.2.0] added hijack of Tiddler.prototype.autoLinkWikiWords so regular (non-bracketed) WikiWords won't be added to the missing list
+2006.05.24 [1.1.0] added option to NOT bypass automatic wikiword links when displaying default shadow content (default is to auto-link shadow content)
+2006.02.05 [1.0.1] wrapped wikifier hijack in init function to eliminate globals and avoid FireFox 1.5.0.1 crash bug when referencing globals
+2005.12.09 [1.0.0] initial release
+<<<
+!!!!!Code
+***/
+//{{{
+version.extensions.DisableWikiLinksPlugin= {major: 1, minor: 6, revision: 0, date: new Date(2008,7,22)};
+
+if (config.options.chkDisableNonExistingWikiLinks==undefined) config.options.chkDisableNonExistingWikiLinks= false;
+if (config.options.chkDisableWikiLinks==undefined) config.options.chkDisableWikiLinks=false;
+if (config.options.txtDisableWikiLinksList==undefined) config.options.txtDisableWikiLinksList="DisableWikiLinksList";
+if (config.options.chkAllowLinksFromShadowTiddlers==undefined) config.options.chkAllowLinksFromShadowTiddlers=true;
+if (config.options.txtDisableWikiLinksTag==undefined) config.options.txtDisableWikiLinksTag="excludeWikiWords";
+
+// find the formatter for wikiLink and replace handler with 'pass-thru' rendering
+initDisableWikiLinksFormatter();
+function initDisableWikiLinksFormatter() {
+	for (var i=0; i<config.formatters.length && config.formatters[i].name!="wikiLink"; i++);
+	config.formatters[i].coreHandler=config.formatters[i].handler;
+	config.formatters[i].handler=function(w) {
+		// suppress any leading "~" (if present)
+		var skip=(w.matchText.substr(0,1)==config.textPrimitives.unWikiLink)?1:0;
+		var title=w.matchText.substr(skip);
+		var exists=store.tiddlerExists(title);
+		var inShadow=w.tiddler && store.isShadowTiddler(w.tiddler.title);
+		// check for excluded Tiddler
+		if (w.tiddler && w.tiddler.isTagged(config.options.txtDisableWikiLinksTag))
+			{ w.outputText(w.output,w.matchStart+skip,w.nextMatch); return; }
+		// check for specific excluded wiki words
+		var t=store.getTiddlerText(config.options.txtDisableWikiLinksList);
+		if (t && t.length && t.indexOf(w.matchText)!=-1)
+			{ w.outputText(w.output,w.matchStart+skip,w.nextMatch); return; }
+		// if not disabling links from shadows (default setting)
+		if (config.options.chkAllowLinksFromShadowTiddlers && inShadow)
+			return this.coreHandler(w);
+		// check for non-existing non-shadow tiddler
+		if (config.options.chkDisableNonExistingWikiLinks && !exists)
+			{ w.outputText(w.output,w.matchStart+skip,w.nextMatch); return; }
+		// if not enabled, just do standard WikiWord link formatting
+		if (!config.options.chkDisableWikiLinks)
+			return this.coreHandler(w);
+		// just return text without linking
+		w.outputText(w.output,w.matchStart+skip,w.nextMatch)
+	}
+}
+
+Tiddler.prototype.coreAutoLinkWikiWords = Tiddler.prototype.autoLinkWikiWords;
+Tiddler.prototype.autoLinkWikiWords = function()
+{
+	// if all automatic links are not disabled, just return results from core function
+	if (!config.options.chkDisableWikiLinks)
+		return this.coreAutoLinkWikiWords.apply(this,arguments);
+	return false;
+}
+
+Tiddler.prototype.disableWikiLinks_changed = Tiddler.prototype.changed;
+Tiddler.prototype.changed = function()
+{
+	this.disableWikiLinks_changed.apply(this,arguments);
+	// remove excluded wiki words from links array
+	var t=store.getTiddlerText(config.options.txtDisableWikiLinksList,"").readBracketedList();
+	if (t.length) for (var i=0; i<t.length; i++)
+		if (this.links.contains(t[i]))
+			this.links.splice(this.links.indexOf(t[i]),1);
+};
+//}}}
+
Not done yet!
@@ -987,16 +1088,17 @@ This will create a folder called {{{cloudstack-oss}}} in your current folder. !Browsing the source code online You can browse the CloudStack source code through [[our CGit Web interface|http://git.cloud.com/cloudstack-oss]].
-
-
!Install the build dependencies on the machine where you will compile the CloudStack
-!!Fedora / CentOS
-The command [[waf installrpmdeps]] issued from the source tree gets it done.
-!!Ubuntu
-The command [[waf installdebdeps]] issues from the source tree gets it done.
-!!Other distributions
-See [[CloudStack build dependencies]]
-!Install the run-time dependencies on the machines where you will run the CloudStack
-See [[CloudStack run-time dependencies]].
+
+
!Install the build dependencies
+* If you want to compile the CloudStack on Linux:
+** Fedora / CentOS: The command [[waf installrpmdeps]] issued from the source tree gets it done.
+** Ubuntu: The command [[waf installdebdeps]] issued from the source tree gets it done.
+** Other distributions: Manually install the packages listed in [[CloudStack build dependencies]].
+* If you want to compile the CloudStack on Windows or Mac:
+** Manually install the packages listed in [[CloudStack build dependencies]].
+** Note that you won't be able to deploy this compiled CloudStack onto Linux machines -- you will be limited to running the Management Server.
+!Install the run-time dependencies
+In addition to the build dependencies, a number of software packages need to be installed on the machine to be able to run certain components of the CloudStack.  These packages are not strictly required to //build// the stack, but they are required to run at least one part of it.  See the topic [[CloudStack run-time dependencies]] for the list of packages.
Every time you run {{{./waf install}}} to deploy changed code, waf will install configuration files once again.  This can be a nuisance if you are developing the stack.

From 0765e4b5e2c1341d3f96c7e8a5599b64c2c86f82 Mon Sep 17 00:00:00 2001
From: kishan 
Date: Tue, 7 Sep 2010 13:19:14 +0530
Subject: [PATCH 083/145] bug : 6095 Show proper error when vm create fails due
 to insufficient capacity

---
 .../manager/allocator/impl/UserConcentratedAllocator.java     | 2 +-
 server/src/com/cloud/vm/UserVmManagerImpl.java                | 4 ++++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/server/src/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java
index 2236689c049..cab971738e1 100755
--- a/server/src/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java
+++ b/server/src/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java
@@ -143,7 +143,7 @@ public class UserConcentratedAllocator implements PodAllocator {
         }
         
         if (availablePods.size() == 0) {
-            s_logger.debug("There are no pods with enough memory/CPU capacity in zone" + zone.getName());
+            s_logger.debug("There are no pods with enough memory/CPU capacity in zone " + zone.getName());
             return null;
         } else {
         	// Return a random pod
diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java
index c183ba9eceb..6e724f2a5b3 100755
--- a/server/src/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/com/cloud/vm/UserVmManagerImpl.java
@@ -1511,6 +1511,10 @@ public class UserVmManagerImpl implements UserVmManager {
                 podsToAvoid.add(pod.first().getId());
             }
 
+            if(pod == null){
+                throw new ResourceAllocationException("Create VM " + ((vm == null) ? vmId : vm.toString()) + " failed. There are no pods with enough CPU/memory");
+            }
+            
             if ((vm == null) || (poolid == 0)) {
                 throw new ResourceAllocationException("Create VM " + ((vm == null) ? vmId : vm.toString()) + " failed due to no Storage Pool is available");
             }

From 8bfe15d68dfc583c0dcdc464a620488129d85ffc Mon Sep 17 00:00:00 2001
From: kishan 
Date: Tue, 7 Sep 2010 16:02:09 +0530
Subject: [PATCH 084/145] bug 6080: handle default route list when dhcp is in
 use

---
 server/src/com/cloud/server/ConfigurationServerImpl.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/src/com/cloud/server/ConfigurationServerImpl.java b/server/src/com/cloud/server/ConfigurationServerImpl.java
index 63ff6a811df..851e3068772 100644
--- a/server/src/com/cloud/server/ConfigurationServerImpl.java
+++ b/server/src/com/cloud/server/ConfigurationServerImpl.java
@@ -238,7 +238,7 @@ public class ConfigurationServerImpl implements ConfigurationServer {
 		
 		String[] defaultRouteList = defaultRoute.split("\\s+");
 		
-		if (defaultRouteList.length != 7) {
+		if (defaultRouteList.length < 5) {
 			return null;
 		}
 		

From 1fcea6a786c10085369eaf6ba696c1d58dd5a9f6 Mon Sep 17 00:00:00 2001
From: edison 
Date: Tue, 7 Sep 2010 09:58:03 -0700
Subject: [PATCH 085/145] Add 3 times re-try when failed to connect to kvm
 agent host.

---
 .../hypervisor/kvm/discoverer/KvmServerDiscoverer.java | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/server/src/com/cloud/hypervisor/kvm/discoverer/KvmServerDiscoverer.java b/server/src/com/cloud/hypervisor/kvm/discoverer/KvmServerDiscoverer.java
index b1d8bb088cf..a17536db1a0 100644
--- a/server/src/com/cloud/hypervisor/kvm/discoverer/KvmServerDiscoverer.java
+++ b/server/src/com/cloud/hypervisor/kvm/discoverer/KvmServerDiscoverer.java
@@ -24,6 +24,7 @@ import com.cloud.configuration.dao.ConfigurationDao;
 import com.cloud.exception.DiscoveryException;
 import com.cloud.host.HostVO;
 import com.cloud.host.Status;
+import com.cloud.host.Status.Event;
 import com.cloud.host.dao.HostDao;
 import com.cloud.hypervisor.kvm.resource.KvmDummyResourceBase;
 import com.cloud.hypervisor.xen.resource.CitrixResourceBase;
@@ -44,7 +45,7 @@ public class KvmServerDiscoverer extends DiscovererBase implements Discoverer,
 	 private String _setupAgentPath;
 	 private ConfigurationDao _configDao;
 	 private String _hostIp;
-	 private int _waitTime = 10;
+	 private int _waitTime = 3; /*wait for 3 minutes*/
 	 @Inject HostDao _hostDao = null;
 	 
 	@Override
@@ -244,6 +245,7 @@ public class KvmServerDiscoverer extends DiscovererBase implements Discoverer,
 		for (int i = 0 ; i < _waitTime; i++) {
 			
 			if (host.getStatus() != Status.Up) {
+				s_logger.debug("Wait host comes back, try: " + i);
 				try {
 					Thread.sleep(60000);
 				} catch (InterruptedException e) {
@@ -253,9 +255,11 @@ public class KvmServerDiscoverer extends DiscovererBase implements Discoverer,
 				return;
 			}
 		}
-
+		
+		
+		_hostDao.updateStatus(host, Event.AgentDisconnected, msId);
 		/*Timeout, throw warning msg to user*/
-		throw new DiscoveryException("Agent " + host.getId() + ":" + host.getPublicIpAddress() + " does not come back, It may connect to server later, if not, please check the agent log");
+		throw new DiscoveryException("Host " + host.getId() + ":" + host.getPrivateIpAddress() + " does not come back, It may connect to server later, if not, please check the agent log on this host");
 	}
 	
 	@Override

From 1d47910c32da1db8fd23d2eaa76315aca21dab93 Mon Sep 17 00:00:00 2001
From: abhishek 
Date: Tue, 7 Sep 2010 10:24:56 -0700
Subject: [PATCH 086/145] bug 6044: Added functionality to ensure vms belonging
 to deleted accounts do not show up

status 6044: resolved fixed
---
 server/src/com/cloud/api/commands/ListVMsCmd.java | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/server/src/com/cloud/api/commands/ListVMsCmd.java b/server/src/com/cloud/api/commands/ListVMsCmd.java
index add2a51445d..68ffa17b314 100644
--- a/server/src/com/cloud/api/commands/ListVMsCmd.java
+++ b/server/src/com/cloud/api/commands/ListVMsCmd.java
@@ -182,6 +182,14 @@ public class ListVMsCmd extends BaseCmd {
         }
 
         for (UserVm vmInstance : virtualMachines) {
+    
+        	//if the account is deleted, do not return the user vm 
+        	Account currentVmAccount = getManagementServer().getAccount(vmInstance.getAccountId());
+        	if(currentVmAccount.getRemoved()!=null)
+        	{
+        		continue; //not returning this vm
+        	}
+        	
             List> vmData = new ArrayList>();
             AsyncJobVO asyncJob = getManagementServer().findInstancePendingAsyncJob("vm_instance", vmInstance.getId());
             if(asyncJob != null) {

From 47dbeaa87848c3ae60ba5c7ce1ee60d4ae08cee1 Mon Sep 17 00:00:00 2001
From: Jessica Wang 
Date: Tue, 7 Sep 2010 10:40:16 -0700
Subject: [PATCH 087/145] new UI - when accordion menu is changed, collapse
 submenu instance group under menu instance.

---
 ui/new/scripts/cloud.core.init.js     | 6 +++++-
 ui/new/scripts/cloud.core.instance.js | 2 +-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/ui/new/scripts/cloud.core.init.js b/ui/new/scripts/cloud.core.init.js
index c3f9a1ffb10..732adf89029 100755
--- a/ui/new/scripts/cloud.core.init.js
+++ b/ui/new/scripts/cloud.core.init.js
@@ -9,7 +9,11 @@ $(document).ready(function() {
             if(ui.newContent.length==0)  //collapse
                 ui.oldHeader.find("#arrow_icon").removeClass("open").addClass("close");            
             else  //expand
-                ui.newHeader.find("#arrow_icon").removeClass("close").addClass("open");   
+                ui.newHeader.find("#arrow_icon").removeClass("close").addClass("open"); 
+                
+            //collapse submenu instanceGroup under menu instance
+            $("#leftmenu_instance_group_header").find("#arrow_icon").removeClass("open").addClass("close");            
+            $("#leftmenu_instance_group_container").empty();     
         }
     });
     $("#accordion_menu").show();
diff --git a/ui/new/scripts/cloud.core.instance.js b/ui/new/scripts/cloud.core.instance.js
index d1dc3781820..78ce2d97683 100755
--- a/ui/new/scripts/cloud.core.instance.js
+++ b/ui/new/scripts/cloud.core.instance.js
@@ -452,7 +452,7 @@ function clickInstanceGroupHeader($arrowIcon) {
                                         var instance = instances[i];
                                         var instanceGroup = instance.group;
                                         if(instanceGroup == null || instanceGroup == "")
-                                            instanceGroup = "(no group name)";                                                                       
+                                            instanceGroup = noGroupName;                                                                       
                                         if(instanceGroup != groupName)
                                             continue;                                        
                                         var $midmenuItemVm1 = $midmenuItemVm.clone();                                                                                                                               

From a6afbbe341623ae3d9532a647951cb1ac4f448ec Mon Sep 17 00:00:00 2001
From: NIKITA 
Date: Tue, 7 Sep 2010 10:55:00 -0700
Subject: [PATCH 088/145] Volume UI for instance

---
 ui/new/css/main.css                  |  40 +++++++++++++++++++---
 ui/new/images/gird_actions.gif       | Bin 0 -> 1110 bytes
 ui/new/images/gird_actions_hover.gif | Bin 0 -> 1106 bytes
 ui/new/images/grid_headerbg.gif      | Bin 0 -> 182 bytes
 ui/new/jsp/tab_instance.jsp          |  49 ++++++++++++++++++++++++++-
 5 files changed, 84 insertions(+), 5 deletions(-)
 create mode 100644 ui/new/images/gird_actions.gif
 create mode 100644 ui/new/images/gird_actions_hover.gif
 create mode 100644 ui/new/images/grid_headerbg.gif

diff --git a/ui/new/css/main.css b/ui/new/css/main.css
index a4f239d1334..1c6ac42a7d4 100644
--- a/ui/new/css/main.css
+++ b/ui/new/css/main.css
@@ -1467,7 +1467,7 @@ a:visited {
 	height:auto;
 	float:left;
 	margin:0 0 5px 10px;
-	padding:0;
+	padding:0 0 10px 0;
 	list-style:none;
 }
 
@@ -1479,7 +1479,7 @@ a:visited {
 	color:#CCC;
 	font-size:11px;
 	font-weight:normal;
-	margin:6px 0 0 0;
+	margin:8px 0 0 0;
 	padding:0;
 	list-style:none;
 }
@@ -1998,7 +1998,39 @@ a:visited {
 	color:#a90000;
 }
 
-.thanks box {
-		
+.grid_header {
+	width:100%;
+	height:20px;
+	float:left;
+	background:url(../images/grid_headerbg.gif) repeat-x top left;
+	margin:0;
+	padding:0;
 }
 
+.grid_header_title {
+	width:auto;
+	height:auto;
+	float:left;
+	margin:4px 0 0 10px;
+	display:inline;
+	color:#434343;
+	text-align:left;
+	font-weight:bold;
+	font-size:11px;
+	padding:0;
+}
+
+.grid_actionbox {
+	width:33px;
+	height:15px;
+	float:right;
+	background:url(../images/gird_actions.gif) no-repeat top left;
+	margin:2px 10px 0 0;
+	padding:0;
+	cursor:pointer;
+	cursor:hand;
+}
+
+.grid_actionbox:hover {
+	background:url(../images/gird_actions_hover.gif) no-repeat top left;
+}
\ No newline at end of file
diff --git a/ui/new/images/gird_actions.gif b/ui/new/images/gird_actions.gif
new file mode 100644
index 0000000000000000000000000000000000000000..18a377f5704032851c0aebfa67e8b3376291d8f5
GIT binary patch
literal 1110
zcmZ?wbhEHbRAk_1_|Cxa|NsAY@7{g*@ZrsyH~;?q`}FD4moH!b{{8#y+qWM-e*F6N
z>-X>96%`e)UcLJI_3OuvA5WY(F=fh>Q>RYdxN+m*!-ub5zkc-SQF46Z#EBE1Jb7~A
z!iBSE&wl^@{q5Vg@87?#t*)6fch1X~FTGq`bToA;%PY^HKX>uMrPk)=bLY+#6ciK}
z6*-t&2Kf2Sn>+XHnX?Yo)=QTzO-o7E)zN+W^y%*1yS8lE*45P&9~;-$&~#w`f#u7W
zFIloYE;_cZrYb)_KPxj6sKU$1v7x>pCNga1%sH(MjSChm`10jzLR7T3yVvulPkOq$
zU7VfU+S;BxeE9R{Pisp{T}_?2bLV=yy4F6GJ;{m5VL?HA_wHM|
zWJza7r=pDF=~Jf<9Xd2=(uD2Xw#}F^tF)-7thi+1!iC?yea}iuPfbp)t*LqO;>EP-
z({5b9o}XVB77(y{_3G(Ur*B%jHZLn{>Xa$P`2`J)4fpQdYinr<3kdG$Xpf7IY-(zn
zHf`F&d-oPCSU73Yq(6WDtY5u`VH6BR2q^w!0b8pBB0+hAf#W{|I}gW%2Mi6(9Ku>L
z6$SxK3{27-3L6BxdyRRFl~fcKI5Kx}9Q+~ZkmA#+9Bq=Oa^ZpFA^oyP4L2AMd&t|S
zg&5U5XlU@C@2jMR`C|pN;x$tl%b4&K`~Ft)OR4z05LmG9@9!U!}s_-eB1Br`7?(OGwHwv
zAPfM7g@sb76oz5BTwYvU%x1G&TU$jUkw74*s;c7i`CKlSAPAjKXEK?*UT<}Eb$xxk
zP$;adtelydS;ld16l-p7=J9w2gTZL|v=GJH+S+=1dnFP{cXzjcm5|9~9*?KAwA5@i
zcXf45O-LAGO3pxDDeVT#!;8F;~I2?}tgHfsK3*v;`AsS|4
zhjHBDa7=!mcDY=Wi1US13jE){EI>yofA!ga_~#Qq0s*8QgSAWwSd*}6=k@XuQhXvQ
z?{>OY@hq>FvJc8RNn>sSn9_8*^0_vTPr1-^E{mDpn7e~7rz4rhj}J35sFUZxwawdD
z2dR%XWzi^-bE546umc<;ZSz1qVBBa@W=`iZoB7Eue%#HJB;nW)DKU);vtYaQ!c|34
zQf|x9u%TA5HnhEPhCP~sZn4$}%w#y`zqC|hzlWaS-G
zc2SOk=YCteE}`J%o6s{Vv^#%Qe(B=1UO_6Yh!H4(!)uOT2EkXOFNYfr?zy*%F*vP{
zCyyp)@lM@Z-@gBfQ1h5JpuX!*+mf(1HUEZIe4=c=i81ovK6&F-lBqra-5?fQbEL6k
X4%UHGG9_ycq-;)22a^LSRN&}u5dOiJ

literal 0
HcmV?d00001

diff --git a/ui/new/images/grid_headerbg.gif b/ui/new/images/grid_headerbg.gif
new file mode 100644
index 0000000000000000000000000000000000000000..9004bfa474b6c93210d51ef5fa5dc9167cb79bdd
GIT binary patch
literal 182
zcmZ?wbhEHbWMdFvIKsei_UzeLuU?%xb?VHSGgq!$xqkinn>TMBJa};a{P`zOp4_^1
z>(Zr5`}XZSapJ_mg9kr;{CM=}(Ytr=K7aoF{rmR}L;%H~EMQeSAQEIJ1FP`^<-Xud
zsSK$#Yo6@$)R8Zdy}M)4d)N2E2OcDOxJdLjEGasW!!_?hTM5_7DA^l2$)|Z48LR
             <%=t.t("Statistics")%>
-
+ + From d801ead9f91d6cbe3b451ddf98abbeff7872885a Mon Sep 17 00:00:00 2001 From: NIKITA Date: Tue, 7 Sep 2010 11:00:38 -0700 Subject: [PATCH 089/145] no major change --- ui/index.jsp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/index.jsp b/ui/index.jsp index 8be1f714663..3fe04850c2a 100755 --- a/ui/index.jsp +++ b/ui/index.jsp @@ -25,7 +25,7 @@ long milliseconds = new Date().getTime(); - Default Cloud.com styling of the site. This file contains the easiest portion of the site that can be styled to your companie's need such as logo, top navigation, and dialogs. --> - + From 0ea1f370b8336b78db27c1661395097eabfa7c9f Mon Sep 17 00:00:00 2001 From: abhishek Date: Tue, 7 Sep 2010 11:08:38 -0700 Subject: [PATCH 090/145] bug 5147: Adding the check to block create and destroy of system resources whilst the storage pool is in maintenance mode. This is based on checking if the pool is in UP state --- .../storage/allocator/AbstractStoragePoolAllocator.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java index e0ae7a9e756..d29e51d11cf 100644 --- a/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java +++ b/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java @@ -142,7 +142,12 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement return false; } - + //if pool is NOT in up state, return false + if(!pool.getStatus().equals(com.cloud.host.Status.Up))//this is the pool status + { + return false; + } + // Check that the pool type is correct if (!poolIsCorrectType(dskCh, pool, vm, offering)) { return false; From 1a824bd19594676094ef6900fc6fb9ed6f1a5a63 Mon Sep 17 00:00:00 2001 From: alena Date: Tue, 7 Sep 2010 11:25:06 -0700 Subject: [PATCH 091/145] 1) Use transactions instead of global lock in AlertManagerImpl, 
StatsCollector. 2) Removed GlobalLock on read available capacity from UserConcentratedAllocator --- .../impl/UserConcentratedAllocator.java | 35 ++++---------- .../src/com/cloud/alert/AlertManagerImpl.java | 41 ++++++++--------- .../src/com/cloud/server/StatsCollector.java | 46 ++++++++----------- 3 files changed, 47 insertions(+), 75 deletions(-) diff --git a/server/src/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java index cab971738e1..ba612b71fc2 100755 --- a/server/src/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java +++ b/server/src/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java @@ -37,7 +37,6 @@ import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; import com.cloud.dc.dao.HostPodDao; import com.cloud.offering.NetworkOffering; -import com.cloud.offering.NetworkOffering.GuestIpType; import com.cloud.offering.ServiceOffering; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; @@ -50,7 +49,6 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.component.Inject; -import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.SearchCriteria; import com.cloud.vm.State; import com.cloud.vm.UserVmVO; @@ -76,7 +74,6 @@ public class UserConcentratedAllocator implements PodAllocator { @Inject VMInstanceDao _vmInstanceDao; Random _rand = new Random(System.currentTimeMillis()); - private final GlobalLock m_capacityCheckLock = GlobalLock.getInternLock("capacity.check"); private int _hoursToSkipStoppedVMs = 24; private int _secStorageVmRamSize = 1024; @@ -156,30 +153,14 @@ public class UserConcentratedAllocator implements PodAllocator { private boolean dataCenterAndPodHasEnoughCapacity(long dataCenterId, long podId, long capacityNeeded, short capacityType, long[] hostCandidate) { List capacities = null; - 
if (m_capacityCheckLock.lock(120)) { // 2 minutes - try { - SearchCriteria sc = _capacityDao.createSearchCriteria(); - sc.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType); - sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, dataCenterId); - sc.addAnd("podId", SearchCriteria.Op.EQ, podId); - capacities = _capacityDao.search(sc, null); - } finally { - m_capacityCheckLock.unlock(); - } - } else { - s_logger.error("Unable to acquire synchronization lock for pod allocation"); - - // we now try to enforce reservation-style allocation, waiting time has been adjusted - // to 2 minutes - return false; - -/* - // If we can't lock the table, just return that there is enough capacity and allow instance creation to fail on the agent - // if there is not enough capacity. All that does is skip the optimization of checking for capacity before sending the - // command to the agent. - return true; -*/ - } + + SearchCriteria sc = _capacityDao.createSearchCriteria(); + sc.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType); + sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, dataCenterId); + sc.addAnd("podId", SearchCriteria.Op.EQ, podId); + s_logger.trace("Executing search"); + capacities = _capacityDao.search(sc, null); + s_logger.trace("Done with a search"); boolean enoughCapacity = false; if (capacities != null) { diff --git a/server/src/com/cloud/alert/AlertManagerImpl.java b/server/src/com/cloud/alert/AlertManagerImpl.java index 033b1ba3282..9250bb86557 100644 --- a/server/src/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/com/cloud/alert/AlertManagerImpl.java @@ -65,8 +65,9 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.component.ComponentLocator; -import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.DB; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.Transaction; import com.cloud.vm.ConsoleProxyVO; import com.cloud.vm.DomainRouterVO; import 
com.cloud.vm.SecondaryStorageVmVO; @@ -118,8 +119,6 @@ public class AlertManagerImpl implements AlertManager { private double _publicIPCapacityThreshold = 0.75; private double _privateIPCapacityThreshold = 0.75; - private final GlobalLock m_capacityCheckLock = GlobalLock.getInternLock("capacity.check"); - @Override public boolean configure(String name, Map params) throws ConfigurationException { _name = name; @@ -319,7 +318,7 @@ public class AlertManagerImpl implements AlertManager { } } - @Override + @Override @DB public void recalculateCapacity() { // FIXME: the right way to do this is to register a listener (see RouterStatsListener, VMSyncListener) // for the vm sync state. The listener model has connects/disconnects to keep things in sync much better @@ -435,25 +434,23 @@ public class AlertManagerImpl implements AlertManager { newCapacities.add(newPrivateIPCapacity); } - if (m_capacityCheckLock.lock(5)) { // 5 second timeout - try { - // delete the old records - _capacityDao.clearNonStorageCapacities(); + Transaction txn = Transaction.currentTxn(); + try { + txn.start(); + // delete the old records + _capacityDao.clearNonStorageCapacities(); - for (CapacityVO newCapacity : newCapacities) { - _capacityDao.persist(newCapacity); - } - } finally { - m_capacityCheckLock.unlock(); - } - - if (s_logger.isTraceEnabled()) { - s_logger.trace("done recalculating system capacity"); - } - } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Skipping capacity check, unable to lock the capacity table for recalculation."); - } + for (CapacityVO newCapacity : newCapacities) { + s_logger.trace("Executing capacity update"); + _capacityDao.persist(newCapacity); + s_logger.trace("Done with capacity update"); + } + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + s_logger.error("Unable to start transaction for capacity update"); + }finally { + txn.close(); } } diff --git a/server/src/com/cloud/server/StatsCollector.java 
b/server/src/com/cloud/server/StatsCollector.java index 58cfe27cdf8..668d4674171 100644 --- a/server/src/com/cloud/server/StatsCollector.java +++ b/server/src/com/cloud/server/StatsCollector.java @@ -61,6 +61,7 @@ import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.Transaction; import com.cloud.vm.UserVmManager; import com.cloud.vm.UserVmVO; import com.cloud.vm.VmStats; @@ -99,7 +100,7 @@ public class StatsCollector { long storageStatsInterval = -1L; long volumeStatsInterval = -1L; - private final GlobalLock m_capacityCheckLock = GlobalLock.getInternLock("capacity.check"); + //private final GlobalLock m_capacityCheckLock = GlobalLock.getInternLock("capacity.check"); public static StatsCollector getInstance() { return s_instance; @@ -335,32 +336,25 @@ public class StatsCollector { // _capacityDao.persist(capacity); } - if (m_capacityCheckLock.lock(5)) { // 5 second timeout - if (s_logger.isTraceEnabled()) { - s_logger.trace("recalculating system storage capacity"); - } - try { - // now update the capacity table with the new stats - // FIXME: the right way to do this is to register a listener (see RouterStatsListener) - // for the host stats, send the WatchCommand at a regular interval - // to collect the stats from an agent and update the database as needed. 
The - // listener model has connects/disconnects to keep things in sync much better - // than this model right now - _capacityDao.clearStorageCapacities(); + Transaction txn = Transaction.open(Transaction.CLOUD_DB); + try { + if (s_logger.isTraceEnabled()) { + s_logger.trace("recalculating system storage capacity"); + } + txn.start(); + _capacityDao.clearStorageCapacities(); - for (CapacityVO newCapacity : newCapacities) { - _capacityDao.persist(newCapacity); - } - } finally { - m_capacityCheckLock.unlock(); - } - if (s_logger.isTraceEnabled()) { - s_logger.trace("done recalculating system storage capacity"); - } - } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("not recalculating system storage capacity, unable to lock capacity table"); - } + for (CapacityVO newCapacity : newCapacities) { + s_logger.trace("Executing capacity update"); + _capacityDao.persist(newCapacity); + s_logger.trace("Done with capacity update"); + } + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + s_logger.error("Unable to start transaction for storage capacity update"); + }finally { + txn.close(); } } catch (Throwable t) { s_logger.error("Error trying to retrieve storage stats", t); From 5aa70deae776d0b71ff026bfdc509a98638d0374 Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Tue, 7 Sep 2010 11:24:46 -0700 Subject: [PATCH 092/145] new UI - add a shared function fromdb() --- ui/new/scripts/cloud.core.account.js | 4 ++-- ui/new/scripts/cloud.core.event.js | 6 +++--- ui/new/scripts/cloud.core.instance.js | 16 ++++++++-------- ui/new/scripts/cloud.core.js | 14 +++++++++++--- ui/new/scripts/cloud.core.snapshot.js | 8 ++++---- ui/new/scripts/cloud.core.volume.js | 10 +++++----- 6 files changed, 33 insertions(+), 25 deletions(-) diff --git a/ui/new/scripts/cloud.core.account.js b/ui/new/scripts/cloud.core.account.js index ed4475d3af1..6438c247731 100644 --- a/ui/new/scripts/cloud.core.account.js +++ b/ui/new/scripts/cloud.core.account.js @@ -2,8 +2,8 @@ function 
loadAccountToRigntPanelFn($rightPanelContent) { var jsonObj = $rightPanelContent.data("jsonObj"); var $rightPanelContent = $("#right_panel_content"); $rightPanelContent.find("#role").text(toRole(jsonObj.accounttype)); - $rightPanelContent.find("#account").text(jsonObj.name); - $rightPanelContent.find("#domain").text(jsonObj.domain); + $rightPanelContent.find("#account").text(fromdb(jsonObj.name)); + $rightPanelContent.find("#domain").text(fromdb(jsonObj.domain)); $rightPanelContent.find("#vm_total").text(jsonObj.vmtotal); $rightPanelContent.find("#ip_total").text(jsonObj.iptotal); $rightPanelContent.find("#bytes_received").text(jsonObj.receivedbytes); diff --git a/ui/new/scripts/cloud.core.event.js b/ui/new/scripts/cloud.core.event.js index ca70c3b3096..72c54960e9c 100644 --- a/ui/new/scripts/cloud.core.event.js +++ b/ui/new/scripts/cloud.core.event.js @@ -1,11 +1,11 @@ function loadEventToRigntPanelFn($rightPanelContent) { var jsonObj = $rightPanelContent.data("jsonObj"); var $rightPanelContent = $("#right_panel_content"); - $rightPanelContent.find("#username").text(jsonObj.username); - $rightPanelContent.find("#account").text(jsonObj.account); + $rightPanelContent.find("#username").text(fromdb(jsonObj.username)); + $rightPanelContent.find("#account").text(fromdb(jsonObj.account)); $rightPanelContent.find("#type").text(jsonObj.type); $rightPanelContent.find("#level").text(jsonObj.level); - $rightPanelContent.find("#description").text(jsonObj.description); + $rightPanelContent.find("#description").text(fromdb(jsonObj.description)); $rightPanelContent.find("#state").text(jsonObj.state); setDateField(jsonObj.created, $rightPanelContent.find("#created")); } \ No newline at end of file diff --git a/ui/new/scripts/cloud.core.instance.js b/ui/new/scripts/cloud.core.instance.js index 78ce2d97683..d44eb6bf51d 100755 --- a/ui/new/scripts/cloud.core.instance.js +++ b/ui/new/scripts/cloud.core.instance.js @@ -384,21 +384,21 @@ function clickInstanceGroupHeader($arrowIcon) { 
var jsonObj = $t.data("jsonObj"); var vmName = getVmName(jsonObj.name, jsonObj.displayname); - $rightPanelHeader.find("#vm_name").text(vmName); + $rightPanelHeader.find("#vm_name").text(fromdb(vmName)); updateVirtualMachineStateInRightPanel(jsonObj.state); $rightPanelContent.find("#ipAddress").text(jsonObj.ipaddress); - $rightPanelContent.find("#zoneName").text(jsonObj.zonename); - $rightPanelContent.find("#templateName").text(jsonObj.templatename); - $rightPanelContent.find("#serviceOfferingName").text(jsonObj.serviceofferingname); + $rightPanelContent.find("#zoneName").text(fromdb(jsonObj.zonename)); + $rightPanelContent.find("#templateName").text(fromdb(jsonObj.templatename)); + $rightPanelContent.find("#serviceOfferingName").text(fromdb(jsonObj.serviceofferingname)); if(jsonObj.haenable == "true") $rightPanelContent.find("#ha").removeClass("cross_icon").addClass("tick_icon").show(); else $rightPanelContent.find("#ha").removeClass("tick_icon").addClass("cross_icon").show(); $rightPanelContent.find("#created").text(jsonObj.created); - $rightPanelContent.find("#account").text(jsonObj.account); - $rightPanelContent.find("#domain").text(jsonObj.domain); - $rightPanelContent.find("#hostName").text(jsonObj.hostname); - $rightPanelContent.find("#group").text(jsonObj.group); + $rightPanelContent.find("#account").text(fromdb(jsonObj.account)); + $rightPanelContent.find("#domain").text(fromdb(jsonObj.domain)); + $rightPanelContent.find("#hostName").text(fromdb(jsonObj.hostname)); + $rightPanelContent.find("#group").text(fromdb(jsonObj.group)); if(jsonObj.isoid != null && jsonObj.isoid.length > 0) $rightPanelContent.find("#iso").removeClass("cross_icon").addClass("tick_icon").show(); else diff --git a/ui/new/scripts/cloud.core.js b/ui/new/scripts/cloud.core.js index d65d01b054a..c563805529c 100755 --- a/ui/new/scripts/cloud.core.js +++ b/ui/new/scripts/cloud.core.js @@ -165,6 +165,14 @@ function createURL(url) { return url +"&response=json&sessionkey=" + g_sessionKey; } 
+function fromdb(val) {
+    return sanitizeXSS(unescape(noNull(val)));
+}
+
+function todb(val) {
+    return encodeURIComponent(escape(val));
+}
+
 @@ -790,16 +798,16 @@ function sanitizeXSS(val) { function getVmName(p_vmName, p_vmDisplayname) { if(p_vmDisplayname == null) - return sanitizeXSS(p_vmName); + return sanitizeXSS(unescape(p_vmName)); var vmName = null; if (isAdmin()) { if (p_vmDisplayname != p_vmName) { - vmName = p_vmName + "(" + sanitizeXSS(p_vmDisplayname) + ")"; + vmName = p_vmName + "(" + sanitizeXSS(unescape(p_vmDisplayname)) + ")"; } else { vmName = p_vmName; } } else { - vmName = sanitizeXSS(p_vmDisplayname); + vmName = sanitizeXSS(unescape(p_vmDisplayname)); } return vmName; } diff --git a/ui/new/scripts/cloud.core.snapshot.js b/ui/new/scripts/cloud.core.snapshot.js index 279d38b0d95..c54a63d3b53 100644 --- a/ui/new/scripts/cloud.core.snapshot.js +++ b/ui/new/scripts/cloud.core.snapshot.js @@ -4,11 +4,11 @@ function loadSnapshotToRigntPanelFn($rightPanelContent) { var $rightPanelContent = $("#right_panel_content"); $rightPanelContent.find("#id").text(jsonObj.id); - $rightPanelContent.find("#name").text(jsonObj.name); - $rightPanelContent.find("#volume_name").text(jsonObj.volumename); + $rightPanelContent.find("#name").text(fromdb(jsonObj.name)); + $rightPanelContent.find("#volume_name").text(fromdb(jsonObj.volumename)); $rightPanelContent.find("#interval_type").text(jsonObj.intervaltype); - $rightPanelContent.find("#account").text(jsonObj.account); - $rightPanelContent.find("#domain").text(jsonObj.domain); + $rightPanelContent.find("#account").text(fromdb(jsonObj.account)); + $rightPanelContent.find("#domain").text(fromdb(jsonObj.domain)); setDateField(jsonObj.created, $rightPanelContent.find("#created")); } \ No newline at end of file diff --git a/ui/new/scripts/cloud.core.volume.js b/ui/new/scripts/cloud.core.volume.js index 1a5493427af..7324d6d3d04 100644 --- a/ui/new/scripts/cloud.core.volume.js +++ b/ui/new/scripts/cloud.core.volume.js @@ 
-4,14 +4,14 @@ function loadVolumeToRigntPanelFn($rightPanelContent) { var $rightPanelContent = $("#right_panel_content"); $rightPanelContent.find("#id").text(jsonObj.id); - $rightPanelContent.find("#name").text(jsonObj.name); - $rightPanelContent.find("#zonename").text(jsonObj.zonename); + $rightPanelContent.find("#name").text(fromdb(jsonObj.name)); + $rightPanelContent.find("#zonename").text(fromdb(jsonObj.zonename)); $rightPanelContent.find("#device_id").text(jsonObj.deviceid); $rightPanelContent.find("#state").text(jsonObj.state); - $rightPanelContent.find("#storage").text(jsonObj.storage); - $rightPanelContent.find("#account").text(jsonObj.account); + $rightPanelContent.find("#storage").text(fromdb(jsonObj.storage)); + $rightPanelContent.find("#account").text(fromdb(jsonObj.account)); - $rightPanelContent.find("#type").text(noNull(jsonObj.type) + " (" + noNull(jsonObj.storagetype) + " storage)"); + $rightPanelContent.find("#type").text(jsonObj.type + " (" + jsonObj.storagetype + " storage)"); $rightPanelContent.find("#size").text((jsonObj.size == "0") ? 
"" : convertBytes(jsonObj.size)); if (jsonObj.virtualmachineid == null) From 2b590dbc5d3c6437107b4ade0a98361bdeb06b0d Mon Sep 17 00:00:00 2001 From: abhishek Date: Tue, 7 Sep 2010 11:30:16 -0700 Subject: [PATCH 093/145] bug 5147: Now, we restart the system vms immediately on another pool after a successful stop on the current pool, if such a reserve pool exists --- .../com/cloud/storage/StorageManagerImpl.java | 60 ++++++++----------- 1 file changed, 24 insertions(+), 36 deletions(-) diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index f6d9aa72de0..de19c3ba8f6 100644 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -1963,8 +1963,6 @@ public class StorageManagerImpl implements StorageManager { { boolean destroyVolumes = false; long count = 1; - long consoleProxyId = 0; - long ssvmId = 0; try { //1. Get the primary storage record @@ -2023,9 +2021,20 @@ public class StorageManagerImpl implements StorageManager { { if(destroyVolumes) { - //proxy vm is stopped, and we have another ps available - //get the id for restart - consoleProxyId = vmInstance.getId(); + //create a dummy event + long eventId1 = saveScheduledEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, EventTypes.EVENT_PROXY_START, "starting console proxy with Id: "+vmInstance.getId()); + + //Restore config val for consoleproxy.restart to true + _configMgr.updateConfiguration(userId, "consoleproxy.restart", "true"); + + if(_consoleProxyMgr.startProxy(vmInstance.getId(), eventId1)==null) + { + s_logger.warn("There was an error starting the console proxy id: "+vmInstance.getId()+" on another storage pool, cannot enable primary storage maintenance"); + primaryStorage.setStatus(Status.ErrorInMaintenance); + _storagePoolDao.persist(primaryStorage); + return false; + } + } } } @@ -2064,9 +2073,15 @@ public class StorageManagerImpl implements StorageManager { { 
if(destroyVolumes) { - //ss vm is stopped, and we have another ps available - //get the id for restart - ssvmId = vmInstance.getId(); + //create a dummy event and restart the ssvm immediately + long eventId = saveScheduledEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, EventTypes.EVENT_SSVM_START, "starting ssvm with Id: "+vmInstance.getId()); + if(_secStorageMgr.startSecStorageVm(vmInstance.getId(), eventId)==null) + { + s_logger.warn("There was an error starting the ssvm id: "+vmInstance.getId()+" on another storage pool, cannot enable primary storage maintenance"); + primaryStorage.setStatus(Status.ErrorInMaintenance); + _storagePoolDao.persist(primaryStorage); + return false; + } } } @@ -2100,34 +2115,7 @@ public class StorageManagerImpl implements StorageManager { _volsDao.remove(vol.getId()); } - //5. Restart all the system vms conditionally - if(destroyVolumes) //this means we have another ps. Ok to restart - { - //create a dummy event - long eventId = saveScheduledEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, EventTypes.EVENT_SSVM_START, "starting ssvm with Id: "+ssvmId); - if(_secStorageMgr.startSecStorageVm(ssvmId, eventId)==null) - { - s_logger.warn("There was an error starting the ssvm id: "+ssvmId+" on another storage pool, cannot enable primary storage maintenance"); - primaryStorage.setStatus(Status.ErrorInMaintenance); - _storagePoolDao.persist(primaryStorage); - return false; - } - - //create a dummy event - long eventId1 = saveScheduledEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, EventTypes.EVENT_PROXY_START, "starting console proxy with Id: "+consoleProxyId); - - //Restore config val for consoleproxy.restart to true - _configMgr.updateConfiguration(userId, "consoleproxy.restart", "true"); - - if(_consoleProxyMgr.startProxy(consoleProxyId, eventId1)==null) - { - s_logger.warn("There was an error starting the console proxy id: "+consoleProxyId+" on another storage pool, cannot enable primary storage maintenance"); - 
primaryStorage.setStatus(Status.ErrorInMaintenance); - _storagePoolDao.persist(primaryStorage); - return false; } - } - - //6. Update the status + //5. Update the status primaryStorage.setStatus(Status.Maintenance); _storagePoolDao.persist(primaryStorage); From afc181dd82317ea86c9e399b4e7b0fc6e89d5e77 Mon Sep 17 00:00:00 2001 From: abhishek Date: Tue, 7 Sep 2010 11:50:04 -0700 Subject: [PATCH 094/145] some code cleanup --- server/src/com/cloud/storage/StorageManagerImpl.java | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index de19c3ba8f6..552e13c1d44 100644 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -19,11 +19,7 @@ package com.cloud.storage; import java.net.URI; import java.net.UnknownHostException; -import java.text.DateFormat; -import java.text.ParseException; -import java.text.SimpleDateFormat; import java.util.ArrayList; -import java.util.Calendar; import java.util.Collections; import java.util.Date; import java.util.Enumeration; @@ -33,7 +29,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.TimeZone; import java.util.UUID; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -94,9 +89,9 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceInUseException; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.Host; -import com.cloud.host.Host.Type; import com.cloud.host.HostVO; import com.cloud.host.Status; +import com.cloud.host.Host.Type; import com.cloud.host.dao.DetailsDao; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; @@ -127,12 +122,9 @@ import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.AccountVO; import 
com.cloud.user.User; -import com.cloud.user.UserContext; -import com.cloud.user.UserVO; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; import com.cloud.uservm.UserVm; -import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.component.Adapters; From 673ed9821390cd99681f5dd87dbbefbf6ad247a8 Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Tue, 7 Sep 2010 11:46:15 -0700 Subject: [PATCH 095/145] new UI - use new shared function todb() to escape and encode parameters in API command. --- ui/new/scripts/cloud.core.instance.js | 38 +++++++++++++-------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/ui/new/scripts/cloud.core.instance.js b/ui/new/scripts/cloud.core.instance.js index d44eb6bf51d..11ecdce7749 100755 --- a/ui/new/scripts/cloud.core.instance.js +++ b/ui/new/scripts/cloud.core.instance.js @@ -108,7 +108,7 @@ function clickInstanceGroupHeader($arrowIcon) { if (isos != null && isos.length > 0) { isoSelect.empty(); for (var i = 0; i < isos.length; i++) { - isoSelect.append("");; + isoSelect.append("");; } } } @@ -196,7 +196,7 @@ function clickInstanceGroupHeader($arrowIcon) { var name = trim(thisDialog.find("#change_instance_name").val()); for(var id in selectedItemIds) { - var apiCommand = "command=updateVirtualMachine&id="+id+"&displayName="+encodeURIComponent(escape(name)); + var apiCommand = "command=updateVirtualMachine&id="+id+"&displayName="+todb(name); doAction(id, $t, apiCommand, listAPIMap); } }, @@ -217,7 +217,7 @@ function clickInstanceGroupHeader($arrowIcon) { if (offerings != null && offerings.length > 0) { for (var i = 0; i < offerings.length; i++) { - var option = $("").data("name", sanitizeXSS(unescape(offerings[i].name))); + var option = $("").data("name", fromdb(offerings[i].name)); offeringSelect.append(option); } } @@ -264,7 +264,7 @@ function clickInstanceGroupHeader($arrowIcon) { var $midMenuItem = selectedItemIds[id]; var jsonObj 
= $midMenuItem.data("jsonObj"); var group = trim(thisDialog.find("#change_group_name").val()); - var apiCommand = "command=updateVirtualMachine&id="+id+"&group="+encodeURIComponent(group); + var apiCommand = "command=updateVirtualMachine&id="+id+"&group="+todb(group); doAction(id, $t, apiCommand, listAPIMap); } }, @@ -558,7 +558,7 @@ function clickInstanceGroupHeader($arrowIcon) { var $zoneSelect = $vmPopup.find("#wizard_zone").empty(); if (zones != null && zones.length > 0) { for (var i = 0; i < zones.length; i++) { - $zoneSelect.append(""); + $zoneSelect.append(""); } } listTemplatesInVmPopup(); @@ -582,15 +582,15 @@ function clickInstanceGroupHeader($arrowIcon) { var $t = $serviceOfferingTemplate.clone(); $t.find("input:radio[name=service_offering_radio]").val(offerings[i].id); - $t.find("#name").text(sanitizeXSS(unescape(offerings[i].name))); - $t.find("#description").text(sanitizeXSS(unescape(offerings[i].displaytext))); + $t.find("#name").text(fromdb(offerings[i].name)); + $t.find("#description").text(fromdb(offerings[i].displaytext)); if (i > 0) $t.find("input:radio[name=service_offering_radio]").removeAttr("checked"); //if(i == 0) // $t.find("input:radio[name=service_offering_radio]").attr("checked", true); - //var listItem = $("
  • "); + //var listItem = $("
  • "); $container.append($t.show()); } //Safari and Chrome are not smart enough to make checkbox checked if html markup is appended by JQuery.append(). So, the following 2 lines are added. @@ -628,8 +628,8 @@ function clickInstanceGroupHeader($arrowIcon) { for (var i = 0; i < offerings.length; i++) { var $t = $existingDiskOfferingTemplate.clone(); $t.find("input:radio").attr("name","data_disk_offering_radio").val(offerings[i].id).removeAttr("checked"); - $t.find("#name").text(sanitizeXSS(unescape(noNull(offerings[i].name)))); - $t.find("#description").text(sanitizeXSS(noNull(unescape(offerings[i].displaytext)))); + $t.find("#name").text(fromdb(noNull(offerings[i].name))); + $t.find("#description").text(fromdb(offerings[i].displaytext)); $dataDiskOfferingContainer.append($t.show()); } } @@ -655,8 +655,8 @@ function clickInstanceGroupHeader($arrowIcon) { $t.find("input:radio").attr("name","root_disk_offering_radio").val(offerings[i].id); if(i > 0) //default is the 1st existing disk offering. If there is no existing disk offering, default to "custom" radio button $t.find("input:radio").removeAttr("checked"); - $t.find("#name").text(sanitizeXSS(unescape(noNull(offerings[i].name)))); - $t.find("#description").text(sanitizeXSS(noNull(unescape(offerings[i].displaytext)))); + $t.find("#name").text(fromdb(offerings[i].name)); + $t.find("#description").text(fromdb(offerings[i].displaytext)); $rootDiskOfferingContainer.append($t.show()); } } @@ -684,14 +684,14 @@ function clickInstanceGroupHeader($arrowIcon) { var html = "
  • " +"" - +"" + +"" +"
  • "; $("#wizard_root_disk_offering").append(html); var html2 = "
  • " +"" - +"" + +"" +"
  • "; $("#wizard_data_disk_offering").append(html2); } @@ -843,8 +843,8 @@ function clickInstanceGroupHeader($arrowIcon) { var html = '
    ' +'
    ' - +'
    '+sanitizeXSS(items[i].displaytext)+'
    ' - +'
    '+sanitizeXSS(items[i].account)+'
    ' + +'
    '+fromdb(items[i].displaytext)+'
    ' + +'
    '+fromdb(items[i].account)+'
    ' +'
    '; container.append(html); } @@ -1067,11 +1067,11 @@ function clickInstanceGroupHeader($arrowIcon) { var name = trim($thisPopup.find("#wizard_vm_name").val()); if (name != null && name.length > 0) - moreCriteria.push("&displayname="+encodeURIComponent(name)); + moreCriteria.push("&displayname="+todb(name)); var group = trim($thisPopup.find("#wizard_vm_group").val()); if (group != null && group.length > 0) - moreCriteria.push("&group="+encodeURIComponent(group)); + moreCriteria.push("&group="+todb(group)); vmWizardClose(); @@ -1131,7 +1131,7 @@ function clickInstanceGroupHeader($arrowIcon) { // Failed $t.find("#vm_name").text("Adding failed"); $t.find("#info_icon").addClass("error").show(); - $t.data("afterActionInfo", ("Adding failed. Reason: " + sanitizeXSS(result.jobresult))); + $t.data("afterActionInfo", ("Adding failed. Reason: " + fromdb(result.jobresult))); $t.bind("click", function(event) { $rightPanelContent.find("#after_action_info").text($(this).data("afterActionInfo")); $rightPanelContent.find("#after_action_info_container").addClass("errorbox"); From 3bf19dcccbdc12f8886ddbe5417c98d934b86308 Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Tue, 7 Sep 2010 12:02:53 -0700 Subject: [PATCH 096/145] new UI - apply detail header text. --- ui/new/jsp/tab_account.jsp | 8 ++++++++ ui/new/jsp/tab_alert.jsp | 8 ++++++++ ui/new/jsp/tab_event.jsp | 8 ++++++++ ui/new/jsp/tab_instance.jsp | 2 +- ui/new/jsp/tab_snapshot.jsp | 8 ++++++++ ui/new/jsp/tab_volume.jsp | 8 ++++++++ 6 files changed, 41 insertions(+), 1 deletion(-) diff --git a/ui/new/jsp/tab_account.jsp b/ui/new/jsp/tab_account.jsp index a4b8c117648..c64d2ae9301 100644 --- a/ui/new/jsp/tab_account.jsp +++ b/ui/new/jsp/tab_account.jsp @@ -12,6 +12,14 @@ %> +
    + +

    Account +

    +