diff --git a/agent/src/com/cloud/agent/AgentShell.java b/agent/src/com/cloud/agent/AgentShell.java
index eac3e50d92c..0e020935e90 100644
--- a/agent/src/com/cloud/agent/AgentShell.java
+++ b/agent/src/com/cloud/agent/AgentShell.java
@@ -53,10 +53,7 @@ import com.cloud.utils.ProcessUtil;
 import com.cloud.utils.PropertiesUtil;
 import com.cloud.utils.backoff.BackoffAlgorithm;
 import com.cloud.utils.backoff.impl.ConstantTimeBackoff;
-import com.cloud.utils.component.Adapters;
-import com.cloud.utils.component.LegacyComponentLocator;
 import com.cloud.utils.exception.CloudRuntimeException;
-import com.cloud.utils.net.MacAddress;
 import com.cloud.utils.script.Script;
 
 public class AgentShell implements IAgentShell {
@@ -146,6 +143,7 @@ public class AgentShell implements IAgentShell {
         return _guid;
     }
 
+    @Override
     public Map getCmdLineProperties() {
         return _cmdLineProperties;
     }
@@ -378,8 +376,6 @@ public class AgentShell implements IAgentShell {
 
     public void init(String[] args) throws ConfigurationException {
-        final LegacyComponentLocator locator = LegacyComponentLocator.getLocator("agent");
-
         final Class c = this.getClass();
         _version = c.getPackage().getImplementationVersion();
         if (_version == null) {
@@ -396,12 +392,9 @@ public class AgentShell implements IAgentShell {
             s_logger.debug("Found property: " + property);
         }
 
-        _storage = locator.getManager(StorageComponent.class);
-        if (_storage == null) {
-            s_logger.info("Defaulting to using properties file for storage");
-            _storage = new PropertiesStorage();
-            _storage.configure("Storage", new HashMap());
-        }
+        s_logger.info("Defaulting to using properties file for storage");
+        _storage = new PropertiesStorage();
+        _storage.configure("Storage", new HashMap());
 
         // merge with properties from command line to let resource access
         // command line parameters
@@ -410,22 +403,9 @@
             _properties.put(cmdLineProp.getKey(), cmdLineProp.getValue());
         }
 
-        final Adapters adapters = locator.getAdapters(BackoffAlgorithm.class);
-        final Enumeration en = adapters.enumeration();
-        while (en.hasMoreElements()) {
-            _backoff = (BackoffAlgorithm) en.nextElement();
-            break;
-        }
-        if (en.hasMoreElements()) {
-            s_logger.info("More than one backoff algorithm specified.
Using the first one "); - } - - if (_backoff == null) { - s_logger.info("Defaulting to the constant time backoff algorithm"); - _backoff = new ConstantTimeBackoff(); - _backoff.configure("ConstantTimeBackoff", - new HashMap()); - } + s_logger.info("Defaulting to the constant time backoff algorithm"); + _backoff = new ConstantTimeBackoff(); + _backoff.configure("ConstantTimeBackoff", new HashMap()); } private void launchAgent() throws ConfigurationException { @@ -469,6 +449,7 @@ public class AgentShell implements IAgentShell { openPortWithIptables(port); _consoleProxyMain = new Thread(new Runnable() { + @Override public void run() { try { Class consoleProxyClazz = Class.forName("com.cloud.consoleproxy.ConsoleProxy"); @@ -522,7 +503,7 @@ public class AgentShell implements IAgentShell { } catch (final SecurityException e) { throw new ConfigurationException( "Security excetion when loading resource: " + name - + " due to: " + e.toString()); + + " due to: " + e.toString()); } catch (final NoSuchMethodException e) { throw new ConfigurationException( "Method not found excetion when loading resource: " @@ -534,7 +515,7 @@ public class AgentShell implements IAgentShell { } catch (final InstantiationException e) { throw new ConfigurationException( "Instantiation excetion when loading resource: " + name - + " due to: " + e.toString()); + + " due to: " + e.toString()); } catch (final IllegalAccessException e) { throw new ConfigurationException( "Illegal access exception when loading resource: " diff --git a/agent/src/com/cloud/agent/VmmAgentShell.java b/agent/src/com/cloud/agent/VmmAgentShell.java index ec2867940a2..190d1168284 100644 --- a/agent/src/com/cloud/agent/VmmAgentShell.java +++ b/agent/src/com/cloud/agent/VmmAgentShell.java @@ -23,7 +23,6 @@ import java.io.IOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; -import java.util.Enumeration; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -41,19 +40,15 @@ import com.cloud.agent.dao.impl.PropertiesStorage; import com.cloud.agent.transport.Request; import com.cloud.resource.ServerResource; import com.cloud.utils.NumbersUtil; -import com.cloud.utils.ProcessUtil; import com.cloud.utils.PropertiesUtil; import com.cloud.utils.backoff.BackoffAlgorithm; import com.cloud.utils.backoff.impl.ConstantTimeBackoff; -import com.cloud.utils.component.Adapters; -import com.cloud.utils.component.LegacyComponentLocator; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.MacAddress; import com.cloud.utils.nio.HandlerFactory; import com.cloud.utils.nio.Link; import com.cloud.utils.nio.NioServer; import com.cloud.utils.nio.Task; -import com.cloud.utils.nio.Task.Type; /** * Implementation of agent shell to run the agents on System Center Virtual Machine manager @@ -61,7 +56,7 @@ import com.cloud.utils.nio.Task.Type; public class VmmAgentShell implements IAgentShell, HandlerFactory { - private static final Logger s_logger = Logger.getLogger(VmmAgentShell.class.getName()); + private static final Logger s_logger = Logger.getLogger(VmmAgentShell.class.getName()); private final Properties _properties = new Properties(); private final Map _cmdLineProperties = new HashMap(); private StorageComponent _storage; @@ -76,112 +71,112 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory { private int _proxyPort; private int _workers; private String _guid; - static private NioServer _connection; - static private int 
_listenerPort=9000; + static private NioServer _connection; + static private int _listenerPort=9000; private int _nextAgentId = 1; private volatile boolean _exit = false; private int _pingRetries; - private Thread _consoleProxyMain = null; + private final Thread _consoleProxyMain = null; private final List _agents = new ArrayList(); public VmmAgentShell() { } - + @Override public Properties getProperties() { - return _properties; + return _properties; } - + @Override public BackoffAlgorithm getBackoffAlgorithm() { - return _backoff; + return _backoff; } - + @Override public int getPingRetries() { - return _pingRetries; + return _pingRetries; } - + @Override public String getZone() { - return _zone; + return _zone; } - + @Override public String getPod() { - return _pod; + return _pod; } - + @Override public String getHost() { - return _host; + return _host; } - + @Override public String getPrivateIp() { - return _privateIp; + return _privateIp; } - + @Override public int getPort() { - return _port; + return _port; } - + @Override public int getProxyPort() { - return _proxyPort; + return _proxyPort; } - + @Override public int getWorkers() { - return _workers; + return _workers; } - + @Override public String getGuid() { - return _guid; + return _guid; } - @Override - public void upgradeAgent(String url) { - // TODO Auto-generated method stub - - } + @Override + public void upgradeAgent(String url) { + // TODO Auto-generated method stub - @Override + } + + @Override public String getVersion() { - return _version; + return _version; } - @Override - public Map getCmdLineProperties() { - // TODO Auto-generated method stub - return _cmdLineProperties; - } - - public String getProperty(String prefix, String name) { - if(prefix != null) - return _properties.getProperty(prefix + "." + name); - - return _properties.getProperty(name); + @Override + public Map getCmdLineProperties() { + // TODO Auto-generated method stub + return _cmdLineProperties; } - - @Override - public String getPersistentProperty(String prefix, String name) { - if(prefix != null) - return _storage.get(prefix + "." + name); - return _storage.get(name); - } - @Override - public void setPersistentProperty(String prefix, String name, String value) { - if(prefix != null) - _storage.persist(prefix + "." + name, value); - else - _storage.persist(name, value); - } + public String getProperty(String prefix, String name) { + if(prefix != null) + return _properties.getProperty(prefix + "." + name); - private void loadProperties() throws ConfigurationException { + return _properties.getProperty(name); + } + + @Override + public String getPersistentProperty(String prefix, String name) { + if(prefix != null) + return _storage.get(prefix + "." + name); + return _storage.get(name); + } + + @Override + public void setPersistentProperty(String prefix, String name, String value) { + if(prefix != null) + _storage.persist(prefix + "." 
+ name, value); + else + _storage.persist(name, value); + } + + private void loadProperties() throws ConfigurationException { final File file = PropertiesUtil.findConfigFile("agent.properties"); if (file == null) { throw new ConfigurationException("Unable to find agent.properties."); @@ -197,7 +192,7 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory { throw new CloudRuntimeException("IOException in reading " + file.getAbsolutePath(), ex); } } - + protected boolean parseCommand(final String[] args) throws ConfigurationException { String host = null; String workers = null; @@ -211,7 +206,7 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory { System.out.println("Invalid Parameter: " + args[i]); continue; } - + // save command line properties _cmdLineProperties.put(tokens[0], tokens[1]); @@ -222,14 +217,14 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory { } else if (tokens[0].equalsIgnoreCase("host")) { host = tokens[1]; } else if(tokens[0].equalsIgnoreCase("zone")) { - zone = tokens[1]; + zone = tokens[1]; } else if(tokens[0].equalsIgnoreCase("pod")) { - pod = tokens[1]; + pod = tokens[1]; } else if(tokens[0].equalsIgnoreCase("guid")) { - guid = tokens[1]; - } else if(tokens[0].equalsIgnoreCase("eth1ip")) { - _privateIp = tokens[1]; - } + guid = tokens[1]; + } else if(tokens[0].equalsIgnoreCase("eth1ip")) { + _privateIp = tokens[1]; + } } if (port == null) { @@ -237,7 +232,7 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory { } _port = NumbersUtil.parseInt(port, 8250); - + _proxyPort = NumbersUtil.parseInt(getProperty(null, "consoleproxy.httpListenPort"), 443); if (workers == null) { @@ -254,42 +249,42 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory { host = "localhost"; } _host = host; - + if(zone != null) - _zone = zone; + _zone = zone; else - _zone = getProperty(null, "zone"); + _zone = getProperty(null, "zone"); if (_zone == null || (_zone.startsWith("@") && _zone.endsWith("@"))) { - _zone = "default"; + _zone = "default"; } if(pod != null) - _pod = pod; + _pod = pod; else - _pod = getProperty(null, "pod"); + _pod = getProperty(null, "pod"); if (_pod == null || (_pod.startsWith("@") && _pod.endsWith("@"))) { - _pod = "default"; + _pod = "default"; } if (_host == null || (_host.startsWith("@") && _host.endsWith("@"))) { throw new ConfigurationException("Host is not configured correctly: " + _host); } - + final String retries = getProperty(null, "ping.retries"); _pingRetries = NumbersUtil.parseInt(retries, 5); String value = getProperty(null, "developer"); boolean developer = Boolean.parseBoolean(value); - + if(guid != null) - _guid = guid; + _guid = guid; else - _guid = getProperty(null, "guid"); + _guid = getProperty(null, "guid"); if (_guid == null) { - if (!developer) { - throw new ConfigurationException("Unable to find the guid"); - } - _guid = MacAddress.getMacAddress().toString(":"); + if (!developer) { + throw new ConfigurationException("Unable to find the guid"); + } + _guid = MacAddress.getMacAddress().toString(":"); } return true; @@ -303,63 +298,46 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory { } s_logger.trace("Launching agent based on type=" + typeInfo); } - + private void launchAgent() throws ConfigurationException { String resourceClassNames = getProperty(null, "resource"); s_logger.trace("resource=" + resourceClassNames); if(resourceClassNames != null) { - launchAgentFromClassInfo(resourceClassNames); - return; + 
launchAgentFromClassInfo(resourceClassNames); + return; } - + launchAgentFromTypeInfo(); } - + private void init(String[] args) throws ConfigurationException{ - - final LegacyComponentLocator locator = LegacyComponentLocator.getLocator("agent"); - + final Class c = this.getClass(); _version = c.getPackage().getImplementationVersion(); if (_version == null) { throw new CloudRuntimeException("Unable to find the implementation version of this agent"); } s_logger.info("Implementation Version is " + _version); - + parseCommand(args); - - _storage = locator.getManager(StorageComponent.class); - if (_storage == null) { - s_logger.info("Defaulting to using properties file for storage"); - _storage = new PropertiesStorage(); - _storage.configure("Storage", new HashMap()); - } + + s_logger.info("Defaulting to using properties file for storage"); + _storage = new PropertiesStorage(); + _storage.configure("Storage", new HashMap()); // merge with properties from command line to let resource access command line parameters for(Map.Entry cmdLineProp : getCmdLineProperties().entrySet()) { - _properties.put(cmdLineProp.getKey(), cmdLineProp.getValue()); - } - - final Adapters adapters = locator.getAdapters(BackoffAlgorithm.class); - final Enumeration en = adapters.enumeration(); - while (en.hasMoreElements()) { - _backoff = (BackoffAlgorithm)en.nextElement(); - break; - } - if (en.hasMoreElements()) { - s_logger.info("More than one backoff algorithm specified. Using the first one "); + _properties.put(cmdLineProp.getKey(), cmdLineProp.getValue()); } - if (_backoff == null) { - s_logger.info("Defaulting to the constant time backoff algorithm"); - _backoff = new ConstantTimeBackoff(); - _backoff.configure("ConstantTimeBackoff", new HashMap()); - } + s_logger.info("Defaulting to the constant time backoff algorithm"); + _backoff = new ConstantTimeBackoff(); + _backoff.configure("ConstantTimeBackoff", new HashMap()); } private void launchAgentFromClassInfo(String resourceClassNames) throws ConfigurationException { - String[] names = resourceClassNames.split("\\|"); - for(String name: names) { + String[] names = resourceClassNames.split("\\|"); + for(String name: names) { Class impl; try { impl = Class.forName(name); @@ -368,41 +346,41 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory { ServerResource resource = (ServerResource)constructor.newInstance(); launchAgent(getNextAgentId(), resource); } catch (final ClassNotFoundException e) { - throw new ConfigurationException("Resource class not found: " + name); + throw new ConfigurationException("Resource class not found: " + name); } catch (final SecurityException e) { - throw new ConfigurationException("Security excetion when loading resource: " + name); + throw new ConfigurationException("Security excetion when loading resource: " + name); } catch (final NoSuchMethodException e) { - throw new ConfigurationException("Method not found excetion when loading resource: " + name); + throw new ConfigurationException("Method not found excetion when loading resource: " + name); } catch (final IllegalArgumentException e) { - throw new ConfigurationException("Illegal argument excetion when loading resource: " + name); + throw new ConfigurationException("Illegal argument excetion when loading resource: " + name); } catch (final InstantiationException e) { - throw new ConfigurationException("Instantiation excetion when loading resource: " + name); + throw new ConfigurationException("Instantiation excetion when loading resource: " + name); } catch (final 
IllegalAccessException e) { - throw new ConfigurationException("Illegal access exception when loading resource: " + name); + throw new ConfigurationException("Illegal access exception when loading resource: " + name); } catch (final InvocationTargetException e) { - throw new ConfigurationException("Invocation target exception when loading resource: " + name); + throw new ConfigurationException("Invocation target exception when loading resource: " + name); } - } + } } private void launchAgent(int localAgentId, ServerResource resource) throws ConfigurationException { - // we don't track agent after it is launched for now - Agent agent = new Agent(this, localAgentId, resource); - _agents.add(agent); - agent.start(); + // we don't track agent after it is launched for now + Agent agent = new Agent(this, localAgentId, resource); + _agents.add(agent); + agent.start(); } public synchronized int getNextAgentId() { - return _nextAgentId++; + return _nextAgentId++; } - - private void run(String[] args) { - - try { + + private void run(String[] args) { + + try { System.setProperty("java.net.preferIPv4Stack","true"); - loadProperties(); - init(args); - + loadProperties(); + init(args); + String instance = getProperty(null, "instance"); if (instance == null) { instance = ""; @@ -413,22 +391,22 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory { // TODO need to do this check. For Agentshell running on windows needs different approach //final String run = "agent." + instance + "pid"; //s_logger.debug("Checking to see if " + run + "exists."); - //ProcessUtil.pidCheck(run); - - + //ProcessUtil.pidCheck(run); + + // TODO: For Hyper-V agent.properties need to be revamped to support multiple agents // corresponding to multiple clusters but running on a SCVMM host - + // read the persistent storage and launch the agents - //launchAgent(); + //launchAgent(); // FIXME get rid of this approach of agent listening for boot strap commands from the management server - // now listen for bootstrap request from the management server and launch agents - _connection = new NioServer("VmmAgentShell", _listenerPort, 1, this); - _connection.start(); - s_logger.info("SCVMM agent is listening on port " +_listenerPort + " for bootstrap command from management server"); - while(_connection.isRunning()); + // now listen for bootstrap request from the management server and launch agents + _connection = new NioServer("VmmAgentShell", _listenerPort, 1, this); + _connection.start(); + s_logger.info("SCVMM agent is listening on port " +_listenerPort + " for bootstrap command from management server"); + while(_connection.isRunning()); } catch(final ConfigurationException e) { s_logger.error("Unable to start agent: " + e.getMessage()); System.out.println("Unable to start agent: " + e.getMessage()); @@ -438,89 +416,89 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory { System.out.println("Unable to start agent: " + e.getMessage()); System.exit(ExitStatus.Error.value()); } - } + } - @Override - public Task create(com.cloud.utils.nio.Task.Type type, Link link, - byte[] data) { - return new AgentBootStrapHandler(type, link, data); - } + @Override + public Task create(com.cloud.utils.nio.Task.Type type, Link link, + byte[] data) { + return new AgentBootStrapHandler(type, link, data); + } - public void stop() { - _exit = true; - if(_consoleProxyMain != null) { - _consoleProxyMain.interrupt(); - } - } - - public static void main(String[] args) { - - VmmAgentShell shell = new VmmAgentShell(); - 
Runtime.getRuntime().addShutdownHook(new ShutdownThread(shell)); - shell.run(args); - } + public void stop() { + _exit = true; + if(_consoleProxyMain != null) { + _consoleProxyMain.interrupt(); + } + } - // class to handle the bootstrap command from the management server - private class AgentBootStrapHandler extends Task { + public static void main(String[] args) { - public AgentBootStrapHandler(Task.Type type, Link link, byte[] data) { - super(type, link, data); - } + VmmAgentShell shell = new VmmAgentShell(); + Runtime.getRuntime().addShutdownHook(new ShutdownThread(shell)); + shell.run(args); + } - @Override - protected void doTask(Task task) throws Exception { - final Type type = task.getType(); - s_logger.info("recieved task of type "+ type.toString() +" to handle in BootStrapTakHandler"); - if (type == Task.Type.DATA) - { - final byte[] data = task.getData(); - final Request request = Request.parse(data); - final Command cmd = request.getCommand(); - - if (cmd instanceof StartupVMMAgentCommand) { + // class to handle the bootstrap command from the management server + private class AgentBootStrapHandler extends Task { - StartupVMMAgentCommand vmmCmd = (StartupVMMAgentCommand) cmd; + public AgentBootStrapHandler(Task.Type type, Link link, byte[] data) { + super(type, link, data); + } - _zone = Long.toString(vmmCmd.getDataCenter()); - _cmdLineProperties.put("zone", _zone); + @Override + protected void doTask(Task task) throws Exception { + final Type type = task.getType(); + s_logger.info("recieved task of type "+ type.toString() +" to handle in BootStrapTakHandler"); + if (type == Task.Type.DATA) + { + final byte[] data = task.getData(); + final Request request = Request.parse(data); + final Command cmd = request.getCommand(); - _pod = Long.toString(vmmCmd.getPod()); - _cmdLineProperties.put("pod", _pod); + if (cmd instanceof StartupVMMAgentCommand) { - _cluster = vmmCmd.getClusterName(); - _cmdLineProperties.put("cluster", _cluster); + StartupVMMAgentCommand vmmCmd = (StartupVMMAgentCommand) cmd; - _guid = vmmCmd.getGuid(); - _cmdLineProperties.put("guid", _guid); + _zone = Long.toString(vmmCmd.getDataCenter()); + _cmdLineProperties.put("zone", _zone); - _host = vmmCmd.getManagementServerIP(); - _port = NumbersUtil.parseInt(vmmCmd.getport(), 8250); + _pod = Long.toString(vmmCmd.getPod()); + _cmdLineProperties.put("pod", _pod); - s_logger.info("Recieved boot strap command from management server with parameters " + - " Zone:"+ _zone + " "+ - " Cluster:"+ _cluster + " "+ - " pod:"+_pod + " "+ - " host:"+ _host +" "+ - " port:"+_port); + _cluster = vmmCmd.getClusterName(); + _cmdLineProperties.put("cluster", _cluster); - launchAgentFromClassInfo("com.cloud.hypervisor.hyperv.resource.HypervResource"); - - // TODO: persist the info in agent.properties for agent restarts - } - } - } - } + _guid = vmmCmd.getGuid(); + _cmdLineProperties.put("guid", _guid); + + _host = vmmCmd.getManagementServerIP(); + _port = NumbersUtil.parseInt(vmmCmd.getport(), 8250); + + s_logger.info("Recieved boot strap command from management server with parameters " + + " Zone:"+ _zone + " "+ + " Cluster:"+ _cluster + " "+ + " pod:"+_pod + " "+ + " host:"+ _host +" "+ + " port:"+_port); + + launchAgentFromClassInfo("com.cloud.hypervisor.hyperv.resource.HypervResource"); + + // TODO: persist the info in agent.properties for agent restarts + } + } + } + } private static class ShutdownThread extends Thread { - VmmAgentShell _shell; + VmmAgentShell _shell; public ShutdownThread(VmmAgentShell shell) { this._shell = shell; } 
- + @Override public void run() { _shell.stop(); } } - + } \ No newline at end of file diff --git a/agent/src/com/cloud/agent/configuration/AgentComponentLibraryBase.java b/agent/src/com/cloud/agent/configuration/AgentComponentLibraryBase.java deleted file mode 100755 index 4ea101c3951..00000000000 --- a/agent/src/com/cloud/agent/configuration/AgentComponentLibraryBase.java +++ /dev/null @@ -1,76 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.agent.configuration; - -import java.util.List; -import java.util.Map; - -import com.cloud.utils.component.Adapter; -import com.cloud.utils.component.ComponentLibraryBase; -import com.cloud.utils.component.LegacyComponentLocator.ComponentInfo; -import com.cloud.utils.component.Manager; -import com.cloud.utils.component.PluggableService; -import com.cloud.utils.db.GenericDao; - -public class AgentComponentLibraryBase extends ComponentLibraryBase { - @Override - public Map>> getDaos() { - return null; - } - - @Override - public Map> getManagers() { - if (_managers.size() == 0) { - populateManagers(); - } - return _managers; - } - - @Override - public Map>> getAdapters() { - if (_adapters.size() == 0) { - populateAdapters(); - } - return _adapters; - } - - @Override - public Map, Class> getFactories() { - return null; - } - - protected void populateManagers() { - // addManager("StackMaidManager", StackMaidManagerImpl.class); - } - - protected void populateAdapters() { - - } - - protected void populateServices() { - - } - - @Override - public Map> getPluggableServices() { - if (_pluggableServices.size() == 0) { - populateServices(); - } - return _pluggableServices; - } - -} diff --git a/agent/test/com/cloud/agent/TestAgentShell.java b/agent/test/com/cloud/agent/TestAgentShell.java index d7210acbef3..0e9be0f1312 100644 --- a/agent/test/com/cloud/agent/TestAgentShell.java +++ b/agent/test/com/cloud/agent/TestAgentShell.java @@ -19,24 +19,23 @@ package com.cloud.agent; import java.io.File; import java.io.IOException; +import junit.framework.TestCase; + import org.apache.log4j.Logger; -import com.cloud.agent.AgentShell; -import com.cloud.utils.testcase.Log4jEnabledTestCase; - -public class TestAgentShell extends Log4jEnabledTestCase { +public class TestAgentShell extends TestCase { protected final static Logger s_logger = Logger.getLogger(TestAgentShell.class); - + public void testWget() { File file = null; try { file = File.createTempFile("wget", ".html"); AgentShell.wget("http://www.google.com/", file); - + if (s_logger.isDebugEnabled()) { s_logger.debug("file saved to " + file.getAbsolutePath()); } - + } catch (final IOException e) { s_logger.warn("Exception while downloading agent update package, ", e); } diff --git a/api/src/com/cloud/api/commands/CreatePrivateNetworkCmd.java 
b/api/src/com/cloud/api/commands/CreatePrivateNetworkCmd.java index 92c7ac58be0..263f023b3e5 100644 --- a/api/src/com/cloud/api/commands/CreatePrivateNetworkCmd.java +++ b/api/src/com/cloud/api/commands/CreatePrivateNetworkCmd.java @@ -31,7 +31,7 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.network.Network; import com.cloud.user.UserContext; -@APICommand(description="Creates a private network", responseObject=NetworkResponse.class) +//@APICommand(description="Creates a private network", responseObject=NetworkResponse.class) public class CreatePrivateNetworkCmd extends BaseAsyncCreateCmd { public static final Logger s_logger = Logger.getLogger(CreatePrivateNetworkCmd.class.getName()); @@ -153,6 +153,7 @@ public class CreatePrivateNetworkCmd extends BaseAsyncCreateCmd { if (result != null) { this.setEntityId(result.getId()); + this.setEntityUuid(result.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create a Private network"); } @@ -190,8 +191,4 @@ public class CreatePrivateNetworkCmd extends BaseAsyncCreateCmd { } - @Override - public String getEntityTable() { - return "networks"; - } } diff --git a/api/src/com/cloud/api/commands/DestroyConsoleProxyCmd.java b/api/src/com/cloud/api/commands/DestroyConsoleProxyCmd.java index 17bafb1918d..80269075744 100644 --- a/api/src/com/cloud/api/commands/DestroyConsoleProxyCmd.java +++ b/api/src/com/cloud/api/commands/DestroyConsoleProxyCmd.java @@ -25,7 +25,7 @@ import com.cloud.event.EventTypes; import com.cloud.user.Account; import com.cloud.user.UserContext; -@APICommand(description="Destroys console proxy", responseObject=SuccessResponse.class) +//@APICommand(description="Destroys console proxy", responseObject=SuccessResponse.class) public class DestroyConsoleProxyCmd extends BaseAsyncCmd { public static final Logger s_logger = Logger.getLogger(DestroyConsoleProxyCmd.class.getName()); diff --git a/api/src/com/cloud/api/commands/ListRecurringSnapshotScheduleCmd.java b/api/src/com/cloud/api/commands/ListRecurringSnapshotScheduleCmd.java index 41f28f93110..709da6af30c 100644 --- a/api/src/com/cloud/api/commands/ListRecurringSnapshotScheduleCmd.java +++ b/api/src/com/cloud/api/commands/ListRecurringSnapshotScheduleCmd.java @@ -27,7 +27,7 @@ import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.SnapshotScheduleResponse; import com.cloud.storage.snapshot.SnapshotSchedule; -@APICommand(description="Lists recurring snapshot schedule", responseObject=SnapshotScheduleResponse.class) +//@APICommand(description="Lists recurring snapshot schedule", responseObject=SnapshotScheduleResponse.class) public class ListRecurringSnapshotScheduleCmd extends BaseListCmd { private static final String s_name = "listrecurringsnapshotscheduleresponse"; diff --git a/api/src/com/cloud/exception/CloudException.java b/api/src/com/cloud/exception/CloudException.java index fd839565253..036cb1b8adc 100644 --- a/api/src/com/cloud/exception/CloudException.java +++ b/api/src/com/cloud/exception/CloudException.java @@ -16,10 +16,8 @@ // under the License. package com.cloud.exception; -import com.cloud.utils.IdentityProxy; import java.util.ArrayList; import com.cloud.utils.exception.CSExceptionErrorCode; -import com.cloud.utils.AnnotationHelper; /** * by the API response serializer. 
Any exceptions that are thrown by
@@ -56,6 +54,7 @@ public class CloudException extends Exception {
         return;
     }
 
+
     public ArrayList getIdProxyList() {
         return idList;
     }
diff --git a/api/src/com/cloud/network/NetworkService.java b/api/src/com/cloud/network/NetworkService.java
index d5841a4692e..39a746e6776 100755
--- a/api/src/com/cloud/network/NetworkService.java
+++ b/api/src/com/cloud/network/NetworkService.java
@@ -60,6 +60,8 @@ public interface NetworkService {
 
     Network getNetwork(long networkId);
 
+    Network getNetwork(String networkUuid);
+
     IpAddress getIp(long id);
 
     NetworkProfile convertNetworkToNetworkProfile(long networkId);
diff --git a/api/src/com/cloud/user/AccountService.java b/api/src/com/cloud/user/AccountService.java
index ce16f5ee063..9f5f4d225e0 100755
--- a/api/src/com/cloud/user/AccountService.java
+++ b/api/src/com/cloud/user/AccountService.java
@@ -20,6 +20,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.cloudstack.acl.ControlledEntity;
+import org.apache.cloudstack.acl.RoleType;
 import org.apache.cloudstack.acl.SecurityChecker.AccessType;
 import org.apache.cloudstack.api.command.admin.user.DeleteUserCmd;
 
@@ -193,6 +194,8 @@ public interface AccountService {
 
     UserAccount getUserByApiKey(String apiKey);
 
+    RoleType getRoleType(Account account);
+
     void checkAccess(Account account, Domain domain) throws PermissionDeniedException;
 
     void checkAccess(Account account, AccessType accessType, boolean sameOwner, ControlledEntity... entities) throws PermissionDeniedException;
diff --git a/api/src/com/cloud/user/DomainService.java b/api/src/com/cloud/user/DomainService.java
index 6fbe1b9a8db..cd20060b710 100644
--- a/api/src/com/cloud/user/DomainService.java
+++ b/api/src/com/cloud/user/DomainService.java
@@ -30,6 +30,8 @@ public interface DomainService {
 
     Domain getDomain(long id);
 
+    Domain getDomain(String uuid);
+
     /**
      * Return whether a domain is a child domain of a given domain.
      *
diff --git a/api/src/com/cloud/user/UserContext.java b/api/src/com/cloud/user/UserContext.java
index 786590eed09..54c01347097 100644
--- a/api/src/com/cloud/user/UserContext.java
+++ b/api/src/com/cloud/user/UserContext.java
@@ -16,13 +16,10 @@
 // under the License.
 package com.cloud.user;
 
-import com.cloud.server.ManagementService;
-import com.cloud.utils.component.ComponentLocator;
-
 public class UserContext {
 
     private static ThreadLocal s_currentContext = new ThreadLocal();
-
+
     private long userId;
     private String sessionId;
     private Account account;
@@ -82,7 +79,7 @@ public class UserContext {
         // however, there are many places that run background jobs assume the system context.
         //
         // If there is a security concern, all entry points from user (including the front end that takes HTTP
-        // request in and
+        // request in and
         // the core async-job manager that runs commands from user) have explicitly setup the UserContext.
         //
         return UserContextInitializer.getInstance().getAdminContext();
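
The NetworkService, AccountService, and DomainService hunks above add uuid-based accessors (getNetwork(String networkUuid), getRoleType(Account), getDomain(String uuid)) alongside the existing database-id lookups, so API-layer code can resolve entities by the uuid it exposes to clients instead of by raw internal id. A minimal consumer sketch follows; the wrapper class and its resolveNetwork helper are illustrative assumptions, not part of this patch, and rely only on the two getNetwork overloads shown above.

    // Illustrative sketch, not part of this patch: resolve a network from a
    // value that may be either an internal numeric id or an API uuid.
    import com.cloud.network.Network;
    import com.cloud.network.NetworkService;

    public class NetworkLookupExample {
        private final NetworkService networkService; // assumed to be injected

        public NetworkLookupExample(NetworkService networkService) {
            this.networkService = networkService;
        }

        public Network resolveNetwork(String uuidOrId) {
            try {
                // internal callers may still hand over the numeric database id
                return networkService.getNetwork(Long.parseLong(uuidOrId));
            } catch (NumberFormatException e) {
                // API callers pass the uuid; this uses the String overload added above
                return networkService.getNetwork(uuidOrId);
            }
        }
    }

The same pattern applies to DomainService.getDomain(String uuid), while AccountService.getRoleType(Account) feeds the role-based API access check introduced below.
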
diff --git a/api/src/org/apache/cloudstack/acl/APIAccessChecker.java b/api/src/org/apache/cloudstack/acl/APIChecker.java
similarity index 71%
rename from api/src/org/apache/cloudstack/acl/APIAccessChecker.java
rename to api/src/org/apache/cloudstack/acl/APIChecker.java
index 3194bd11d17..b14dfe101ba 100644
--- a/api/src/org/apache/cloudstack/acl/APIAccessChecker.java
+++ b/api/src/org/apache/cloudstack/acl/APIChecker.java
@@ -16,17 +16,12 @@
 // under the License.
 package org.apache.cloudstack.acl;
 
-import java.util.Properties;
-
 import com.cloud.exception.PermissionDeniedException;
-import com.cloud.user.Account;
-import com.cloud.user.User;
+import org.apache.cloudstack.acl.RoleType;
 import com.cloud.utils.component.Adapter;
 
-/**
- * APIAccessChecker checks the ownership and access control to API requests
- */
-public interface APIAccessChecker extends Adapter {
-    // Interface for checking access to an API for an user
-    boolean canAccessAPI(User user, String apiCommandName) throws PermissionDeniedException;
+// APIChecker checks the ownership and access control to API requests
+public interface APIChecker extends Adapter {
+    // Interface for checking access for a role using apiname
+    boolean checkAccess(RoleType roleType, String apiCommandName) throws PermissionDeniedException;
 }
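
The rename above narrows the old per-User canAccessAPI check to a role-based checkAccess: callers are expected to map an account to a RoleType (the enum introduced in the next hunk, obtained via the AccountService.getRoleType(Account) method added earlier in this patch) and ask whether that role may invoke a given API command. A hypothetical implementation sketch follows; the class name, the allow helper, and the in-memory permission map are assumptions, and the class is declared abstract so the lifecycle methods inherited from Adapter can be omitted here.

    // Hypothetical sketch, not part of this patch: one way an APIChecker
    // adapter could grant or deny an API command name per RoleType.
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    import com.cloud.exception.PermissionDeniedException;

    public abstract class StaticRoleApiChecker implements APIChecker {
        // role -> names of the API commands that role may invoke
        private final Map<RoleType, Set<String>> allowed = new HashMap<RoleType, Set<String>>();

        // assumed to be seeded from configuration at startup
        public void allow(RoleType role, String apiCommandName) {
            Set<String> apis = allowed.get(role);
            if (apis == null) {
                apis = new HashSet<String>();
                allowed.put(role, apis);
            }
            apis.add(apiCommandName);
        }

        @Override
        public boolean checkAccess(RoleType roleType, String apiCommandName) throws PermissionDeniedException {
            Set<String> apis = allowed.get(roleType);
            if (apis == null || !apis.contains(apiCommandName)) {
                throw new PermissionDeniedException("The API command " + apiCommandName + " is not available for role " + roleType);
            }
            return true;
        }
    }
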
diff --git a/utils/src/com/cloud/utils/component/Inject.java b/api/src/org/apache/cloudstack/acl/RoleType.java
similarity index 60%
rename from utils/src/com/cloud/utils/component/Inject.java
rename to api/src/org/apache/cloudstack/acl/RoleType.java
index 50c890da75b..0d1c4460c1e 100644
--- a/utils/src/com/cloud/utils/component/Inject.java
+++ b/api/src/org/apache/cloudstack/acl/RoleType.java
@@ -4,9 +4,9 @@
 // regarding copyright ownership. The ASF licenses this file
 // to you under the Apache License, Version 2.0 (the
 // "License"); you may not use this file except in compliance
-// the License. You may obtain a copy of the License at
+// with the License. You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing,
 // software distributed under the License is distributed on an
@@ -14,17 +14,24 @@
 // KIND, either express or implied. See the License for the
 // specific language governing permissions and limitations
 // under the License.
-package com.cloud.utils.component;
+package org.apache.cloudstack.acl;
 
-import static java.lang.annotation.ElementType.FIELD;
-import static java.lang.annotation.RetentionPolicy.RUNTIME;
+// Enum for default roles in CloudStack
+public enum RoleType {
 
-import java.lang.annotation.Retention;
-import java.lang.annotation.Target;
+    Admin(1),
+    ResourceAdmin(2),
+    DomainAdmin(4),
+    User(8),
+    Unknown(0);
 
-@Target(FIELD)
-@Retention(RUNTIME)
-public @interface Inject {
-    Class adapter() default Adapter.class;
+    private int mask;
+
+    private RoleType(int mask) {
+        this.mask = mask;
+    }
+
+    public int getValue() {
+        return mask;
+    }
 }
-
diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java
index b4ce24c2bc9..d3bfcd66afc 100644
--- a/api/src/org/apache/cloudstack/api/ApiConstants.java
+++ b/api/src/org/apache/cloudstack/api/ApiConstants.java
@@ -158,6 +158,7 @@ public class ApiConstants {
     public static final String RECEIVED_BYTES = "receivedbytes";
     public static final String REQUIRES_HVM = "requireshvm";
     public static final String RESOURCE_TYPE = "resourcetype";
+    public static final String RESPONSE = "response";
     public static final String QUERY_FILTER = "queryfilter";
     public static final String SCHEDULE = "schedule";
     public static final String SCOPE = "scope";
diff --git a/api/src/org/apache/cloudstack/api/BaseAsyncCreateCmd.java b/api/src/org/apache/cloudstack/api/BaseAsyncCreateCmd.java
index ad9f4c6b31f..1f2d3f17beb 100644
--- a/api/src/org/apache/cloudstack/api/BaseAsyncCreateCmd.java
+++ b/api/src/org/apache/cloudstack/api/BaseAsyncCreateCmd.java
@@ -25,6 +25,8 @@ public abstract class BaseAsyncCreateCmd extends BaseAsyncCmd {
     @Parameter(name = "id", type = CommandType.LONG)
     private Long id;
 
+    private String uuid;
+
     public abstract void create() throws ResourceAllocationException;
 
     public Long getEntityId() {
@@ -35,14 +37,19 @@
         this.id = id;
     }
 
-    public abstract String getEntityTable();
+    public String getEntityUuid() {
+        return uuid;
+    }
 
-    public String getResponse(long jobId, long objectId, String objectEntityTable) {
+    public void setEntityUuid(String uuid) {
+        this.uuid = uuid;
+    }
+
+    public String getResponse(long jobId, String objectUuid) {
         CreateCmdResponse response = new CreateCmdResponse();
         AsyncJob job = _entityMgr.findById(AsyncJob.class, jobId);
         response.setJobId(job.getUuid());
-        response.setId(objectId);
-        response.setIdEntityTable(objectEntityTable);
+        response.setId(objectUuid);
         response.setResponseName(getCommandName());
         return _responseGenerator.toSerializedString(response, getResponseType());
     }
diff --git a/api/src/org/apache/cloudstack/api/BaseCmd.java b/api/src/org/apache/cloudstack/api/BaseCmd.java
index fbbee50b578..ba2c4d21a50 100644
--- a/api/src/org/apache/cloudstack/api/BaseCmd.java
+++ b/api/src/org/apache/cloudstack/api/BaseCmd.java
@@ -19,7 +19,6 @@ package org.apache.cloudstack.api;
 
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
-import java.util.ArrayList;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
@@ -64,10 +63,8 @@ import com.cloud.user.Account;
 import com.cloud.user.AccountService;
 import com.cloud.user.DomainService;
 import com.cloud.user.ResourceLimitService;
-import com.cloud.utils.IdentityProxy;
 import com.cloud.utils.Pair;
 import com.cloud.utils.component.ComponentContext;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.vm.BareMetalVmService;
 import
com.cloud.vm.UserVmService; @@ -112,7 +109,6 @@ public abstract class BaseCmd { @Parameter(name = "response", type = CommandType.STRING) private String responseType; - public static ComponentLocator s_locator; public static ConfigurationService _configService; public static AccountService _accountService; public static UserVmService _userVmService; @@ -146,37 +142,47 @@ public abstract class BaseCmd { public static QueryService _queryService; public static void setComponents(ResponseGenerator generator) { - ComponentLocator locator = ComponentLocator.getLocator(ManagementService.Name); - _mgr = (ManagementService) ComponentLocator.getComponent(ManagementService.Name); - _accountService = locator.getManager(AccountService.class); - _configService = locator.getManager(ConfigurationService.class); - _userVmService = locator.getManager(UserVmService.class); - _storageService = locator.getManager(StorageService.class); - _resourceService = locator.getManager(ResourceService.class); - _networkService = locator.getManager(NetworkService.class); - _templateService = locator.getManager(TemplateService.class); - _securityGroupService = locator.getManager(SecurityGroupService.class); - _snapshotService = locator.getManager(SnapshotService.class); - _consoleProxyService = locator.getManager(ConsoleProxyService.class); - _routerService = locator.getManager(VpcVirtualNetworkApplianceService.class); - _entityMgr = locator.getManager(EntityManager.class); - _rulesService = locator.getManager(RulesService.class); - _lbService = locator.getManager(LoadBalancingRulesService.class); - _autoScaleService = locator.getManager(AutoScaleService.class); - _ravService = locator.getManager(RemoteAccessVpnService.class); + _mgr = ComponentContext.getComponent(ManagementService.class); + _accountService = ComponentContext.getComponent(AccountService.class); + _configService = ComponentContext.getComponent(ConfigurationService.class); + + _userVmService = ComponentContext.getComponent(UserVmService.class); + + // TODO, ugly and will change soon + // + Map svmServices = ComponentContext.getComponentsOfType(UserVmService.class); + _userVmService = svmServices.get("BareMetalVmManagerImpl"); + + _storageService = ComponentContext.getComponent(StorageService.class); + _resourceService = ComponentContext.getComponent(ResourceService.class); + + _networkService = ComponentContext.getComponent(NetworkService.class); + _templateService = ComponentContext.getComponent(TemplateService.class); + + // TODO, will change to looking for primary component + // ugly binding to a specific implementation + Map _sgServices = ComponentContext.getComponentsOfType(SecurityGroupService.class); + _securityGroupService = _sgServices.get("SecurityGroupManagerImpl2"); + + _snapshotService = ComponentContext.getComponent(SnapshotService.class); + _consoleProxyService = ComponentContext.getComponent(ConsoleProxyService.class); + _routerService = ComponentContext.getComponent(VpcVirtualNetworkApplianceService.class); + _entityMgr = ComponentContext.getComponent(EntityManager.class); + _rulesService = ComponentContext.getComponent(RulesService.class); + _lbService = ComponentContext.getComponent(LoadBalancingRulesService.class); + _ravService = ComponentContext.getComponent(RemoteAccessVpnService.class); _responseGenerator = generator; - _bareMetalVmService = locator.getManager(BareMetalVmService.class); - _projectService = locator.getManager(ProjectService.class); - _firewallService = locator.getManager(FirewallService.class); - _domainService = 
locator.getManager(DomainService.class); - _resourceLimitService = locator.getManager(ResourceLimitService.class); - _identityService = locator.getManager(IdentityService.class); - _storageNetworkService = locator.getManager(StorageNetworkService.class); - _taggedResourceService = locator.getManager(TaggedResourceService.class); - _vpcService = locator.getManager(VpcService.class); - _networkACLService = locator.getManager(NetworkACLService.class); - _s2sVpnService = locator.getManager(Site2SiteVpnService.class); - _queryService = locator.getManager(QueryService.class); + _bareMetalVmService = ComponentContext.getComponent(BareMetalVmService.class); + _projectService = ComponentContext.getComponent(ProjectService.class); + _firewallService = ComponentContext.getComponent(FirewallService.class); + _domainService = ComponentContext.getComponent(DomainService.class); + _resourceLimitService = ComponentContext.getComponent(ResourceLimitService.class); + _identityService = ComponentContext.getComponent(IdentityService.class); + _storageNetworkService = ComponentContext.getComponent(StorageNetworkService.class); + _taggedResourceService = ComponentContext.getComponent(TaggedResourceService.class); + _vpcService = ComponentContext.getComponent(VpcService.class); + _networkACLService = ComponentContext.getComponent(NetworkACLService.class); + _s2sVpnService = ComponentContext.getComponent(Site2SiteVpnService.class); } public abstract void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException; @@ -211,9 +217,9 @@ public abstract class BaseCmd { } public ManagementService getMgmtServiceRef() { - return _mgr; + return _mgr; } - + public static String getDateString(Date date) { if (date == null) { return ""; @@ -526,8 +532,8 @@ public abstract class BaseCmd { if (!enabledOnly || project.getState() == Project.State.Active) { return project.getProjectAccountId(); } else { - PermissionDeniedException ex = new PermissionDeniedException("Can't add resources to the project with specified projectId in state=" + project.getState() + " as it's no longer active"); - ex.addProxyObject(project, projectId, "projectId"); + PermissionDeniedException ex = new PermissionDeniedException("Can't add resources to the project with specified projectId in state=" + project.getState() + " as it's no longer active"); + ex.addProxyObject(project, projectId, "projectId"); throw ex; } } else { diff --git a/api/src/org/apache/cloudstack/api/BaseResponse.java b/api/src/org/apache/cloudstack/api/BaseResponse.java index 28ca6b8c2de..01f1be3253b 100644 --- a/api/src/org/apache/cloudstack/api/BaseResponse.java +++ b/api/src/org/apache/cloudstack/api/BaseResponse.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api; import org.apache.cloudstack.api.ApiConstants; -import com.cloud.utils.IdentityProxy; import org.apache.cloudstack.api.ResponseObject; import com.cloud.serializer.Param; import com.google.gson.annotations.SerializedName; @@ -46,6 +45,7 @@ public abstract class BaseResponse implements ResponseObject { this.objectName = objectName; } + @Override public String getObjectId() { return null; } @@ -56,18 +56,22 @@ public abstract class BaseResponse implements ResponseObject { @SerializedName(ApiConstants.JOB_STATUS) @Param(description="the current status of the latest async job acting on this object") private Integer jobStatus; + @Override public String getJobId() { return jobId; } + @Override 
public void setJobId(String jobId) { this.jobId = jobId; } + @Override public Integer getJobStatus() { return jobStatus; } + @Override public void setJobStatus(Integer jobStatus) { this.jobStatus = jobStatus; } diff --git a/api/src/org/apache/cloudstack/api/ResponseGenerator.java b/api/src/org/apache/cloudstack/api/ResponseGenerator.java index e9f988ade60..63df4dc5532 100644 --- a/api/src/org/apache/cloudstack/api/ResponseGenerator.java +++ b/api/src/org/apache/cloudstack/api/ResponseGenerator.java @@ -314,13 +314,6 @@ public interface ResponseGenerator { StorageNetworkIpRangeResponse createStorageNetworkIpRangeResponse(StorageNetworkIpRange result); - /** - * @param tableName TODO - * @param token - * @return - */ - Long getIdentiyId(String tableName, String token); - /** * @param resourceTag * @param keyValueOnly TODO diff --git a/api/src/org/apache/cloudstack/api/command/admin/autoscale/CreateCounterCmd.java b/api/src/org/apache/cloudstack/api/command/admin/autoscale/CreateCounterCmd.java index 7369a6f6d08..a119d0f44bf 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/autoscale/CreateCounterCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/autoscale/CreateCounterCmd.java @@ -81,6 +81,7 @@ public class CreateCounterCmd extends BaseAsyncCreateCmd { if (ctr != null) { this.setEntityId(ctr.getId()); + this.setEntityUuid(ctr.getUuid()); CounterResponse response = _responseGenerator.createCounterResponse(ctr); response.setResponseName(getCommandName()); this.setResponseObject(response); @@ -113,8 +114,5 @@ public class CreateCounterCmd extends BaseAsyncCreateCmd { return Account.ACCOUNT_ID_SYSTEM; } - @Override - public String getEntityTable() { - return "counter"; - } + } diff --git a/api/src/org/apache/cloudstack/api/command/admin/network/AddNetworkDeviceCmd.java b/api/src/org/apache/cloudstack/api/command/admin/network/AddNetworkDeviceCmd.java index 3e1d74df405..793c982d0f1 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/network/AddNetworkDeviceCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/network/AddNetworkDeviceCmd.java @@ -18,23 +18,23 @@ package org.apache.cloudstack.api.command.admin.network; import java.util.Map; -import org.apache.log4j.Logger; +import javax.inject.Inject; +import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd; -import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.NetworkDeviceResponse; +import org.apache.cloudstack.network.ExternalNetworkDeviceManager; +import org.apache.log4j.Logger; + import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.Host; -import org.apache.cloudstack.network.ExternalNetworkDeviceManager; -import com.cloud.server.ManagementService; -import org.apache.cloudstack.api.response.NetworkDeviceResponse; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.exception.CloudRuntimeException; @APICommand(name = "addNetworkDevice", description="Adds a network device of one of the following types: ExternalDhcp, ExternalFirewall, ExternalLoadBalancer, PxeServer", responseObject = NetworkDeviceResponse.class) @@ -46,6 +46,7 
@@ public class AddNetworkDeviceCmd extends BaseCmd { // ////////////// API parameters ///////////////////// // /////////////////////////////////////////////////// + @Inject ExternalNetworkDeviceManager nwDeviceMgr; @Parameter(name = ApiConstants.NETWORK_DEVICE_TYPE, type = CommandType.STRING, description = "Network device type, now supports ExternalDhcp, PxeServer, NetscalerMPXLoadBalancer, NetscalerVPXLoadBalancer, NetscalerSDXLoadBalancer, F5BigIpLoadBalancer, JuniperSRXFirewall") private String type; @@ -63,11 +64,8 @@ public class AddNetworkDeviceCmd extends BaseCmd { @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, - ResourceAllocationException { + ResourceAllocationException { try { - ExternalNetworkDeviceManager nwDeviceMgr; - ComponentLocator locator = ComponentLocator.getLocator(ManagementService.Name); - nwDeviceMgr = locator.getManager(ExternalNetworkDeviceManager.class); Host device = nwDeviceMgr.addNetworkDevice(this); NetworkDeviceResponse response = nwDeviceMgr.getApiResponse(device); response.setObjectName("networkdevice"); diff --git a/api/src/org/apache/cloudstack/api/command/admin/network/AddNetworkServiceProviderCmd.java b/api/src/org/apache/cloudstack/api/command/admin/network/AddNetworkServiceProviderCmd.java index b6518d8eb59..6d4b962d4a1 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/network/AddNetworkServiceProviderCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/network/AddNetworkServiceProviderCmd.java @@ -59,10 +59,6 @@ public class AddNetworkServiceProviderCmd extends BaseAsyncCreateCmd { @Parameter(name=ApiConstants.SERVICE_LIST, type=CommandType.LIST, collectionType = CommandType.STRING, description="the list of services to be enabled for this physical network service provider") private List enabledServices; - @Override - public String getEntityTable() { - return "physical_network_service_providers"; - } ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -116,6 +112,7 @@ public class AddNetworkServiceProviderCmd extends BaseAsyncCreateCmd { PhysicalNetworkServiceProvider result = _networkService.addProviderToPhysicalNetwork(getPhysicalNetworkId(), getProviderName(), getDestinationPhysicalNetworkId(), getEnabledServices()); if (result != null) { setEntityId(result.getId()); + setEntityUuid(result.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to add service provider entity to physical network"); } diff --git a/api/src/org/apache/cloudstack/api/command/admin/network/CreatePhysicalNetworkCmd.java b/api/src/org/apache/cloudstack/api/command/admin/network/CreatePhysicalNetworkCmd.java index dd3f3231351..f56ae7dbf50 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/network/CreatePhysicalNetworkCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/network/CreatePhysicalNetworkCmd.java @@ -79,10 +79,6 @@ public class CreatePhysicalNetworkCmd extends BaseAsyncCreateCmd { return tags; } - @Override - public String getEntityTable() { - return "physical_network"; - } public Long getZoneId() { return zoneId; @@ -164,6 +160,7 @@ public class CreatePhysicalNetworkCmd extends BaseAsyncCreateCmd { PhysicalNetwork result = _networkService.createPhysicalNetwork(getZoneId(),getVlan(),getNetworkSpeed(), getIsolationMethods(),getBroadcastDomainRange(),getDomainId(), getTags(), getNetworkName()); if (result != null) { 
setEntityId(result.getId()); + setEntityUuid(result.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create physical network entity"); } diff --git a/api/src/org/apache/cloudstack/api/command/admin/network/DeleteNetworkDeviceCmd.java b/api/src/org/apache/cloudstack/api/command/admin/network/DeleteNetworkDeviceCmd.java index 09451242daf..38971c1f3a9 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/network/DeleteNetworkDeviceCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/network/DeleteNetworkDeviceCmd.java @@ -16,23 +16,23 @@ // under the License. package org.apache.cloudstack.api.command.admin.network; -import org.apache.log4j.Logger; +import javax.inject.Inject; +import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd; -import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; -import org.apache.cloudstack.network.ExternalNetworkDeviceManager; import org.apache.cloudstack.api.response.HostResponse; import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.network.ExternalNetworkDeviceManager; +import org.apache.log4j.Logger; + import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; -import com.cloud.server.ManagementService; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.exception.CloudRuntimeException; @APICommand(name = "deleteNetworkDevice", description="Deletes network device.", responseObject=SuccessResponse.class) @@ -40,6 +40,8 @@ public class DeleteNetworkDeviceCmd extends BaseCmd { public static final Logger s_logger = Logger.getLogger(DeleteNetworkDeviceCmd.class); private static final String s_name = "deletenetworkdeviceresponse"; + @Inject ExternalNetworkDeviceManager nwDeviceMgr; + ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// @@ -54,11 +56,8 @@ public class DeleteNetworkDeviceCmd extends BaseCmd { @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, - ResourceAllocationException { + ResourceAllocationException { try { - ExternalNetworkDeviceManager nwDeviceMgr; - ComponentLocator locator = ComponentLocator.getLocator(ManagementService.Name); - nwDeviceMgr = locator.getManager(ExternalNetworkDeviceManager.class); boolean result = nwDeviceMgr.deleteNetworkDevice(this); if (result) { SuccessResponse response = new SuccessResponse(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/admin/network/ListNetworkDeviceCmd.java b/api/src/org/apache/cloudstack/api/command/admin/network/ListNetworkDeviceCmd.java index 742ff1f74af..04a26af0781 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/network/ListNetworkDeviceCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/network/ListNetworkDeviceCmd.java @@ -20,25 +20,25 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; +import javax.inject.Inject; +import org.apache.cloudstack.api.APICommand; import 
org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.BaseListCmd; -import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; -import org.apache.cloudstack.network.ExternalNetworkDeviceManager; -import org.apache.cloudstack.api.response.NetworkDeviceResponse; import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.NetworkDeviceResponse; +import org.apache.cloudstack.network.ExternalNetworkDeviceManager; +import org.apache.log4j.Logger; + import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.Host; -import com.cloud.server.ManagementService; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.exception.CloudRuntimeException; @APICommand(name = "listNetworkDevice", description="List network devices", responseObject = NetworkDeviceResponse.class) @@ -46,6 +46,7 @@ public class ListNetworkDeviceCmd extends BaseListCmd { public static final Logger s_logger = Logger.getLogger(ListNetworkDeviceCmd.class); private static final String s_name = "listnetworkdevice"; + @Inject ExternalNetworkDeviceManager nwDeviceMgr; ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// @@ -66,11 +67,8 @@ public class ListNetworkDeviceCmd extends BaseListCmd { @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, - ResourceAllocationException { + ResourceAllocationException { try { - ExternalNetworkDeviceManager nwDeviceMgr; - ComponentLocator locator = ComponentLocator.getLocator(ManagementService.Name); - nwDeviceMgr = locator.getManager(ExternalNetworkDeviceManager.class); List devices = nwDeviceMgr.listNetworkDevice(this); List nwdeviceResponses = new ArrayList(); ListResponse listResponse = new ListResponse(); diff --git a/api/src/org/apache/cloudstack/api/command/admin/router/CreateVirtualRouterElementCmd.java b/api/src/org/apache/cloudstack/api/command/admin/router/CreateVirtualRouterElementCmd.java index 545218f0364..f6a7b744ca3 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/router/CreateVirtualRouterElementCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/router/CreateVirtualRouterElementCmd.java @@ -53,10 +53,7 @@ public class CreateVirtualRouterElementCmd extends BaseAsyncCreateCmd { this.nspId = nspId; } - @Override - public String getEntityTable() { - return "virtual_router_providers"; - } + public Long getNspId() { return nspId; @@ -94,6 +91,7 @@ public class CreateVirtualRouterElementCmd extends BaseAsyncCreateCmd { VirtualRouterProvider result = _service.addElement(getNspId(), VirtualRouterProviderType.VirtualRouter); if (result != null) { setEntityId(result.getId()); + setEntityUuid(result.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to add Virtual Router entity to physical network"); } diff --git a/api/src/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java b/api/src/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java index 
1759ff7e8e6..5dca9d2d4c1 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java @@ -66,10 +66,6 @@ public class AddTrafficTypeCmd extends BaseAsyncCreateCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - @Override - public String getEntityTable() { - return "physical_network_traffic_types"; - } public Long getPhysicalNetworkId() { return physicalNetworkId; @@ -136,6 +132,7 @@ public class AddTrafficTypeCmd extends BaseAsyncCreateCmd { PhysicalNetworkTrafficType result = _networkService.addTrafficTypeToPhysicalNetwork(getPhysicalNetworkId(), getTrafficType(), getXenLabel(), getKvmLabel(), getVmwareLabel(), getSimulatorLabel(), getVlan()); if (result != null) { setEntityId(result.getId()); + setEntityUuid(result.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to add traffic type to physical network"); } diff --git a/api/src/org/apache/cloudstack/api/command/admin/vpc/CreatePrivateGatewayCmd.java b/api/src/org/apache/cloudstack/api/command/admin/vpc/CreatePrivateGatewayCmd.java index 7950b877cec..5bb76ab034b 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/vpc/CreatePrivateGatewayCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/vpc/CreatePrivateGatewayCmd.java @@ -123,6 +123,7 @@ public class CreatePrivateGatewayCmd extends BaseAsyncCreateCmd { if (result != null) { this.setEntityId(result.getId()); + this.setEntityUuid(result.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create private gateway"); } @@ -156,10 +157,6 @@ public class CreatePrivateGatewayCmd extends BaseAsyncCreateCmd { return "creating private gateway"; } - @Override - public String getEntityTable() { - return "vpc_gateways"; - } @Override diff --git a/api/src/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java b/api/src/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java index a0abe99f826..273f7c05233 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java @@ -70,6 +70,7 @@ public class CreateVPCOfferingCmd extends BaseAsyncCreateCmd{ VpcOffering vpcOff = _vpcService.createVpcOffering(getVpcOfferingName(), getDisplayText(), getSupportedServices()); if (vpcOff != null) { this.setEntityId(vpcOff.getId()); + this.setEntityUuid(vpcOff.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create a VPC offering"); } @@ -87,10 +88,6 @@ public class CreateVPCOfferingCmd extends BaseAsyncCreateCmd{ } } - @Override - public String getEntityTable() { - return "vpc_offerings"; - } @Override public String getEventType() { diff --git a/api/src/org/apache/cloudstack/api/command/user/address/AssociateIPAddrCmd.java b/api/src/org/apache/cloudstack/api/command/user/address/AssociateIPAddrCmd.java index 7d4e44bb507..024ba74e8b4 100644 --- a/api/src/org/apache/cloudstack/api/command/user/address/AssociateIPAddrCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/address/AssociateIPAddrCmd.java @@ -87,9 +87,6 @@ public class AssociateIPAddrCmd extends BaseAsyncCreateCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - public String getEntityTable() { - return "user_ip_address"; - } public String getAccountName() { if 
(accountName != null) { @@ -220,6 +217,7 @@ public class AssociateIPAddrCmd extends BaseAsyncCreateCmd { if (ip != null) { this.setEntityId(ip.getId()); + this.setEntityUuid(ip.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to allocate ip address"); } diff --git a/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScalePolicyCmd.java b/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScalePolicyCmd.java index db3aaa6dc5d..e92721d77bf 100644 --- a/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScalePolicyCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScalePolicyCmd.java @@ -62,10 +62,6 @@ public class CreateAutoScalePolicyCmd extends BaseAsyncCreateCmd { private Long conditionDomainId; private Long conditionAccountId; - @Override - public String getEntityTable() { - return "autoscale_policies"; - } public int getDuration() { return duration; @@ -159,6 +155,7 @@ public class CreateAutoScalePolicyCmd extends BaseAsyncCreateCmd { AutoScalePolicy result = _autoScaleService.createAutoScalePolicy(this); if (result != null) { this.setEntityId(result.getId()); + this.setEntityUuid(result.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create AutoScale Policy"); } diff --git a/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmGroupCmd.java b/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmGroupCmd.java index 6297888f5d3..e3d47a09c7d 100644 --- a/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmGroupCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmGroupCmd.java @@ -72,10 +72,6 @@ public class CreateAutoScaleVmGroupCmd extends BaseAsyncCreateCmd { // ///////////////// Accessors /////////////////////// // /////////////////////////////////////////////////// - @Override - public String getEntityTable() { - return "autoscale_vmgroups"; - } public int getMinMembers() { return minMembers; @@ -161,6 +157,7 @@ public class CreateAutoScaleVmGroupCmd extends BaseAsyncCreateCmd { AutoScaleVmGroup result = _autoScaleService.createAutoScaleVmGroup(this); if (result != null) { this.setEntityId(result.getId()); + this.setEntityUuid(result.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create Autoscale Vm Group"); } diff --git a/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java b/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java index daa48501c53..25bb03b778f 100644 --- a/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java @@ -86,10 +86,7 @@ public class CreateAutoScaleVmProfileCmd extends BaseAsyncCreateCmd { private Long domainId; private Long accountId; - @Override - public String getEntityTable() { - return "autoscale_vmprofiles"; - } + public Long getDomainId() { if (domainId == null) { @@ -232,6 +229,7 @@ public class CreateAutoScaleVmProfileCmd extends BaseAsyncCreateCmd { AutoScaleVmProfile result = _autoScaleService.createAutoScaleVmProfile(this); if (result != null) { this.setEntityId(result.getId()); + this.setEntityUuid(result.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create Autoscale Vm Profile"); } diff --git 
a/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateConditionCmd.java b/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateConditionCmd.java index a9524714ffa..58926f2a4ff 100644 --- a/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateConditionCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateConditionCmd.java @@ -72,6 +72,7 @@ public class CreateConditionCmd extends BaseAsyncCreateCmd { if (condition != null) { this.setEntityId(condition.getId()); + this.setEntityUuid(condition.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create condition."); } @@ -146,8 +147,5 @@ public class CreateConditionCmd extends BaseAsyncCreateCmd { return accountId; } - @Override - public String getEntityTable() { - return "conditions"; - } + } diff --git a/api/src/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java b/api/src/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java index 803301febe9..7039b417ced 100644 --- a/api/src/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java @@ -80,9 +80,6 @@ public class CreateFirewallRuleCmd extends BaseAsyncCreateCmd implements Firewal // ///////////////// Accessors /////////////////////// // /////////////////////////////////////////////////// - public String getEntityTable() { - return "firewall_rules"; - } public Long getIpAddressId() { return ipAddressId; @@ -242,6 +239,7 @@ public class CreateFirewallRuleCmd extends BaseAsyncCreateCmd implements Firewal try { FirewallRule result = _firewallService.createFirewallRule(this); setEntityId(result.getId()); + setEntityUuid(result.getUuid()); } catch (NetworkRuleConflictException ex) { s_logger.info("Network rule conflict: " + ex.getMessage()); s_logger.trace("Network Rule Conflict: ", ex); diff --git a/api/src/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java b/api/src/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java index ecccf032ace..1feefde9a1a 100644 --- a/api/src/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java @@ -94,9 +94,6 @@ public class CreatePortForwardingRuleCmd extends BaseAsyncCreateCmd implements P // ///////////////// Accessors /////////////////////// // /////////////////////////////////////////////////// - public String getEntityTable() { - return "firewall_rules"; - } public Long getIpAddressId() { return ipAddressId; @@ -301,6 +298,7 @@ public class CreatePortForwardingRuleCmd extends BaseAsyncCreateCmd implements P try { PortForwardingRule result = _rulesService.createPortForwardingRule(this, virtualMachineId, getOpenFirewall()); setEntityId(result.getId()); + setEntityUuid(result.getUuid()); } catch (NetworkRuleConflictException ex) { s_logger.info("Network rule conflict: " , ex); s_logger.trace("Network Rule Conflict: ", ex); diff --git a/api/src/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBStickinessPolicyCmd.java b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBStickinessPolicyCmd.java index dc80d312769..c01e138c1d1 100644 --- a/api/src/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBStickinessPolicyCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBStickinessPolicyCmd.java @@ 
-91,9 +91,7 @@ public class CreateLBStickinessPolicyCmd extends BaseAsyncCreateCmd { return paramList; } - public String getEntityTable() { - return "firewall_rules"; - } + // /////////////////////////////////////////////////// // ///////////// API Implementation/////////////////// // /////////////////////////////////////////////////// @@ -141,6 +139,7 @@ public class CreateLBStickinessPolicyCmd extends BaseAsyncCreateCmd { try { StickinessPolicy result = _lbService.createLBStickinessPolicy(this); this.setEntityId(result.getId()); + this.setEntityUuid(result.getUuid()); } catch (NetworkRuleConflictException e) { s_logger.warn("Exception: ", e); throw new ServerApiException(BaseCmd.NETWORK_RULE_CONFLICT_ERROR, e.getMessage()); diff --git a/api/src/org/apache/cloudstack/api/command/user/loadbalancer/CreateLoadBalancerRuleCmd.java b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/CreateLoadBalancerRuleCmd.java index 4aacc8e19b2..4e76a6b676f 100644 --- a/api/src/org/apache/cloudstack/api/command/user/loadbalancer/CreateLoadBalancerRuleCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/loadbalancer/CreateLoadBalancerRuleCmd.java @@ -120,9 +120,6 @@ public class CreateLoadBalancerRuleCmd extends BaseAsyncCreateCmd /*implements return privatePort; } - public String getEntityTable() { - return "firewall_rules"; - } public Long getSourceIpAddressId() { if (publicIpId != null) { @@ -283,6 +280,7 @@ public class CreateLoadBalancerRuleCmd extends BaseAsyncCreateCmd /*implements try { LoadBalancer result = _lbService.createLoadBalancerRule(this, getOpenFirewall()); this.setEntityId(result.getId()); + this.setEntityUuid(result.getUuid()); } catch (NetworkRuleConflictException e) { s_logger.warn("Exception: ", e); throw new ServerApiException(BaseCmd.NETWORK_RULE_CONFLICT_ERROR, e.getMessage()); diff --git a/api/src/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java b/api/src/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java index e612b84c835..1ce3458dde3 100644 --- a/api/src/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java @@ -75,9 +75,6 @@ public class CreateIpForwardingRuleCmd extends BaseAsyncCreateCmd implements Sta /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - public String getEntityTable() { - return "firewall_rules"; - } public Long getIpAddressId() { return ipAddressId; @@ -151,6 +148,7 @@ public class CreateIpForwardingRuleCmd extends BaseAsyncCreateCmd implements Sta try { StaticNatRule rule = _rulesService.createStaticNatRule(this, getOpenFirewall()); this.setEntityId(rule.getId()); + this.setEntityUuid(rule.getUuid()); } catch (NetworkRuleConflictException e) { s_logger.info("Unable to create Static Nat Rule due to ", e); throw new ServerApiException(BaseCmd.NETWORK_RULE_CONFLICT_ERROR, e.getMessage()); diff --git a/api/src/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java b/api/src/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java index e2aba5b321e..16843b56d67 100644 --- a/api/src/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java @@ -86,10 +86,6 @@ public class CreateNetworkACLCmd extends BaseAsyncCreateCmd implements FirewallR // ///////////////// Accessors /////////////////////// // 
/////////////////////////////////////////////////// - public String getEntityTable() { - return "firewall_rules"; - } - public Long getIpAddressId() { return null; } @@ -262,6 +258,7 @@ public class CreateNetworkACLCmd extends BaseAsyncCreateCmd implements FirewallR try { FirewallRule result = _networkACLService.createNetworkACL(this); setEntityId(result.getId()); + setEntityUuid(result.getUuid()); } catch (NetworkRuleConflictException ex) { s_logger.info("Network rule conflict: " + ex.getMessage()); s_logger.trace("Network Rule Conflict: ", ex); diff --git a/api/src/org/apache/cloudstack/api/command/user/project/CreateProjectCmd.java b/api/src/org/apache/cloudstack/api/command/user/project/CreateProjectCmd.java index 9500a972b36..865f7a0aa99 100644 --- a/api/src/org/apache/cloudstack/api/command/user/project/CreateProjectCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/project/CreateProjectCmd.java @@ -56,9 +56,6 @@ public class CreateProjectCmd extends BaseAsyncCreateCmd { // ///////////////// Accessors /////////////////////// // /////////////////////////////////////////////////// - public String getEntityTable() { - return "projects"; - } public String getAccountName() { if (accountName != null) { @@ -127,6 +124,7 @@ public class CreateProjectCmd extends BaseAsyncCreateCmd { Project project = _projectService.createProject(getName(), getDisplayText(), getAccountName(), getDomainId()); if (project != null) { this.setEntityId(project.getId()); + this.setEntityUuid(project.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create a project"); } diff --git a/api/src/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java b/api/src/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java index 33469ac4882..14f46540cc3 100644 --- a/api/src/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java @@ -65,9 +65,6 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd { // ///////////////// Accessors /////////////////////// // /////////////////////////////////////////////////// - public String getEntityTable() { - return "snapshots"; - } public String getAccountName() { return accountName; @@ -153,6 +150,7 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd { Snapshot snapshot = _snapshotService.allocSnapshot(getVolumeId(), getPolicyId()); if (snapshot != null) { this.setEntityId(snapshot.getId()); + this.setEntityUuid(snapshot.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create snapshot"); } diff --git a/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java b/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java index 65cc8b9e9a4..e72b49b4e4d 100644 --- a/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java @@ -102,9 +102,6 @@ import com.cloud.user.UserContext; // ///////////////// Accessors /////////////////////// // /////////////////////////////////////////////////// - public String getEntityTable() { - return "vm_template"; - } public Integer getBits() { return bits; @@ -240,13 +237,15 @@ import com.cloud.user.UserContext; public void create() throws ResourceAllocationException { if (isBareMetal()) { _bareMetalVmService.createPrivateTemplateRecord(this, _accountService.getAccount(getEntityOwnerId())); - 
/*Baremetal creates template record after taking image proceeded, use vmId as entity id here*/ + /* Baremetal creates the template record only after the image has been taken; use vmId as both the entity id and uuid here */ this.setEntityId(vmId); + this.setEntityUuid(vmId.toString()); } else { VirtualMachineTemplate template = null; template = _userVmService.createPrivateTemplateRecord(this, _accountService.getAccount(getEntityOwnerId())); if (template != null) { this.setEntityId(template.getId()); + this.setEntityUuid(template.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create a template"); diff --git a/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java b/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java index 349f4a12d16..e675c83dd6f 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java @@ -147,10 +147,9 @@ public class DeployVMCmd extends BaseAsyncCreateCmd { private List securityGroupNameList; @ACL(checkKeyAccess=true) - @Parameter(name = ApiConstants.IP_NETWORK_LIST, type = CommandType.MAP, entityType={Network.class,IpAddress.class}, + @Parameter(name = ApiConstants.IP_NETWORK_LIST, type = CommandType.MAP, entityType={Network.class, IpAddress.class}, description = "ip to network mapping. Can't be specified with networkIds parameter." + - " Example: iptonetworklist[0].ip=10.10.10.11&iptonetworklist[0].networkid=204 - requests to" + - " use ip 10.10.10.11 in network id=204") + " Example: iptonetworklist[0].ip=10.10.10.11&iptonetworklist[0].networkid=uuid - requests to use ip 10.10.10.11 in network id=uuid") private Map ipToNetworkList; @Parameter(name=ApiConstants.IP_ADDRESS, type=CommandType.STRING, description="the ip address for default vm's network") @@ -171,9 +170,6 @@ public class DeployVMCmd extends BaseAsyncCreateCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - public String getEntityTable() { - return "vm_instance"; - } public String getAccountName() { if (accountName == null) { @@ -287,7 +283,17 @@ public class DeployVMCmd extends BaseAsyncCreateCmd { Iterator iter = ipsCollection.iterator(); while (iter.hasNext()) { HashMap ips = (HashMap) iter.next(); - Long networkId = Long.valueOf(_responseGenerator.getIdentiyId("networks", ips.get("networkid"))); + Long networkId; + Network network = _networkService.getNetwork(ips.get("networkid")); + if (network != null) { + networkId = network.getId(); + } else { + try { + networkId = Long.parseLong(ips.get("networkid")); + } catch(NumberFormatException e) { + throw new InvalidParameterValueException("Unable to translate and find entity with networkId: " + ips.get("networkid")); + } + } String requestedIp = (String) ips.get("ip"); ipToNetworkMap.put(networkId, requestedIp); } @@ -446,6 +452,7 @@ public class DeployVMCmd extends BaseAsyncCreateCmd { if (vm != null) { setEntityId(vm.getId()); + setEntityUuid(vm.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to deploy vm"); } diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java index 512685f77f6..04541b9fda7 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java @@ -76,9 +76,6 @@ public class CreateVolumeCmd extends 
BaseAsyncCreateCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - public String getEntityTable() { - return "volumes"; - } public String getAccountName() { return accountName; @@ -154,6 +151,7 @@ public class CreateVolumeCmd extends BaseAsyncCreateCmd { Volume volume = _storageService.allocVolume(this); if (volume != null) { this.setEntityId(volume.getId()); + this.setEntityUuid(volume.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create volume"); } diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/ExtractVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/ExtractVolumeCmd.java index 7f6cd052470..43b25a83663 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/ExtractVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/ExtractVolumeCmd.java @@ -137,7 +137,7 @@ public class ExtractVolumeCmd extends BaseAsyncCmd { Volume vol = _entityMgr.findById(Volume.class, id); response.setId(vol.getUuid()); response.setName(vol.getName()); - DataCenter zone = _entityMgr.findById(DataCenter.class, id); + DataCenter zone = _entityMgr.findById(DataCenter.class, zoneId); response.setZoneId(zone.getUuid()); response.setZoneName(zone.getName()); response.setMode(mode); diff --git a/api/src/org/apache/cloudstack/api/command/user/vpc/CreateStaticRouteCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpc/CreateStaticRouteCmd.java index 85a0ae45ae8..96de56a5be5 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpc/CreateStaticRouteCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpc/CreateStaticRouteCmd.java @@ -67,6 +67,7 @@ public class CreateStaticRouteCmd extends BaseAsyncCreateCmd{ try { StaticRoute result = _vpcService.createStaticRoute(getGatewayId(), getCidr()); setEntityId(result.getId()); + setEntityUuid(result.getUuid()); } catch (NetworkRuleConflictException ex) { s_logger.info("Network rule conflict: " + ex.getMessage()); s_logger.trace("Network rule conflict: ", ex); @@ -74,10 +75,6 @@ public class CreateStaticRouteCmd extends BaseAsyncCreateCmd{ } } - @Override - public String getEntityTable() { - return "static_routes"; - } @Override public String getEventType() { diff --git a/api/src/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java index df16c8edc88..8a2e1f641fb 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java @@ -124,6 +124,7 @@ public class CreateVPCCmd extends BaseAsyncCreateCmd{ getCidr(), getNetworkDomain()); if (vpc != null) { this.setEntityId(vpc.getId()); + this.setEntityUuid(vpc.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create a VPC"); } @@ -157,11 +158,6 @@ public class CreateVPCCmd extends BaseAsyncCreateCmd{ } } - @Override - public String getEntityTable() { - return "vpc"; - } - @Override public String getEventType() { diff --git a/api/src/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java index 674dc6a5809..f2d19a7cce6 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java @@ -103,9 +103,6 @@ public class AddVpnUserCmd extends BaseAsyncCreateCmd { return accountId; } 
- public String getEntityTable() { - return "vpn_users"; - } @Override public String getEventDescription() { @@ -150,5 +147,6 @@ public class AddVpnUserCmd extends BaseAsyncCreateCmd { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to add vpn user"); } setEntityId(vpnUser.getId()); + setEntityUuid(vpnUser.getUuid()); } } diff --git a/api/src/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java index 37952f8777a..b517af883c3 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java @@ -62,10 +62,6 @@ public class CreateRemoteAccessVpnCmd extends BaseAsyncCreateCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - public String getEntityTable() { - return "user_ip_address"; - } - public Long getPublicIpId() { return publicIpId; } @@ -146,6 +142,11 @@ public class CreateRemoteAccessVpnCmd extends BaseAsyncCreateCmd { RemoteAccessVpn vpn = _ravService.createRemoteAccessVpn(publicIpId, ipRange, getOpenFirewall(), getNetworkId()); if (vpn != null) { this.setEntityId(vpn.getServerAddressId()); + // find uuid for server ip address + IpAddress ipAddr = _entityMgr.findById(IpAddress.class, vpn.getServerAddressId()); + if (ipAddr != null) { + this.setEntityUuid(ipAddr.getUuid()); + } } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create remote access vpn"); } diff --git a/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnConnectionCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnConnectionCmd.java index 7f85fb4ebf6..3dc334d0e2a 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnConnectionCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnConnectionCmd.java @@ -51,9 +51,6 @@ public class CreateVpnConnectionCmd extends BaseAsyncCreateCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - public String getEntityTable() { - return "s2s_vpn_connection"; - } public Long getVpnGatewayId() { return vpnGatewayId; @@ -95,6 +92,7 @@ public class CreateVpnConnectionCmd extends BaseAsyncCreateCmd { Site2SiteVpnConnection conn = _s2sVpnService.createVpnConnection(this); if (conn != null) { this.setEntityId(conn.getId()); + this.setEntityUuid(conn.getUuid()); } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create site to site vpn connection"); } diff --git a/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java index 65085182e0c..bde98b0b44b 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java @@ -78,9 +78,6 @@ public class CreateVpnCustomerGatewayCmd extends BaseAsyncCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - public String getEntityTable() { - return "s2s_customer_gateway"; - } public String getName() { return name; diff --git a/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnGatewayCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnGatewayCmd.java index 89965bd842c..4b405541a90 
100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnGatewayCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnGatewayCmd.java @@ -47,10 +47,6 @@ public class CreateVpnGatewayCmd extends BaseAsyncCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - public String getEntityTable() { - return "s2s_vpn_gateway"; - } - public Long getVpcId() { return vpcId; } diff --git a/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteVpnConnectionCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteVpnConnectionCmd.java index a079e8bcc30..23a7793ef88 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteVpnConnectionCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteVpnConnectionCmd.java @@ -44,9 +44,6 @@ public class DeleteVpnConnectionCmd extends BaseAsyncCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - public String getEntityTable() { - return "s2s_vpn_connection"; - } public Long getId() { return id; diff --git a/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteVpnCustomerGatewayCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteVpnCustomerGatewayCmd.java index ef5ff3db438..181ee3bbc68 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteVpnCustomerGatewayCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteVpnCustomerGatewayCmd.java @@ -43,9 +43,6 @@ public class DeleteVpnCustomerGatewayCmd extends BaseAsyncCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - public String getEntityTable() { - return "s2s_customer_gateway"; - } public Long getId() { return id; diff --git a/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteVpnGatewayCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteVpnGatewayCmd.java index f9b9e35a420..9ac27d07664 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteVpnGatewayCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteVpnGatewayCmd.java @@ -43,9 +43,6 @@ public class DeleteVpnGatewayCmd extends BaseAsyncCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - public String getEntityTable() { - return "s2s_vpn_gateway"; - } public Long getId() { return id; diff --git a/api/src/org/apache/cloudstack/api/command/user/vpn/ResetVpnConnectionCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpn/ResetVpnConnectionCmd.java index 0d7632ac1aa..ed28ea5610f 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpn/ResetVpnConnectionCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpn/ResetVpnConnectionCmd.java @@ -53,9 +53,6 @@ public class ResetVpnConnectionCmd extends BaseAsyncCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - public String getEntityTable() { - return "s2s_vpn_connection"; - } public Long getDomainId() { return domainId; diff --git a/api/src/org/apache/cloudstack/api/command/user/vpn/UpdateVpnCustomerGatewayCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpn/UpdateVpnCustomerGatewayCmd.java index f2778e06103..7564129c38f 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpn/UpdateVpnCustomerGatewayCmd.java +++ 
b/api/src/org/apache/cloudstack/api/command/user/vpn/UpdateVpnCustomerGatewayCmd.java @@ -78,11 +78,7 @@ public class UpdateVpnCustomerGatewayCmd extends BaseAsyncCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - public String getEntityTable() { - return "s2s_customer_gateway"; - } - - public Long getId() { + public Long getId() { return id; } diff --git a/api/src/org/apache/cloudstack/api/response/CapacityResponse.java b/api/src/org/apache/cloudstack/api/response/CapacityResponse.java index 000705813fb..2c98dc9d6ca 100644 --- a/api/src/org/apache/cloudstack/api/response/CapacityResponse.java +++ b/api/src/org/apache/cloudstack/api/response/CapacityResponse.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.response; import org.apache.cloudstack.api.ApiConstants; -import com.cloud.utils.IdentityProxy; import com.cloud.serializer.Param; import com.google.gson.annotations.SerializedName; import org.apache.cloudstack.api.BaseResponse; diff --git a/api/src/org/apache/cloudstack/api/response/CreateCmdResponse.java b/api/src/org/apache/cloudstack/api/response/CreateCmdResponse.java index 3c26324e10b..e4c6c60c5ba 100644 --- a/api/src/org/apache/cloudstack/api/response/CreateCmdResponse.java +++ b/api/src/org/apache/cloudstack/api/response/CreateCmdResponse.java @@ -16,24 +16,16 @@ // under the License. package org.apache.cloudstack.api.response; -import org.apache.cloudstack.api.ApiConstants; -import com.cloud.utils.IdentityProxy; -import com.google.gson.annotations.SerializedName; import org.apache.cloudstack.api.BaseResponse; public class CreateCmdResponse extends BaseResponse { - @SerializedName(ApiConstants.ID) - private IdentityProxy id = new IdentityProxy(); + private String id; - public Long getId() { - return id.getValue(); + public String getId() { + return id; } - public void setId(Long id) { - this.id.setValue(id); - } - - public void setIdEntityTable(String entityTable) { - this.id.setTableName(entityTable); + public void setId(String id) { + this.id = id; } } diff --git a/api/src/org/apache/cloudstack/api/response/ResourceCountResponse.java b/api/src/org/apache/cloudstack/api/response/ResourceCountResponse.java index 9e62f4ff7f5..7a291945f76 100644 --- a/api/src/org/apache/cloudstack/api/response/ResourceCountResponse.java +++ b/api/src/org/apache/cloudstack/api/response/ResourceCountResponse.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.response; import org.apache.cloudstack.api.ApiConstants; -import com.cloud.utils.IdentityProxy; import com.cloud.serializer.Param; import com.google.gson.annotations.SerializedName; import org.apache.cloudstack.api.BaseResponse; diff --git a/api/src/org/apache/cloudstack/api/response/S3Response.java b/api/src/org/apache/cloudstack/api/response/S3Response.java index 5dd0ef0e041..4dab2175a3a 100644 --- a/api/src/org/apache/cloudstack/api/response/S3Response.java +++ b/api/src/org/apache/cloudstack/api/response/S3Response.java @@ -19,7 +19,6 @@ package org.apache.cloudstack.api.response; import com.cloud.serializer.Param; -import com.cloud.utils.IdentityProxy; import com.google.gson.annotations.SerializedName; import org.apache.cloudstack.api.BaseResponse; @@ -29,7 +28,7 @@ public class S3Response extends BaseResponse { @SerializedName(ID) @Param(description = "The ID of the S3 configuration") - private IdentityProxy id = new IdentityProxy("s3"); + private String id; @SerializedName(S3_ACCESS_KEY) @Param(description = "The S3 access key") @@ -135,11 +134,11 @@ public 
class S3Response extends BaseResponse { @Override public String getObjectId() { - return this.id.getValue().toString(); + return this.id; } - public void setObjectId(Long id) { - this.id.setValue(id); + public void setObjectId(String id) { + this.id = id; } public String getAccessKey() { diff --git a/api/src/org/apache/cloudstack/api/response/TemplateResponse.java b/api/src/org/apache/cloudstack/api/response/TemplateResponse.java index f6f74dac5b7..033c2e243d5 100644 --- a/api/src/org/apache/cloudstack/api/response/TemplateResponse.java +++ b/api/src/org/apache/cloudstack/api/response/TemplateResponse.java @@ -135,8 +135,8 @@ public class TemplateResponse extends BaseResponse implements ControlledEntityRe @SerializedName(ApiConstants.TAGS) @Param(description="the list of resource tags associated with tempate", responseObject = ResourceTagResponse.class) private List tags; - - + @SerializedName(ApiConstants.SSHKEY_ENABLED) @Param(description="true if template is sshkey enabled, false otherwise") + private Boolean sshKeyEnabled; @Override public String getObjectId() { @@ -290,4 +290,9 @@ public class TemplateResponse extends BaseResponse implements ControlledEntityRe public void setTags(List tags) { this.tags = tags; } + + public void setSshKeyEnabled(boolean sshKeyEnabled) { + this.sshKeyEnabled = sshKeyEnabled; + } + } diff --git a/awsapi/src/com/cloud/bridge/auth/ec2/AuthenticationHandler.java b/awsapi/src/com/cloud/bridge/auth/ec2/AuthenticationHandler.java index f79feaad5bc..04060844697 100644 --- a/awsapi/src/com/cloud/bridge/auth/ec2/AuthenticationHandler.java +++ b/awsapi/src/com/cloud/bridge/auth/ec2/AuthenticationHandler.java @@ -16,142 +16,147 @@ // under the License. package com.cloud.bridge.auth.ec2; -import org.apache.axiom.soap.SOAPEnvelope; -import org.apache.log4j.Logger; -import org.apache.axis2.context.MessageContext; -import org.apache.axis2.engine.Handler; -import org.apache.axis2.AxisFault; -import org.apache.axis2.description.HandlerDescription; -import org.apache.axis2.description.Parameter; -import org.apache.commons.codec.binary.Base64; +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.security.cert.Certificate; +import java.security.cert.CertificateFactory; +import javax.inject.Inject; +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; + +import org.apache.axiom.soap.SOAPEnvelope; +import org.apache.axis2.AxisFault; +import org.apache.axis2.context.MessageContext; +import org.apache.axis2.description.HandlerDescription; +import org.apache.axis2.description.Parameter; +import org.apache.axis2.engine.Handler; +import org.apache.commons.codec.binary.Base64; +import org.apache.log4j.Logger; import org.w3c.dom.Document; import org.w3c.dom.Node; import org.w3c.dom.NodeList; -import java.security.cert.Certificate; -import java.security.cert.CertificateFactory; -import java.io.ByteArrayInputStream; -import java.io.InputStream; - -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; - import com.cloud.bridge.model.UserCredentialsVO; -import com.cloud.bridge.persist.dao.UserCredentialsDaoImpl; +import com.cloud.bridge.persist.dao.UserCredentialsDao; import com.cloud.bridge.service.UserContext; import com.cloud.bridge.util.AuthenticationUtils; -import com.cloud.utils.component.ComponentLocator; public class AuthenticationHandler implements Handler { - protected final static Logger logger = Logger.getLogger(AuthenticationHandler.class); - protected final 
UserCredentialsDaoImpl ucDao = ComponentLocator.inject(UserCredentialsDaoImpl.class); - private DocumentBuilderFactory dbf = null; + protected final static Logger logger = Logger.getLogger(AuthenticationHandler.class); + @Inject protected UserCredentialsDao ucDao; + private DocumentBuilderFactory dbf = null; - protected HandlerDescription handlerDesc = new HandlerDescription( "EC2AuthenticationHandler" ); - private String name = "EC2AuthenticationHandler"; - - public void init( HandlerDescription handlerdesc ) - { - dbf = DocumentBuilderFactory.newInstance(); - dbf.setNamespaceAware( true ); + protected HandlerDescription handlerDesc = new HandlerDescription( "EC2AuthenticationHandler" ); + private String name = "EC2AuthenticationHandler"; - this.handlerDesc = handlerdesc; - } - - public String getName() - { - return name; - } + @Override + public void init( HandlerDescription handlerdesc ) + { + dbf = DocumentBuilderFactory.newInstance(); + dbf.setNamespaceAware( true ); - public String toString() - { - return (name != null) ? name.toString() : null; - } - - public HandlerDescription getHandlerDesc() - { - return handlerDesc; - } - - public Parameter getParameter( String name ) - { - return handlerDesc.getParameter( name ); - } - - - /** - * For EC2 SOAP calls this function's goal is to extract the X509 certificate that is - * part of the WS-Security wrapped SOAP request. We need the cert in order to - * map it to the user's Cloud API key and Cloud Secret Key. - */ - public InvocationResponse invoke(MessageContext msgContext) throws AxisFault - { - // -> the certificate we want is embedded into the soap header - try - { SOAPEnvelope soapEnvelope = msgContext.getEnvelope(); - String xmlHeader = soapEnvelope.toString(); - //System.out.println( "entire request: " + xmlHeader ); - - InputStream is = new ByteArrayInputStream( xmlHeader.getBytes("UTF-8")); - DocumentBuilder db = dbf.newDocumentBuilder(); - Document request = db.parse( is ); - NodeList certs = request.getElementsByTagNameNS( "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd", "BinarySecurityToken" ); - if (0 < certs.getLength()) { - Node item = certs.item(0); - String result = new String( item.getFirstChild().getNodeValue()); - byte[] certBytes = Base64.decodeBase64( result.getBytes()); + this.handlerDesc = handlerdesc; + } - Certificate userCert = null; - CertificateFactory cf = CertificateFactory.getInstance( "X.509" ); - ByteArrayInputStream bs = new ByteArrayInputStream( certBytes ); - while (bs.available() > 0) userCert = cf.generateCertificate(bs); - //System.out.println( "cert: " + userCert.toString()); - String uniqueId = AuthenticationUtils.X509CertUniqueId( userCert ); - logger.debug( "X509 cert's uniqueId: " + uniqueId ); - - // -> find the Cloud API key and the secret key from the cert's uniqueId -/* UserCredentialsDao credentialDao = new UserCredentialsDao(); + @Override + public String getName() + { + return name; + } + + @Override + public String toString() + { + return (name != null) ? name.toString() : null; + } + + @Override + public HandlerDescription getHandlerDesc() + { + return handlerDesc; + } + + @Override + public Parameter getParameter( String name ) + { + return handlerDesc.getParameter( name ); + } + + + /** + * For EC2 SOAP calls this function's goal is to extract the X509 certificate that is + * part of the WS-Security wrapped SOAP request. We need the cert in order to + * map it to the user's Cloud API key and Cloud Secret Key. 
+ */ + @Override + public InvocationResponse invoke(MessageContext msgContext) throws AxisFault + { + // -> the certificate we want is embedded into the soap header + try + { SOAPEnvelope soapEnvelope = msgContext.getEnvelope(); + String xmlHeader = soapEnvelope.toString(); + //System.out.println( "entire request: " + xmlHeader ); + + InputStream is = new ByteArrayInputStream( xmlHeader.getBytes("UTF-8")); + DocumentBuilder db = dbf.newDocumentBuilder(); + Document request = db.parse( is ); + NodeList certs = request.getElementsByTagNameNS( "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd", "BinarySecurityToken" ); + if (0 < certs.getLength()) { + Node item = certs.item(0); + String result = new String( item.getFirstChild().getNodeValue()); + byte[] certBytes = Base64.decodeBase64( result.getBytes()); + + Certificate userCert = null; + CertificateFactory cf = CertificateFactory.getInstance( "X.509" ); + ByteArrayInputStream bs = new ByteArrayInputStream( certBytes ); + while (bs.available() > 0) userCert = cf.generateCertificate(bs); + //System.out.println( "cert: " + userCert.toString()); + String uniqueId = AuthenticationUtils.X509CertUniqueId( userCert ); + logger.debug( "X509 cert's uniqueId: " + uniqueId ); + + // -> find the Cloud API key and the secret key from the cert's uniqueId + /* UserCredentialsDao credentialDao = new UserCredentialsDao(); UserCredentials cloudKeys = credentialDao.getByCertUniqueId( uniqueId ); -*/ - UserCredentialsVO cloudKeys = ucDao.getByCertUniqueId(uniqueId); - if ( null == cloudKeys ) { - logger.error( "Cert does not map to Cloud API keys: " + uniqueId ); - throw new AxisFault( "User not properly registered: Certificate does not map to Cloud API Keys", "Client.Blocked" ); - } - else UserContext.current().initContext( cloudKeys.getAccessKey(), cloudKeys.getSecretKey(), cloudKeys.getAccessKey(), "SOAP Request", null ); - //System.out.println( "end of cert match: " + UserContext.current().getSecretKey()); - } - } - catch (AxisFault e) { - throw e; - } - catch( Exception e ) { + */ + UserCredentialsVO cloudKeys = ucDao.getByCertUniqueId(uniqueId); + if ( null == cloudKeys ) { + logger.error( "Cert does not map to Cloud API keys: " + uniqueId ); + throw new AxisFault( "User not properly registered: Certificate does not map to Cloud API Keys", "Client.Blocked" ); + } + else UserContext.current().initContext( cloudKeys.getAccessKey(), cloudKeys.getSecretKey(), cloudKeys.getAccessKey(), "SOAP Request", null ); + //System.out.println( "end of cert match: " + UserContext.current().getSecretKey()); + } + } + catch (AxisFault e) { + throw e; + } + catch( Exception e ) { logger.error("EC2 Authentication Handler: ", e); - throw new AxisFault( "An unknown error occurred.", "Server.InternalError" ); - } + throw new AxisFault( "An unknown error occurred.", "Server.InternalError" ); + } return InvocationResponse.CONTINUE; - } + } - - public void revoke(MessageContext msgContext) - { - logger.info(msgContext.getEnvelope().toString()); - } - public void setName(String name) - { - this.name = name; - } - - @Override - public void cleanup() - { - } + public void revoke(MessageContext msgContext) + { + logger.info(msgContext.getEnvelope().toString()); + } - @Override - public void flowComplete( MessageContext arg0 ) - { - } + public void setName(String name) + { + this.name = name; + } + + @Override + public void cleanup() + { + } + + @Override + public void flowComplete( MessageContext arg0 ) + { + } } diff --git 
a/awsapi/src/com/cloud/bridge/auth/s3/AuthenticationHandler.java b/awsapi/src/com/cloud/bridge/auth/s3/AuthenticationHandler.java index b9519169632..5b20b02d0b3 100644 --- a/awsapi/src/com/cloud/bridge/auth/s3/AuthenticationHandler.java +++ b/awsapi/src/com/cloud/bridge/auth/s3/AuthenticationHandler.java @@ -18,197 +18,203 @@ package com.cloud.bridge.auth.s3; import java.sql.SQLException; +import javax.inject.Inject; import javax.servlet.http.HttpServletRequest; -import org.apache.axiom.soap.SOAPEnvelope; import org.apache.axiom.soap.SOAPBody; -import org.apache.log4j.Logger; -import org.apache.axis2.context.MessageContext; -import org.apache.axis2.engine.Handler; +import org.apache.axiom.soap.SOAPEnvelope; import org.apache.axis2.AxisFault; -import org.apache.axis2.description.HandlerDescription; +import org.apache.axis2.context.MessageContext; +import org.apache.axis2.description.HandlerDescription; import org.apache.axis2.description.Parameter; +import org.apache.axis2.engine.Handler; +import org.apache.log4j.Logger; import com.cloud.bridge.model.UserCredentialsVO; import com.cloud.bridge.persist.dao.UserCredentialsDaoImpl; import com.cloud.bridge.service.UserContext; import com.cloud.bridge.util.S3SoapAuth; -import com.cloud.utils.component.ComponentLocator; /* * For SOAP compatibility. */ public class AuthenticationHandler implements Handler { - protected final static Logger logger = Logger.getLogger(AuthenticationHandler.class); - protected final UserCredentialsDaoImpl ucDao = ComponentLocator.inject(UserCredentialsDaoImpl.class); - protected HandlerDescription handlerDesc = new HandlerDescription( "default handler" ); - private String name = "S3AuthenticationHandler"; - - public void init( HandlerDescription handlerdesc ) - { - this.handlerDesc = handlerdesc; - } - - public String getName() - { - //logger.debug( "getName entry S3AuthenticationHandler" + name ); - return name; - } + protected final static Logger logger = Logger.getLogger(AuthenticationHandler.class); + @Inject UserCredentialsDaoImpl ucDao; + protected HandlerDescription handlerDesc = new HandlerDescription( "default handler" ); + private String name = "S3AuthenticationHandler"; - public String toString() - { - return (name != null) ? name.toString() : null; - } - - public HandlerDescription getHandlerDesc() - { - return handlerDesc; - } - - public Parameter getParameter( String name ) - { - return handlerDesc.getParameter( name ); - } - - - /** - * Verify the request's authentication signature by extracting all the - * necessary parts of the request, obtaining the requestor's secret key, and - * recalculating the signature. - * - * On Signature mismatch raise an AxisFault (i.e., a SoapFault) with what Amazon S3 - * defines as a "Client.SignatureMismatch" error. - * - * Special case: need to deal with anonymous requests where no AWSAccessKeyId is - * given. In this case just pass the request on. 
- */ - public InvocationResponse invoke(MessageContext msgContext) throws AxisFault - { - String accessKey = null; - String operation = null; - String msgSig = null; - String timestamp = null; - String secretKey = null; - String temp = null; - - // [A] Obtain the HttpServletRequest object - HttpServletRequest httpObj =(HttpServletRequest)msgContext.getProperty("transport.http.servletRequest"); - if (null != httpObj) System.out.println("S3 SOAP auth test header access - acceptable Encoding type: "+ httpObj.getHeader("Accept-Encoding")); - - // [A] Try to recalculate the signature for non-anonymous requests - try - { SOAPEnvelope soapEnvelope = msgContext.getEnvelope(); - SOAPBody soapBody = soapEnvelope.getBody(); - String xmlBody = soapBody.toString(); - //logger.debug( "xmlrequest: " + xmlBody ); - - // -> did we get here yet its an EC2 request? - int offset = xmlBody.indexOf( "http://ec2.amazonaws.com" ); - if (-1 != offset) return InvocationResponse.CONTINUE; - - - // -> if it is anonymous request, then no access key should exist - int start = xmlBody.indexOf( "AWSAccessKeyId>" ); - if (-1 == start) { - UserContext.current().initContext(); - return InvocationResponse.CONTINUE; - } - temp = xmlBody.substring( start+15 ); - int end = temp.indexOf( "</" ); - accessKey = temp.substring( 0, end ); - - // -> what if we cannot find the user's key? - if (null != (secretKey = lookupSecretKey( accessKey ))) - { - // -> if any other field is missing, then the signature will not match - if ( null != (operation = soapBody.getFirstElementLocalName())) - operation = operation.trim(); - else operation = ""; - //logger.debug( "operation " + operation ); - - start = xmlBody.indexOf( "Timestamp>" ); - if ( -1 < start ) - { - temp = xmlBody.substring( start+10 ); - end = temp.indexOf( "</" ); - timestamp = temp.substring( 0, end ); - } - - start = xmlBody.indexOf( "Signature>" ); - if ( -1 < start ) - { - temp = xmlBody.substring( start+10 ); - end = temp.indexOf( "</" ); - msgSig = temp.substring( 0, end ); - } - } - - // -> for SOAP requests the Cloud API keys are sent here and only here - S3SoapAuth.verifySignature( msgSig, operation, timestamp, accessKey, secretKey ); + @Override + public void init( HandlerDescription handlerdesc ) + { + this.handlerDesc = handlerdesc; + } + + @Override + public String getName() + { + //logger.debug( "getName entry S3AuthenticationHandler" + name ); + return name; + } + + @Override + public String toString() + { + return (name != null) ? name.toString() : null; + } + + @Override + public HandlerDescription getHandlerDesc() + { + return handlerDesc; + } + + @Override + public Parameter getParameter( String name ) + { + return handlerDesc.getParameter( name ); + } + + + /** + * Verify the request's authentication signature by extracting all the + * necessary parts of the request, obtaining the requestor's secret key, and + * recalculating the signature. + * + * On Signature mismatch raise an AxisFault (i.e., a SoapFault) with what Amazon S3 + * defines as a "Client.SignatureMismatch" error. + * + * Special case: need to deal with anonymous requests where no AWSAccessKeyId is + * given. In this case just pass the request on. + */ + @Override + public InvocationResponse invoke(MessageContext msgContext) throws AxisFault + { + String accessKey = null; + String operation = null; + String msgSig = null; + String timestamp = null; + String secretKey = null; + String temp = null; + + // [A] Obtain the HttpServletRequest object + HttpServletRequest httpObj =(HttpServletRequest)msgContext.getProperty("transport.http.servletRequest"); + if (null != httpObj) System.out.println("S3 SOAP auth test header access - acceptable Encoding type: "+ httpObj.getHeader("Accept-Encoding")); + + // [A] Try to recalculate the signature for non-anonymous requests + try + { SOAPEnvelope soapEnvelope = msgContext.getEnvelope(); + SOAPBody soapBody = soapEnvelope.getBody(); + String xmlBody = soapBody.toString(); + //logger.debug( "xmlrequest: " + xmlBody ); + + // -> did we get here yet its an EC2 request? + int offset = xmlBody.indexOf( "http://ec2.amazonaws.com" ); + if (-1 != offset) return InvocationResponse.CONTINUE; + + + // -> if it is anonymous request, then no access key should exist + int start = xmlBody.indexOf( "AWSAccessKeyId>" ); + if (-1 == start) { + UserContext.current().initContext(); + return InvocationResponse.CONTINUE; + } + temp = xmlBody.substring( start+15 ); + int end = temp.indexOf( "</" ); + accessKey = temp.substring( 0, end ); + + // -> what if we cannot find the user's key? + if (null != (secretKey = lookupSecretKey( accessKey ))) + { + // -> if any other field is missing, then the signature will not match + if ( null != (operation = soapBody.getFirstElementLocalName())) + operation = operation.trim(); + else operation = ""; + //logger.debug( "operation " + operation ); + + start = xmlBody.indexOf( "Timestamp>" ); + if ( -1 < start ) + { + temp = xmlBody.substring( start+10 ); + end = temp.indexOf( "</" ); + timestamp = temp.substring( 0, end ); + } + + start = xmlBody.indexOf( "Signature>" ); + if ( -1 < start ) + { + temp = xmlBody.substring( start+10 ); + end = temp.indexOf( "</" ); + msgSig = temp.substring( 0, end ); + } + } + + // -> for SOAP requests the Cloud API keys are sent here and only here + S3SoapAuth.verifySignature( msgSig, operation, timestamp, accessKey, secretKey ); UserContext.current().initContext( accessKey, secretKey, accessKey, "S3 SOAP request", httpObj ); return InvocationResponse.CONTINUE; - } + } - - public void revoke(MessageContext msgContext) - { - logger.info(msgContext.getEnvelope().toString()); - } - public void setName(String name) - { - //logger.debug( "setName entry S3AuthenticationHandler " + name ); - this.name = name; - } - - /** - * Given the user's access key, then obtain his secret key in the user database. - * - * @param accessKey - a unique string allocated for each registered user - * @return the secret key or null of no matching user found - */ - private String lookupSecretKey( String accessKey ) - throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException - { - UserCredentialsVO cloudKeys = ucDao.getByAccessKey( accessKey ); - if ( null == cloudKeys ) { - logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" ); - return null; - } - else return cloudKeys.getSecretKey(); - } + public void revoke(MessageContext msgContext) + { + logger.info(msgContext.getEnvelope().toString()); + } + public void setName(String name) + { + //logger.debug( "setName entry S3AuthenticationHandler " + name ); + this.name = name; + } + /** + * Given the user's access key, then obtain his secret key in the user database. 
+ * + * @param accessKey - a unique string allocated for each registered user + * @return the secret key or null of no matching user found + */ + private String lookupSecretKey( String accessKey ) + throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException + { + UserCredentialsVO cloudKeys = ucDao.getByAccessKey( accessKey ); + if ( null == cloudKeys ) { + logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" ); + return null; + } + else return cloudKeys.getSecretKey(); + } + + @Override + public void cleanup() + { + //logger.debug( "cleanup entry S3AuthenticationHandler " ); + } + + @Override + public void flowComplete( MessageContext arg0 ) + { + //logger.debug( "flowComplete entry S3AuthenticationHandler " ); + } } diff --git a/awsapi/src/com/cloud/bridge/persist/dao/BucketPolicyDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/BucketPolicyDaoImpl.java index ce230c3d57a..dd354a39ffb 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/BucketPolicyDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/BucketPolicyDaoImpl.java @@ -23,7 +23,6 @@ import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.bridge.model.BucketPolicyVO; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -33,43 +32,43 @@ import com.cloud.utils.db.Transaction; @Local(value={BucketPolicyDao.class}) public class BucketPolicyDaoImpl extends GenericDaoBase implements BucketPolicyDao{ public static final Logger logger = Logger.getLogger(BucketPolicyDaoImpl.class); - public BucketPolicyDaoImpl(){ } + public BucketPolicyDaoImpl(){ } - /** - * Since a bucket policy can exist before its bucket we also need to keep the policy's owner - * so we can restrict who modifies it (because of the "s3:CreateBucket" action). - */ - @Override - public BucketPolicyVO getByName( String bucketName ) { - SearchBuilder searchByBucket = createSearchBuilder(); - searchByBucket.and("BucketName", searchByBucket.entity().getBucketName(), SearchCriteria.Op.EQ); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + /** + * Since a bucket policy can exist before its bucket we also need to keep the policy's owner + * so we can restrict who modifies it (because of the "s3:CreateBucket" action). 
+ */ + @Override + public BucketPolicyVO getByName( String bucketName ) { + SearchBuilder searchByBucket = createSearchBuilder(); + searchByBucket.and("BucketName", searchByBucket.entity().getBucketName(), SearchCriteria.Op.EQ); + Transaction txn = Transaction.open(Transaction.AWSAPI_DB); try { txn.start(); SearchCriteria sc = searchByBucket.create(); sc.setParameters("BucketName", bucketName); return findOneBy(sc); - }finally { - txn.close(); - } - - } - - @Override - public void deletePolicy( String bucketName ) { - SearchBuilder deleteByBucket = createSearchBuilder(); - deleteByBucket.and("BucketName", deleteByBucket.entity().getBucketName(), SearchCriteria.Op.EQ); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); - try { + }finally { + txn.close(); + } + + } + + @Override + public void deletePolicy( String bucketName ) { + SearchBuilder deleteByBucket = createSearchBuilder(); + deleteByBucket.and("BucketName", deleteByBucket.entity().getBucketName(), SearchCriteria.Op.EQ); + Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + try { txn.start(); SearchCriteria sc = deleteByBucket.create(); sc.setParameters("BucketName", bucketName); remove(sc); - - }finally { - txn.close(); - } - - } + + }finally { + txn.close(); + } + + } } diff --git a/awsapi/src/com/cloud/bridge/persist/dao/CloudStackConfigurationDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/CloudStackConfigurationDaoImpl.java index 511cfa73946..e77061169d9 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/CloudStackConfigurationDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/CloudStackConfigurationDaoImpl.java @@ -16,18 +16,12 @@ // under the License. package com.cloud.bridge.persist.dao; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - import javax.ejb.Local; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.bridge.model.CloudStackConfigurationVO; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; @@ -37,31 +31,31 @@ import com.cloud.utils.db.Transaction; @Component @Local(value={CloudStackConfigurationDao.class}) public class CloudStackConfigurationDaoImpl extends GenericDaoBase implements CloudStackConfigurationDao { - private static final Logger s_logger = Logger.getLogger(CloudStackConfigurationDaoImpl.class); - - final SearchBuilder NameSearch= createSearchBuilder(); - - public CloudStackConfigurationDaoImpl() { } - - - @Override - @DB - public String getConfigValue(String name) { + private static final Logger s_logger = Logger.getLogger(CloudStackConfigurationDaoImpl.class); + + final SearchBuilder NameSearch= createSearchBuilder(); + + public CloudStackConfigurationDaoImpl() { } + + + @Override + @DB + public String getConfigValue(String name) { NameSearch.and("name", NameSearch.entity().getName(), SearchCriteria.Op.EQ); Transaction txn = Transaction.currentTxn(); - try { - txn.start(); - SearchCriteria sc = NameSearch.create(); - sc.setParameters("name", name); - CloudStackConfigurationVO configItem = findOneBy(sc); - if (configItem == null) { - s_logger.warn("No configuration item found with name " + name); - return null; - } - return configItem.getValue(); + try { + txn.start(); + SearchCriteria sc = NameSearch.create(); + sc.setParameters("name", name); + CloudStackConfigurationVO configItem = findOneBy(sc); + if (configItem == null) { + 
s_logger.warn("No configuration item found with name " + name); + return null; + } + return configItem.getValue(); }finally { - - } - } - + + } + } + } diff --git a/awsapi/src/com/cloud/bridge/persist/dao/MultipartLoadDao.java b/awsapi/src/com/cloud/bridge/persist/dao/MultipartLoadDao.java index c9b5ec75b5f..c1a69dc5e47 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/MultipartLoadDao.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/MultipartLoadDao.java @@ -16,21 +16,13 @@ // under the License. package com.cloud.bridge.persist.dao; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; import java.sql.SQLException; -import java.sql.Timestamp; import java.util.ArrayList; import java.util.Calendar; import java.util.Date; import java.util.List; -import java.util.Properties; + +import javax.inject.Inject; import org.apache.log4j.Logger; @@ -40,71 +32,69 @@ import com.cloud.bridge.model.MultipartMetaVO; import com.cloud.bridge.service.core.s3.S3MetaDataEntry; import com.cloud.bridge.service.core.s3.S3MultipartPart; import com.cloud.bridge.service.core.s3.S3MultipartUpload; -import com.cloud.bridge.util.ConfigurationHelper; import com.cloud.bridge.util.OrderedPair; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.Transaction; public class MultipartLoadDao { - public static final Logger logger = Logger.getLogger(MultipartLoadDao.class); - - protected final MultipartMetaDao mpartMetaDao = ComponentLocator.inject(MultipartMetaDaoImpl.class); - protected final MultiPartPartsDao mpartPartsDao = ComponentLocator.inject(MultiPartPartsDaoImpl.class); - protected final MultiPartUploadsDao mpartUploadDao = ComponentLocator.inject(MultiPartUploadsDaoImpl.class); - - public MultipartLoadDao() {} - - /** - * If a multipart upload exists with the uploadId value then return the non-null creators - * accessKey. - * - * @param uploadId - * @return creator of the multipart upload, and NameKey of upload - */ - - - public OrderedPair multipartExits( int uploadId ) - throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException - { - return mpartUploadDao.multipartExits(uploadId); - } - - /** - * The multipart upload was either successfully completed or was aborted. In either case, we need - * to remove all of its state from the tables. Note that we have cascade deletes so all tables with - * uploadId as a foreign key are automatically cleaned. - * - * @param uploadId - * - */ - public void deleteUpload( int uploadId ) { - mpartUploadDao.deleteUpload(uploadId); - } - - /** - * The caller needs to know who initiated the multipart upload. - * - * @param uploadId - * @return the access key value defining the initiator - */ - public String getInitiator( int uploadId ) { - return mpartUploadDao.getAtrributeValue("AccessKey", uploadId); - } - - /** - * Create a new "in-process" multipart upload entry to keep track of its state. 
- * - * @param accessKey - * @param bucketName - * @param key - * @param cannedAccess - * - * @return if positive its the uploadId to be returned to the client - * - */ - public int initiateUpload( String accessKey, String bucketName, String key, String cannedAccess, S3MetaDataEntry[] meta ) { - int uploadId = -1; - Transaction txn = null; + public static final Logger logger = Logger.getLogger(MultipartLoadDao.class); + + @Inject MultipartMetaDao mpartMetaDao; + @Inject MultiPartPartsDao mpartPartsDao; + @Inject MultiPartUploadsDao mpartUploadDao; + + public MultipartLoadDao() {} + + /** + * If a multipart upload exists with the uploadId value then return the non-null creator's + * accessKey. + * + * @param uploadId + * @return creator of the multipart upload, and NameKey of upload + */ + + + public OrderedPair multipartExits( int uploadId ) + throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException + { + return mpartUploadDao.multipartExits(uploadId); + } + + /** + * The multipart upload was either successfully completed or was aborted. In either case, we need + * to remove all of its state from the tables. Note that we have cascade deletes so all tables with + * uploadId as a foreign key are automatically cleaned. + * + * @param uploadId + * + */ + public void deleteUpload( int uploadId ) { + mpartUploadDao.deleteUpload(uploadId); + } + + /** + * The caller needs to know who initiated the multipart upload. + * + * @param uploadId + * @return the access key value defining the initiator + */ + public String getInitiator( int uploadId ) { + return mpartUploadDao.getAtrributeValue("AccessKey", uploadId); + } + + /** + * Create a new "in-process" multipart upload entry to keep track of its state. + * + * @param accessKey + * @param bucketName + * @param key + * @param cannedAccess + * + * @return if positive, it is the uploadId to be returned to the client + * + */ + public int initiateUpload( String accessKey, String bucketName, String key, String cannedAccess, S3MetaDataEntry[] meta ) { + int uploadId = -1; + Transaction txn = null; try { txn = Transaction.open(Transaction.AWSAPI_DB); Date tod = new Date(); @@ -126,26 +116,26 @@ public class MultipartLoadDao { txn.commit(); } } - + return uploadId; } finally { txn.close(); } }
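// ---------------------------------------------------------------------------
// Editor's aside (sketch, not part of this patch): a caller-side view of the
// upload lifecycle using only MultipartLoadDao methods visible in this diff.
// All argument values are invented for illustration.
class MultipartLifecycleSketch {
    static void demo(MultipartLoadDao dao) throws Exception {
        int uploadId = dao.initiateUpload(
                "accessKey", "bucket", "videos/a.mp4", "private", null);
        dao.savePart(uploadId, 1, "9a0364b9e99bb480dd25e1f0284c8555", "/store/p1", 5242880);
        dao.savePart(uploadId, 2, "098f6bcd4621d373cade4e832627b4f6", "/store/p2", 1048576);
        // ... once the parts are glued into the final object, drop the bookkeeping;
        // cascade deletes clean the per-part and per-meta rows:
        dao.deleteUpload(uploadId);
    }
}
// ---------------------------------------------------------------------------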
- - /** - * Remember all the individual parts that make up the entire multipart upload so that once - * the upload is complete all the parts can be glued together into a single object. Note, - * the caller can over write an existing part. - * - * @param uploadId - * @param partNumber - * @param md5 - * @param storedPath - * @param size - * @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException - */ - public void savePart( int uploadId, int partNumber, String md5, String storedPath, int size ) { + + /** + * Remember all the individual parts that make up the entire multipart upload so that once + * the upload is complete all the parts can be glued together into a single object. Note, + * the caller can overwrite an existing part. + * + * @param uploadId + * @param partNumber + * @param md5 + * @param storedPath + * @param size + * @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException + */ + public void savePart( int uploadId, int partNumber, String md5, String storedPath, int size ) { try { MultiPartPartsVO partVO = null; @@ -169,32 +159,32 @@ } finally { } } - - /** - * It is possible for there to be a null canned access policy defined. - * @param uploadId - * @return the value defined in the x-amz-acl header or null - */ - public String getCannedAccess( int uploadId ) { - return mpartUploadDao.getAtrributeValue("x_amz_acl", uploadId); - } - - /** - * When the multipart are being composed into one object we need any meta data to be saved with - * the new re-constituted object. - * - * @param uploadId - * @return an array of S3MetaDataEntry (will be null if no meta values exist) - * @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException - */ - public S3MetaDataEntry[] getMeta( int uploadId ) - throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException - { - List metaList = new ArrayList(); - int count = 0; - List metaVO; + + /** + * It is possible for there to be a null canned access policy defined. + * @param uploadId + * @return the value defined in the x-amz-acl header or null + */ + public String getCannedAccess( int uploadId ) { + return mpartUploadDao.getAtrributeValue("x_amz_acl", uploadId); + } + + /** + * When the multiple parts are being composed into one object we need any meta data to be saved with + * the new re-constituted object. + * + * @param uploadId + * @return an array of S3MetaDataEntry (will be null if no meta values exist) + * @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException + */ + public S3MetaDataEntry[] getMeta( int uploadId ) + throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException + { + List metaList = new ArrayList(); + int count = 0; + List metaVO; try { - + metaVO = mpartMetaDao.getByUploadID(uploadId); for (MultipartMetaVO multipartMetaVO : metaVO) { S3MetaDataEntry oneMeta = new S3MetaDataEntry(); @@ -203,42 +193,42 @@ metaList.add( oneMeta ); count++; } - + if ( 0 == count ) return null; else return metaList.toArray(new S3MetaDataEntry[0]); - + } finally { } - }
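// ---------------------------------------------------------------------------
// Editor's aside (sketch, not part of this patch): getMeta() above returns null,
// not an empty array, when an upload carries no metadata, so callers must
// null-check before iterating. getName()/getValue() are the S3MetaDataEntry
// accessors used elsewhere in this diff.
class MetaConsumerSketch {
    static void printMeta(MultipartLoadDao dao, int uploadId) throws Exception {
        S3MetaDataEntry[] meta = dao.getMeta(uploadId);
        if (meta == null) return;                  // nothing recorded for this upload
        for (S3MetaDataEntry entry : meta)
            System.out.println(entry.getName() + "=" + entry.getValue());
    }
}
// ---------------------------------------------------------------------------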
- - /** - * The result has to be ordered by key and if there is more than one identical key then all the - * identical keys are ordered by create time. - * - * @param bucketName - * @param maxParts - * @param prefix - can be null - * @param keyMarker - can be null - * @param uploadIdMarker - can be null, should only be defined if keyMarker is not-null - * @return OrderedPair - * @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException - */ - public OrderedPair getInitiatedUploads( String bucketName, int maxParts, String prefix, String keyMarker, String uploadIdMarker ) - throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException - { - S3MultipartUpload[] inProgress = new S3MultipartUpload[maxParts]; - boolean isTruncated = false; - int i = 0; - int pos = 1; - List uploadList; - // -> SQL like condition requires the '%' as a wildcard marker - if (null != prefix) prefix = prefix + "%"; - + } + + /** + * The result has to be ordered by key and if there is more than one identical key then all the + * identical keys are ordered by create time. + * + * @param bucketName + * @param maxParts + * @param prefix - can be null + * @param keyMarker - can be null + * @param uploadIdMarker - can be null, should only be defined if keyMarker is not-null + * @return OrderedPair + * @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException + */ + public OrderedPair getInitiatedUploads( String bucketName, int maxParts, String prefix, String keyMarker, String uploadIdMarker ) + throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException + { + S3MultipartUpload[] inProgress = new S3MultipartUpload[maxParts]; + boolean isTruncated = false; + int i = 0; + int pos = 1; + List uploadList; + // -> SQL like condition requires the '%' as a wildcard marker + if (null != prefix) prefix = prefix + "%"; + try { - uploadList = mpartUploadDao.getInitiatedUploads(bucketName, maxParts, prefix, keyMarker, uploadIdMarker); + uploadList = mpartUploadDao.getInitiatedUploads(bucketName, maxParts, prefix, keyMarker, uploadIdMarker); for (MultiPartUploadsVO uploadsVO : uploadList) { Calendar tod = Calendar.getInstance(); tod.setTime(uploadsVO.getCreateTime()); @@ -258,33 +248,33 @@ }finally { } - } - - /** - * Return info on a range of upload parts that have already been stored in disk. - * Note that parts can be uploaded in any order yet we must returned an ordered list - * of parts thus we use the "ORDERED BY" clause to sort the list. - * - * @param uploadId - * @param maxParts - * @param startAt - * @return an array of S3MultipartPart objects - * @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException - */ - public S3MultipartPart[] getParts( int uploadId, int maxParts, int startAt ) - throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException - { - S3MultipartPart[] parts = new S3MultipartPart[maxParts]; - int i = 0; - List partsVO; - try { - - partsVO = mpartPartsDao.getParts(uploadId, startAt + maxParts + 1, startAt); - - for (MultiPartPartsVO partVO : partsVO) { + } + + /** + * Return info on a range of upload parts that have already been stored on disk. + * Note that parts can be uploaded in any order yet we must return an ordered list + * of parts thus we use the "ORDER BY" clause to sort the list.
+ * + * @param uploadId + * @param maxParts + * @param startAt + * @return an array of S3MultipartPart objects + * @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException + */ + public S3MultipartPart[] getParts( int uploadId, int maxParts, int startAt ) + throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException + { + S3MultipartPart[] parts = new S3MultipartPart[maxParts]; + int i = 0; + List partsVO; + try { + + partsVO = mpartPartsDao.getParts(uploadId, startAt + maxParts + 1, startAt); + + for (MultiPartPartsVO partVO : partsVO) { Calendar tod = Calendar.getInstance(); tod.setTime(partVO.getCreateTime()); - + parts[i] = new S3MultipartPart(); parts[i].setPartNumber(partVO.getPartNumber()); parts[i].setEtag(partVO.getMd5()); @@ -293,74 +283,74 @@ public class MultipartLoadDao { parts[i].setPath(partVO.getStoredPath()); i++; } - + if (i < maxParts) parts = (S3MultipartPart[])resizeArray(parts,i); return parts; - + } finally { } - } - - /** - * How many parts exist after the endMarker part number? - * - * @param uploadId - * @param endMarker - can be used to see if getUploadedParts was truncated - * @return number of parts with partNumber greater than endMarker - * @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException - */ - public int numParts( int uploadId, int endMarker ) { - return mpartPartsDao.getnumParts(uploadId, endMarker); + } + + /** + * How many parts exist after the endMarker part number? + * + * @param uploadId + * @param endMarker - can be used to see if getUploadedParts was truncated + * @return number of parts with partNumber greater than endMarker + * @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException + */ + public int numParts( int uploadId, int endMarker ) { + return mpartPartsDao.getnumParts(uploadId, endMarker); } - /** - * A multipart upload request can have zero to many meta data entries to be applied to the - * final object. We need to remember all of the objects meta data until the multipart is complete. - * - * @param uploadId - defines an in-process multipart upload - * @param meta - an array of meta data to be assocated with the uploadId value - * - */ - private void saveMultipartMeta( int uploadId, S3MetaDataEntry[] meta ) { - if (null == meta) return; - - Transaction txn = null; + /** + * A multipart upload request can have zero to many meta data entries to be applied to the + * final object. We need to remember all of the objects meta data until the multipart is complete. 
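// ---------------------------------------------------------------------------
// Editor's aside (sketch, not part of this patch): how getParts() and numParts()
// above combine to report S3-style IsTruncated semantics. getPartNumber() is
// assumed to be the usual getter paired with the setPartNumber() seen in the diff.
class ListPartsSketch {
    static void listPage(MultipartLoadDao dao, int uploadId, int maxParts) throws Exception {
        S3MultipartPart[] page = dao.getParts(uploadId, maxParts, 0);
        int lastPart = (page.length > 0) ? page[page.length - 1].getPartNumber() : 0;
        boolean truncated = dao.numParts(uploadId, lastPart) > 0; // parts beyond this page?
        System.out.println(page.length + " parts returned, truncated=" + truncated);
    }
}
// ---------------------------------------------------------------------------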
+ * + * @param uploadId - defines an in-process multipart upload + * @param meta - an array of meta data to be associated with the uploadId value + * + */ + private void saveMultipartMeta( int uploadId, S3MetaDataEntry[] meta ) { + if (null == meta) return; + + Transaction txn = null; try { txn = Transaction.open(Transaction.AWSAPI_DB); for( int i=0; i < meta.length; i++ ) { - S3MetaDataEntry entry = meta[i]; - MultipartMetaVO metaVO = new MultipartMetaVO(); - metaVO.setUploadID(uploadId); - metaVO.setName(entry.getName()); - metaVO.setValue(entry.getValue()); - metaVO=mpartMetaDao.persist(metaVO); + S3MetaDataEntry entry = meta[i]; + MultipartMetaVO metaVO = new MultipartMetaVO(); + metaVO.setUploadID(uploadId); + metaVO.setName(entry.getName()); + metaVO.setValue(entry.getValue()); + metaVO=mpartMetaDao.persist(metaVO); } txn.commit(); } finally { txn.close(); } - } - + } - /** - * Reallocates an array with a new size, and copies the contents - * of the old array to the new array. - * - * @param oldArray the old array, to be reallocated. - * @param newSize the new array size. - * @return A new array with the same contents. - */ + + /** + * Reallocates an array with a new size, and copies the contents + * of the old array to the new array. + * + * @param oldArray the old array, to be reallocated. + * @param newSize the new array size. + * @return A new array with the same contents. + */ private static Object resizeArray(Object oldArray, int newSize) { - int oldSize = java.lang.reflect.Array.getLength(oldArray); - Class elementType = oldArray.getClass().getComponentType(); - Object newArray = java.lang.reflect.Array.newInstance( - elementType,newSize); - int preserveLength = Math.min(oldSize,newSize); - if (preserveLength > 0) - System.arraycopy (oldArray,0,newArray,0,preserveLength); - return newArray; + int oldSize = java.lang.reflect.Array.getLength(oldArray); + Class elementType = oldArray.getClass().getComponentType(); + Object newArray = java.lang.reflect.Array.newInstance( + elementType,newSize); + int preserveLength = Math.min(oldSize,newSize); + if (preserveLength > 0) + System.arraycopy (oldArray,0,newArray,0,preserveLength); + return newArray; } } diff --git a/awsapi/src/com/cloud/bridge/persist/dao/SObjectDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/SObjectDaoImpl.java index 3e6815279bd..6d23757b8b5 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/SObjectDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/SObjectDaoImpl.java @@ -22,16 +22,13 @@ import java.util.List; import java.util.Set; import javax.ejb.Local; +import javax.inject.Inject; import org.springframework.stereotype.Component; -import com.cloud.bridge.model.SBucket; import com.cloud.bridge.model.SBucketVO; import com.cloud.bridge.model.SObjectItemVO; import com.cloud.bridge.model.SObjectVO; -import com.cloud.bridge.util.EntityParam; -import com.cloud.utils.component.ComponentLocator; -import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -40,18 +37,18 @@ import com.cloud.utils.db.Transaction; @Component @Local(value={SObjectDao.class}) public class SObjectDaoImpl extends GenericDaoBase implements SObjectDao { - protected final SObjectItemDao itemDao = ComponentLocator.inject(SObjectItemDaoImpl.class); - - public SObjectDaoImpl() {} + @Inject SObjectItemDao itemDao; - @Override - public SObjectVO getByNameKey(SBucketVO bucket, String nameKey) { - SObjectVO object = null; - SearchBuilder
SearchByName = createSearchBuilder(); - SearchByName.and("SBucketID", SearchByName.entity().getBucketID() , SearchCriteria.Op.EQ); - SearchByName.and("NameKey", SearchByName.entity().getNameKey() , SearchCriteria.Op.EQ); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); - try { + public SObjectDaoImpl() {} + + @Override + public SObjectVO getByNameKey(SBucketVO bucket, String nameKey) { + SObjectVO object = null; + SearchBuilder SearchByName = createSearchBuilder(); + SearchByName.and("SBucketID", SearchByName.entity().getBucketID() , SearchCriteria.Op.EQ); + SearchByName.and("NameKey", SearchByName.entity().getNameKey() , SearchCriteria.Op.EQ); + Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + try { txn.start(); SearchCriteria sc = SearchByName.create(); sc.setParameters("SBucketID", bucket.getId()); @@ -62,23 +59,23 @@ public class SObjectDaoImpl extends GenericDaoBase implements S itemDao.getItems(object.getId())); object.setItems(items); } - return object; - - }finally { + return object; + + }finally { txn.close(); - } - - } - - @Override - public List listBucketObjects(SBucketVO bucket, String prefix, String marker, int maxKeys) { - StringBuffer sb = new StringBuffer(); - List params = new ArrayList(); - SearchBuilder SearchByBucket = createSearchBuilder(); - List objects = new ArrayList(); - - SearchByBucket.and("SBucketID", SearchByBucket.entity().getBucketID(), SearchCriteria.Op.EQ); - SearchByBucket.and("DeletionMark", SearchByBucket.entity().getDeletionMark(), SearchCriteria.Op.NULL); + } + + } + + @Override + public List listBucketObjects(SBucketVO bucket, String prefix, String marker, int maxKeys) { + StringBuffer sb = new StringBuffer(); + List params = new ArrayList(); + SearchBuilder SearchByBucket = createSearchBuilder(); + List objects = new ArrayList(); + + SearchByBucket.and("SBucketID", SearchByBucket.entity().getBucketID(), SearchCriteria.Op.EQ); + SearchByBucket.and("DeletionMark", SearchByBucket.entity().getDeletionMark(), SearchCriteria.Op.NULL); Transaction txn = Transaction.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); try { txn.start(); @@ -91,19 +88,19 @@ public class SObjectDaoImpl extends GenericDaoBase implements S } return objects; }finally { - txn.close(); + txn.close(); } - } - - @Override - public List listAllBucketObjects(SBucketVO bucket, String prefix, String marker, int maxKeys) { - StringBuffer sb = new StringBuffer(); - List params = new ArrayList(); - SearchBuilder getAllBuckets = createSearchBuilder(); - List objects = new ArrayList(); - getAllBuckets.and("SBucketID", getAllBuckets.entity().getBucketID(), SearchCriteria.Op.EQ); + } - Transaction txn = Transaction.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); + @Override + public List listAllBucketObjects(SBucketVO bucket, String prefix, String marker, int maxKeys) { + StringBuffer sb = new StringBuffer(); + List params = new ArrayList(); + SearchBuilder getAllBuckets = createSearchBuilder(); + List objects = new ArrayList(); + getAllBuckets.and("SBucketID", getAllBuckets.entity().getBucketID(), SearchCriteria.Op.EQ); + + Transaction txn = Transaction.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); try { txn.start(); SearchCriteria sc = getAllBuckets.create(); @@ -115,8 +112,8 @@ public class SObjectDaoImpl extends GenericDaoBase implements S } return objects; }finally { - txn.close(); + txn.close(); } - - } + + } } diff --git 
a/awsapi/src/com/cloud/bridge/service/EC2MainServlet.java b/awsapi/src/com/cloud/bridge/service/EC2MainServlet.java index e8ccb0c6e8d..f5a2d21e134 100644 --- a/awsapi/src/com/cloud/bridge/service/EC2MainServlet.java +++ b/awsapi/src/com/cloud/bridge/service/EC2MainServlet.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.io.OutputStreamWriter; import java.util.UUID; +import javax.inject.Inject; import javax.servlet.RequestDispatcher; import javax.servlet.ServletConfig; import javax.servlet.ServletException; @@ -30,50 +31,48 @@ import javax.servlet.http.HttpServletResponse; import org.apache.log4j.Logger; import com.cloud.bridge.persist.dao.CloudStackConfigurationDao; -import com.cloud.bridge.persist.dao.CloudStackConfigurationDaoImpl; import com.cloud.bridge.util.ConfigurationHelper; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.DB; -import com.cloud.utils.db.Transaction; - -import net.sf.ehcache.Cache; @DB public class EC2MainServlet extends HttpServlet{ - private static final long serialVersionUID = 2201599478145974479L; - - public static final String EC2_REST_SERVLET_PATH="/rest/AmazonEC2/"; - public static final String EC2_SOAP_SERVLET_PATH="/services/AmazonEC2/"; - public static final String ENABLE_EC2_API="enable.ec2.api"; - private static boolean isEC2APIEnabled = false; - public static final Logger logger = Logger.getLogger(EC2MainServlet.class); - CloudStackConfigurationDao csDao = ComponentLocator.inject(CloudStackConfigurationDaoImpl.class); - - /** - * We build the path to where the keystore holding the WS-Security X509 certificates - * are stored. - */ - @DB - public void init( ServletConfig config ) throws ServletException { - try{ - ConfigurationHelper.preConfigureConfigPathFromServletContext(config.getServletContext()); - // check if API is enabled - String value = csDao.getConfigValue(ENABLE_EC2_API); - if(value != null){ - isEC2APIEnabled = Boolean.valueOf(value); - } - logger.info("Value of EC2 API Flag ::" + value); - }catch(Exception e){ - throw new ServletException("Error initializing awsapi: " + e.getMessage(), e); - } - } - - protected void doGet(HttpServletRequest req, HttpServletResponse resp) { - doGetOrPost(req, resp); + private static final long serialVersionUID = 2201599478145974479L; + + public static final String EC2_REST_SERVLET_PATH="/rest/AmazonEC2/"; + public static final String EC2_SOAP_SERVLET_PATH="/services/AmazonEC2/"; + public static final String ENABLE_EC2_API="enable.ec2.api"; + private static boolean isEC2APIEnabled = false; + public static final Logger logger = Logger.getLogger(EC2MainServlet.class); + @Inject CloudStackConfigurationDao csDao; + + /** + * We build the path to where the keystore holding the WS-Security X509 certificates + * are stored. 
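// ---------------------------------------------------------------------------
// Editor's aside (sketch, not part of this patch): doGetOrPost() below fans each
// request out to the Query/REST servlet when an "Action" parameter is present
// and to the SOAP servlet otherwise; condensed here to the essential decision.
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

class Ec2FanOutSketch {
    static void forward(HttpServletRequest request, HttpServletResponse response)
            throws ServletException, IOException {
        String path = (request.getParameter("Action") != null)
                ? EC2MainServlet.EC2_REST_SERVLET_PATH
                : EC2MainServlet.EC2_SOAP_SERVLET_PATH;
        request.getRequestDispatcher(path).forward(request, response);
    }
}
// ---------------------------------------------------------------------------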
+ */ + @Override + @DB + public void init( ServletConfig config ) throws ServletException { + try{ + ConfigurationHelper.preConfigureConfigPathFromServletContext(config.getServletContext()); + // check if API is enabled + String value = csDao.getConfigValue(ENABLE_EC2_API); + if(value != null){ + isEC2APIEnabled = Boolean.valueOf(value); + } + logger.info("Value of EC2 API Flag ::" + value); + }catch(Exception e){ + throw new ServletException("Error initializing awsapi: " + e.getMessage(), e); + } } - + + @Override + protected void doGet(HttpServletRequest req, HttpServletResponse resp) { + doGetOrPost(req, resp); + } + + @Override protected void doPost(HttpServletRequest req, HttpServletResponse resp) { - doGetOrPost(req, resp); + doGetOrPost(req, resp); } protected void doGetOrPost(HttpServletRequest request, HttpServletResponse response) { @@ -84,30 +83,30 @@ public class EC2MainServlet extends HttpServlet{ faultResponse(response, "404" , "EC2 API is disabled."); return; } - - if(action != null){ - //We presume it's a Query/Rest call - try { - RequestDispatcher dispatcher = request.getRequestDispatcher(EC2_REST_SERVLET_PATH); - dispatcher.forward(request, response); - } catch (ServletException e) { - throw new RuntimeException(e); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - else { - try { - request.getRequestDispatcher(EC2_SOAP_SERVLET_PATH).forward(request, response); - } catch (ServletException e) { - throw new RuntimeException(e); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - + + if(action != null){ + //We presume it's a Query/Rest call + try { + RequestDispatcher dispatcher = request.getRequestDispatcher(EC2_REST_SERVLET_PATH); + dispatcher.forward(request, response); + } catch (ServletException e) { + throw new RuntimeException(e); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + else { + try { + request.getRequestDispatcher(EC2_SOAP_SERVLET_PATH).forward(request, response); + } catch (ServletException e) { + throw new RuntimeException(e); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } - + private void faultResponse(HttpServletResponse response, String errorCode, String errorMessage) { try { OutputStreamWriter out = new OutputStreamWriter(response.getOutputStream()); diff --git a/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java b/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java index 4f748731504..8309bfd73f4 100644 --- a/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java +++ b/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java @@ -39,6 +39,7 @@ import java.util.List; import java.util.Properties; import java.util.UUID; +import javax.inject.Inject; import javax.servlet.ServletConfig; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; @@ -132,184 +133,183 @@ import com.cloud.bridge.service.core.ec2.EC2StopInstances; import com.cloud.bridge.service.core.ec2.EC2Volume; import com.cloud.bridge.service.core.ec2.EC2VolumeFilterSet; import com.cloud.bridge.service.exception.EC2ServiceException; +import com.cloud.bridge.service.exception.EC2ServiceException.ClientError; import com.cloud.bridge.service.exception.NoSuchObjectException; import com.cloud.bridge.service.exception.PermissionDeniedException; -import com.cloud.bridge.service.exception.EC2ServiceException.ClientError; import com.cloud.bridge.util.AuthenticationUtils; import com.cloud.bridge.util.ConfigurationHelper; import com.cloud.bridge.util.EC2RestAuth; import 
com.cloud.stack.models.CloudStackAccount; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.Transaction; public class EC2RestServlet extends HttpServlet { - private static final long serialVersionUID = -6168996266762804888L; - protected final UserCredentialsDaoImpl ucDao = ComponentLocator.inject(UserCredentialsDaoImpl.class); - protected final OfferingDaoImpl ofDao = ComponentLocator.inject(OfferingDaoImpl.class); - - public static final Logger logger = Logger.getLogger(EC2RestServlet.class); - - private OMFactory factory = OMAbstractFactory.getOMFactory(); - private XMLOutputFactory xmlOutFactory = XMLOutputFactory.newInstance(); - - private String pathToKeystore = null; - private String keystorePassword = null; - private String wsdlVersion = null; - private String version = null; - - boolean debug=true; + private static final long serialVersionUID = -6168996266762804888L; + @Inject UserCredentialsDaoImpl ucDao; + @Inject OfferingDaoImpl ofDao; - - /** - * We build the path to where the keystore holding the WS-Security X509 certificates - * are stored. - */ - @Override - public void init( ServletConfig config ) throws ServletException { - File propertiesFile = ConfigurationHelper.findConfigurationFile("ec2-service.properties"); - Properties EC2Prop = null; - - if (null != propertiesFile) { - logger.info("Use EC2 properties file: " + propertiesFile.getAbsolutePath()); - EC2Prop = new Properties(); - try { - EC2Prop.load( new FileInputStream( propertiesFile )); - } catch (FileNotFoundException e) { - logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e); - } catch (IOException e) { - logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e); - } - String keystore = EC2Prop.getProperty( "keystore" ); - keystorePassword = EC2Prop.getProperty( "keystorePass" ); - wsdlVersion = EC2Prop.getProperty( "WSDLVersion", "2010-11-15" ); - version = EC2Prop.getProperty( "cloudbridgeVersion", "UNKNOWN VERSION" ); - - String installedPath = System.getenv("CATALINA_HOME"); - if (installedPath == null) installedPath = System.getenv("CATALINA_BASE"); - if (installedPath == null) installedPath = System.getProperty("catalina.home"); - String webappPath = config.getServletContext().getRealPath("/"); - //pathToKeystore = new String( installedPath + File.separator + "webapps" + File.separator + webappName + File.separator + "WEB-INF" + File.separator + "classes" + File.separator + keystore ); - pathToKeystore = new String( webappPath + "WEB-INF" + File.separator + "classes" + File.separator + keystore ); - } - } - - @Override - protected void doGet(HttpServletRequest req, HttpServletResponse resp) { - doGetOrPost(req, resp); - } - + public static final Logger logger = Logger.getLogger(EC2RestServlet.class); + + private final OMFactory factory = OMAbstractFactory.getOMFactory(); + private final XMLOutputFactory xmlOutFactory = XMLOutputFactory.newInstance(); + + private String pathToKeystore = null; + private String keystorePassword = null; + private String wsdlVersion = null; + private String version = null; + + boolean debug=true; + + + /** + * We build the path to where the keystore holding the WS-Security X509 certificates + * are stored. 
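// ---------------------------------------------------------------------------
// Editor's aside (sketch, not part of this patch): init() below resolves the
// keystore named in ec2-service.properties to a file under the webapp's
// WEB-INF/classes directory; the same resolution shown in isolation.
import java.io.File;
import javax.servlet.ServletConfig;

class KeystorePathSketch {
    static String keystorePath(ServletConfig config, String keystoreName) {
        String webappPath = config.getServletContext().getRealPath("/");
        return webappPath + "WEB-INF" + File.separator + "classes"
                + File.separator + keystoreName;
    }
}
// ---------------------------------------------------------------------------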
+ */ @Override - protected void doPost(HttpServletRequest req, HttpServletResponse resp) { - doGetOrPost(req, resp); + public void init( ServletConfig config ) throws ServletException { + File propertiesFile = ConfigurationHelper.findConfigurationFile("ec2-service.properties"); + Properties EC2Prop = null; + + if (null != propertiesFile) { + logger.info("Use EC2 properties file: " + propertiesFile.getAbsolutePath()); + EC2Prop = new Properties(); + try { + EC2Prop.load( new FileInputStream( propertiesFile )); + } catch (FileNotFoundException e) { + logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e); + } catch (IOException e) { + logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e); + } + String keystore = EC2Prop.getProperty( "keystore" ); + keystorePassword = EC2Prop.getProperty( "keystorePass" ); + wsdlVersion = EC2Prop.getProperty( "WSDLVersion", "2010-11-15" ); + version = EC2Prop.getProperty( "cloudbridgeVersion", "UNKNOWN VERSION" ); + + String installedPath = System.getenv("CATALINA_HOME"); + if (installedPath == null) installedPath = System.getenv("CATALINA_BASE"); + if (installedPath == null) installedPath = System.getProperty("catalina.home"); + String webappPath = config.getServletContext().getRealPath("/"); + //pathToKeystore = new String( installedPath + File.separator + "webapps" + File.separator + webappName + File.separator + "WEB-INF" + File.separator + "classes" + File.separator + keystore ); + pathToKeystore = new String( webappPath + "WEB-INF" + File.separator + "classes" + File.separator + keystore ); + } + } + + @Override + protected void doGet(HttpServletRequest req, HttpServletResponse resp) { + doGetOrPost(req, resp); + } + + @Override + protected void doPost(HttpServletRequest req, HttpServletResponse resp) { + doGetOrPost(req, resp); } protected void doGetOrPost(HttpServletRequest request, HttpServletResponse response) { - - if(debug){ - System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.request_uri: "+request.getAttribute("javax.servlet.forward.request_uri")); - System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.context_path: "+request.getAttribute("javax.servlet.forward.context_path")); - System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.servlet_path: "+request.getAttribute("javax.servlet.forward.servlet_path")); - System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.path_info: "+request.getAttribute("javax.servlet.forward.path_info")); - System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.query_string: "+request.getAttribute("javax.servlet.forward.query_string")); - - } - - - String action = request.getParameter( "Action" ); - logRequest(request); - - // -> unauthenticated calls, should still be done over HTTPS - if (action.equalsIgnoreCase( "SetUserKeys" )) { - setUserKeys(request, response); - return; - } - if (action.equalsIgnoreCase( "CloudEC2Version" )) { - cloudEC2Version(request, response); - return; - } + if(debug){ + System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.request_uri: "+request.getAttribute("javax.servlet.forward.request_uri")); + System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.context_path: "+request.getAttribute("javax.servlet.forward.context_path")); + System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.servlet_path: "+request.getAttribute("javax.servlet.forward.servlet_path")); + 
System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.path_info: "+request.getAttribute("javax.servlet.forward.path_info")); + System.out.println("EC2RestServlet.doGetOrPost: javax.servlet.forward.query_string: "+request.getAttribute("javax.servlet.forward.query_string")); - // -> authenticated calls + } + + + String action = request.getParameter( "Action" ); + logRequest(request); + + // -> unauthenticated calls, should still be done over HTTPS + if (action.equalsIgnoreCase( "SetUserKeys" )) { + setUserKeys(request, response); + return; + } + + if (action.equalsIgnoreCase( "CloudEC2Version" )) { + cloudEC2Version(request, response); + return; + } + + // -> authenticated calls try { - if (!authenticateRequest( request, response )) return; + if (!authenticateRequest( request, response )) return; + + if (action.equalsIgnoreCase( "AllocateAddress" )) allocateAddress(request, response); + else if (action.equalsIgnoreCase( "AssociateAddress" )) associateAddress(request, response); + else if (action.equalsIgnoreCase( "AttachVolume" )) attachVolume(request, response ); + else if (action.equalsIgnoreCase( "AuthorizeSecurityGroupIngress" )) authorizeSecurityGroupIngress(request, response); + else if (action.equalsIgnoreCase( "CreateImage" )) createImage(request, response); + else if (action.equalsIgnoreCase( "CreateSecurityGroup" )) createSecurityGroup(request, response); + else if (action.equalsIgnoreCase( "CreateSnapshot" )) createSnapshot(request, response); + else if (action.equalsIgnoreCase( "CreateVolume" )) createVolume(request, response); + else if (action.equalsIgnoreCase( "DeleteSecurityGroup" )) deleteSecurityGroup(request, response); + else if (action.equalsIgnoreCase( "DeleteSnapshot" )) deleteSnapshot(request, response); + else if (action.equalsIgnoreCase( "DeleteVolume" )) deleteVolume(request, response); + else if (action.equalsIgnoreCase( "DeregisterImage" )) deregisterImage(request, response); + else if (action.equalsIgnoreCase( "DescribeAddresses" )) describeAddresses(request, response); + else if (action.equalsIgnoreCase( "DescribeAvailabilityZones" )) describeAvailabilityZones(request, response); + else if (action.equalsIgnoreCase( "DescribeImageAttribute" )) describeImageAttribute(request, response); + else if (action.equalsIgnoreCase( "DescribeImages" )) describeImages(request, response); + else if (action.equalsIgnoreCase( "DescribeInstanceAttribute" )) describeInstanceAttribute(request, response); + else if (action.equalsIgnoreCase( "DescribeInstances" )) describeInstances(request, response); + else if (action.equalsIgnoreCase( "DescribeSecurityGroups" )) describeSecurityGroups(request, response); + else if (action.equalsIgnoreCase( "DescribeSnapshots" )) describeSnapshots(request, response); + else if (action.equalsIgnoreCase( "DescribeVolumes" )) describeVolumes(request, response); + else if (action.equalsIgnoreCase( "DetachVolume" )) detachVolume(request, response); + else if (action.equalsIgnoreCase( "DisassociateAddress" )) disassociateAddress(request, response); + else if (action.equalsIgnoreCase( "ModifyImageAttribute" )) modifyImageAttribute(request, response); + else if (action.equalsIgnoreCase( "RebootInstances" )) rebootInstances(request, response); + else if (action.equalsIgnoreCase( "RegisterImage" )) registerImage(request, response); + else if (action.equalsIgnoreCase( "ReleaseAddress" )) releaseAddress(request, response); + else if (action.equalsIgnoreCase( "ResetImageAttribute" )) resetImageAttribute(request, response); + else if 
(action.equalsIgnoreCase( "RevokeSecurityGroupIngress")) revokeSecurityGroupIngress(request, response); + else if (action.equalsIgnoreCase( "RunInstances" )) runInstances(request, response); + else if (action.equalsIgnoreCase( "StartInstances" )) startInstances(request, response); + else if (action.equalsIgnoreCase( "StopInstances" )) stopInstances(request, response); + else if (action.equalsIgnoreCase( "TerminateInstances" )) terminateInstances(request, response); + else if (action.equalsIgnoreCase( "SetCertificate" )) setCertificate(request, response); + else if (action.equalsIgnoreCase( "DeleteCertificate" )) deleteCertificate(request, response); + else if (action.equalsIgnoreCase( "SetOfferMapping" )) setOfferMapping(request, response); + else if (action.equalsIgnoreCase( "DeleteOfferMapping" )) deleteOfferMapping(request, response); + else if (action.equalsIgnoreCase( "CreateKeyPair" )) createKeyPair(request, response); + else if (action.equalsIgnoreCase( "ImportKeyPair" )) importKeyPair(request, response); + else if (action.equalsIgnoreCase( "DeleteKeyPair" )) deleteKeyPair(request, response); + else if (action.equalsIgnoreCase( "DescribeKeyPairs" )) describeKeyPairs(request, response); + else if (action.equalsIgnoreCase( "GetPasswordData" )) getPasswordData(request, response); + else { + logger.error("Unsupported action " + action); + throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); + } - if (action.equalsIgnoreCase( "AllocateAddress" )) allocateAddress(request, response); - else if (action.equalsIgnoreCase( "AssociateAddress" )) associateAddress(request, response); - else if (action.equalsIgnoreCase( "AttachVolume" )) attachVolume(request, response ); - else if (action.equalsIgnoreCase( "AuthorizeSecurityGroupIngress" )) authorizeSecurityGroupIngress(request, response); - else if (action.equalsIgnoreCase( "CreateImage" )) createImage(request, response); - else if (action.equalsIgnoreCase( "CreateSecurityGroup" )) createSecurityGroup(request, response); - else if (action.equalsIgnoreCase( "CreateSnapshot" )) createSnapshot(request, response); - else if (action.equalsIgnoreCase( "CreateVolume" )) createVolume(request, response); - else if (action.equalsIgnoreCase( "DeleteSecurityGroup" )) deleteSecurityGroup(request, response); - else if (action.equalsIgnoreCase( "DeleteSnapshot" )) deleteSnapshot(request, response); - else if (action.equalsIgnoreCase( "DeleteVolume" )) deleteVolume(request, response); - else if (action.equalsIgnoreCase( "DeregisterImage" )) deregisterImage(request, response); - else if (action.equalsIgnoreCase( "DescribeAddresses" )) describeAddresses(request, response); - else if (action.equalsIgnoreCase( "DescribeAvailabilityZones" )) describeAvailabilityZones(request, response); - else if (action.equalsIgnoreCase( "DescribeImageAttribute" )) describeImageAttribute(request, response); - else if (action.equalsIgnoreCase( "DescribeImages" )) describeImages(request, response); - else if (action.equalsIgnoreCase( "DescribeInstanceAttribute" )) describeInstanceAttribute(request, response); - else if (action.equalsIgnoreCase( "DescribeInstances" )) describeInstances(request, response); - else if (action.equalsIgnoreCase( "DescribeSecurityGroups" )) describeSecurityGroups(request, response); - else if (action.equalsIgnoreCase( "DescribeSnapshots" )) describeSnapshots(request, response); - else if (action.equalsIgnoreCase( "DescribeVolumes" )) describeVolumes(request, response); - else if (action.equalsIgnoreCase( "DetachVolume" 
)) detachVolume(request, response); - else if (action.equalsIgnoreCase( "DisassociateAddress" )) disassociateAddress(request, response); - else if (action.equalsIgnoreCase( "ModifyImageAttribute" )) modifyImageAttribute(request, response); - else if (action.equalsIgnoreCase( "RebootInstances" )) rebootInstances(request, response); - else if (action.equalsIgnoreCase( "RegisterImage" )) registerImage(request, response); - else if (action.equalsIgnoreCase( "ReleaseAddress" )) releaseAddress(request, response); - else if (action.equalsIgnoreCase( "ResetImageAttribute" )) resetImageAttribute(request, response); - else if (action.equalsIgnoreCase( "RevokeSecurityGroupIngress")) revokeSecurityGroupIngress(request, response); - else if (action.equalsIgnoreCase( "RunInstances" )) runInstances(request, response); - else if (action.equalsIgnoreCase( "StartInstances" )) startInstances(request, response); - else if (action.equalsIgnoreCase( "StopInstances" )) stopInstances(request, response); - else if (action.equalsIgnoreCase( "TerminateInstances" )) terminateInstances(request, response); - else if (action.equalsIgnoreCase( "SetCertificate" )) setCertificate(request, response); - else if (action.equalsIgnoreCase( "DeleteCertificate" )) deleteCertificate(request, response); - else if (action.equalsIgnoreCase( "SetOfferMapping" )) setOfferMapping(request, response); - else if (action.equalsIgnoreCase( "DeleteOfferMapping" )) deleteOfferMapping(request, response); - else if (action.equalsIgnoreCase( "CreateKeyPair" )) createKeyPair(request, response); - else if (action.equalsIgnoreCase( "ImportKeyPair" )) importKeyPair(request, response); - else if (action.equalsIgnoreCase( "DeleteKeyPair" )) deleteKeyPair(request, response); - else if (action.equalsIgnoreCase( "DescribeKeyPairs" )) describeKeyPairs(request, response); - else if (action.equalsIgnoreCase( "GetPasswordData" )) getPasswordData(request, response); - else { - logger.error("Unsupported action " + action); - throw new EC2ServiceException(ClientError.Unsupported, "This operation is not available"); - } - } catch( EC2ServiceException e ) { - response.setStatus(e.getErrorCode()); - - if (e.getCause() != null && e.getCause() instanceof AxisFault) - faultResponse(response, ((AxisFault)e.getCause()).getFaultCode().getLocalPart(), e.getMessage()); - else { - logger.error("EC2ServiceException: " + e.getMessage(), e); - endResponse(response, e.toString()); - } + response.setStatus(e.getErrorCode()); + + if (e.getCause() != null && e.getCause() instanceof AxisFault) + faultResponse(response, ((AxisFault)e.getCause()).getFaultCode().getLocalPart(), e.getMessage()); + else { + logger.error("EC2ServiceException: " + e.getMessage(), e); + endResponse(response, e.toString()); + } } catch( PermissionDeniedException e ) { - logger.error("Unexpected exception: " + e.getMessage(), e); - response.setStatus(403); - endResponse(response, "Access denied"); - + logger.error("Unexpected exception: " + e.getMessage(), e); + response.setStatus(403); + endResponse(response, "Access denied"); + } catch( Exception e ) { - logger.error("Unexpected exception: " + e.getMessage(), e); - response.setStatus(500); - endResponse(response, e.toString()); - + logger.error("Unexpected exception: " + e.getMessage(), e); + response.setStatus(500); + endResponse(response, e.toString()); + } finally { - try { - response.flushBuffer(); - } catch (IOException e) { - logger.error("Unexpected exception " + e.getMessage(), e); - } + try { + response.flushBuffer(); + } catch (IOException e) { + 
logger.error("Unexpected exception " + e.getMessage(), e); + } } } - + /** * Provide an easy way to determine the version of the implementation running. * @@ -320,7 +320,7 @@ public class EC2RestServlet extends HttpServlet { response.setStatus(200); endResponse(response, version_response); } - + /** * This request registers the Cloud.com account holder to the EC2 service. The Cloud.com * account holder saves his API access and secret keys with the EC2 service so that @@ -340,54 +340,54 @@ public class EC2RestServlet extends HttpServlet { * As with all REST calls HTTPS should be used to ensure their security. */ private void setUserKeys( HttpServletRequest request, HttpServletResponse response ) { - String[] accessKey = null; - String[] secretKey = null; - Transaction txn = null; - try { - // -> all these parameters are required + String[] accessKey = null; + String[] secretKey = null; + Transaction txn = null; + try { + // -> all these parameters are required accessKey = request.getParameterValues( "accesskey" ); - if ( null == accessKey || 0 == accessKey.length ) { - response.sendError(530, "Missing accesskey parameter" ); - return; - } + if ( null == accessKey || 0 == accessKey.length ) { + response.sendError(530, "Missing accesskey parameter" ); + return; + } secretKey = request.getParameterValues( "secretkey" ); if ( null == secretKey || 0 == secretKey.length ) { - response.sendError(530, "Missing secretkey parameter" ); - return; + response.sendError(530, "Missing secretkey parameter" ); + return; } } catch( Exception e ) { - logger.error("SetUserKeys exception " + e.getMessage(), e); - response.setStatus(500); - endResponse(response, "SetUserKeys exception " + e.getMessage()); - return; + logger.error("SetUserKeys exception " + e.getMessage(), e); + response.setStatus(500); + endResponse(response, "SetUserKeys exception " + e.getMessage()); + return; } - - // prime UserContext here + + // prime UserContext here // logger.debug("initializing context"); - UserContext context = UserContext.current(); + UserContext context = UserContext.current(); try { txn = Transaction.open(Transaction.AWSAPI_DB); // -> use the keys to see if the account actually exists - ServiceProvider.getInstance().getEC2Engine().validateAccount( accessKey[0], secretKey[0] ); -/* UserCredentialsDao credentialDao = new UserCredentialsDao(); + ServiceProvider.getInstance().getEC2Engine().validateAccount( accessKey[0], secretKey[0] ); + /* UserCredentialsDao credentialDao = new UserCredentialsDao(); credentialDao.setUserKeys( ); -*/ UserCredentialsVO user = new UserCredentialsVO(accessKey[0], secretKey[0]); - ucDao.persist(user); - txn.commit(); - + */ UserCredentialsVO user = new UserCredentialsVO(accessKey[0], secretKey[0]); + ucDao.persist(user); + txn.commit(); + } catch( Exception e ) { - logger.error("SetUserKeys " + e.getMessage(), e); - response.setStatus(401); - endResponse(response, e.toString()); - txn.close(); - return; + logger.error("SetUserKeys " + e.getMessage(), e); + response.setStatus(401); + endResponse(response, e.toString()); + txn.close(); + return; } - response.setStatus(200); + response.setStatus(200); endResponse(response, "User keys set successfully"); } - + /** * The SOAP API for EC2 uses WS-Security to sign all client requests. This requires that * the client have a public/private key pair and the public key defined by a X509 certificate. @@ -405,46 +405,46 @@ public class EC2RestServlet extends HttpServlet { * simply over writes any previously stored value. 
*/ private void setCertificate( HttpServletRequest request, HttpServletResponse response ) - throws Exception { + throws Exception { Transaction txn = null; - try { - // [A] Pull the cert and cloud AccessKey from the request + try { + // [A] Pull the cert and cloud AccessKey from the request String[] certificate = request.getParameterValues( "cert" ); - if (null == certificate || 0 == certificate.length) { - response.sendError(530, "Missing cert parameter" ); - return; - } + if (null == certificate || 0 == certificate.length) { + response.sendError(530, "Missing cert parameter" ); + return; + } // logger.debug( "SetCertificate cert: [" + certificate[0] + "]" ); - + String [] accessKey = request.getParameterValues( "AWSAccessKeyId" ); - if ( null == accessKey || 0 == accessKey.length ) { - response.sendError(530, "Missing AWSAccessKeyId parameter" ); - return; - } + if ( null == accessKey || 0 == accessKey.length ) { + response.sendError(530, "Missing AWSAccessKeyId parameter" ); + return; + } - // [B] Open our keystore - FileInputStream fsIn = new FileInputStream( pathToKeystore ); - KeyStore certStore = KeyStore.getInstance( "JKS" ); - certStore.load( fsIn, keystorePassword.toCharArray()); - - // -> use the Cloud API key to save the cert in the keystore - // -> write the cert into the keystore on disk - Certificate userCert = null; - CertificateFactory cf = CertificateFactory.getInstance( "X.509" ); + // [B] Open our keystore + FileInputStream fsIn = new FileInputStream( pathToKeystore ); + KeyStore certStore = KeyStore.getInstance( "JKS" ); + certStore.load( fsIn, keystorePassword.toCharArray()); - ByteArrayInputStream bs = new ByteArrayInputStream( certificate[0].getBytes()); - while (bs.available() > 0) userCert = cf.generateCertificate(bs); - certStore.setCertificateEntry( accessKey[0], userCert ); + // -> use the Cloud API key to save the cert in the keystore + // -> write the cert into the keystore on disk + Certificate userCert = null; + CertificateFactory cf = CertificateFactory.getInstance( "X.509" ); - FileOutputStream fsOut = new FileOutputStream( pathToKeystore ); - certStore.store( fsOut, keystorePassword.toCharArray()); - - // [C] Associate the cert's uniqueId with the Cloud API keys + ByteArrayInputStream bs = new ByteArrayInputStream( certificate[0].getBytes()); + while (bs.available() > 0) userCert = cf.generateCertificate(bs); + certStore.setCertificateEntry( accessKey[0], userCert ); + + FileOutputStream fsOut = new FileOutputStream( pathToKeystore ); + certStore.store( fsOut, keystorePassword.toCharArray()); + + // [C] Associate the cert's uniqueId with the Cloud API keys String uniqueId = AuthenticationUtils.X509CertUniqueId( userCert ); logger.debug( "SetCertificate, uniqueId: " + uniqueId ); -/* UserCredentialsDao credentialDao = new UserCredentialsDao(); + /* UserCredentialsDao credentialDao = new UserCredentialsDao(); credentialDao.setCertificateId( accessKey[0], uniqueId ); -*/ + */ txn = Transaction.open(Transaction.AWSAPI_DB); UserCredentialsVO user = ucDao.getByAccessKey(accessKey[0]); user.setCertUniqueId(uniqueId); @@ -452,20 +452,20 @@ public class EC2RestServlet extends HttpServlet { response.setStatus(200); endResponse(response, "User certificate set successfully"); txn.commit(); - - } catch( NoSuchObjectException e ) { - logger.error("SetCertificate exception " + e.getMessage(), e); - response.sendError(404, "SetCertificate exception " + e.getMessage()); - + + } catch( NoSuchObjectException e ) { + logger.error("SetCertificate exception " + 
e.getMessage(), e); + response.sendError(404, "SetCertificate exception " + e.getMessage()); + } catch( Exception e ) { - logger.error("SetCertificate exception " + e.getMessage(), e); - response.sendError(500, "SetCertificate exception " + e.getMessage()); + logger.error("SetCertificate exception " + e.getMessage(), e); + response.sendError(500, "SetCertificate exception " + e.getMessage()); } finally { txn.close(); } - + } - + /** * The SOAP API for EC2 uses WS-Security to sign all client requests. This requires that * the client have a public/private key pair and the public key defined by a X509 certificate. * @@ -478,133 +478,133 @@ * algorithm. */ private void deleteCertificate( HttpServletRequest request, HttpServletResponse response ) - throws Exception { + throws Exception { Transaction txn = null; - try { + try { String [] accessKey = request.getParameterValues( "AWSAccessKeyId" ); - if ( null == accessKey || 0 == accessKey.length ) { - response.sendError(530, "Missing AWSAccessKeyId parameter" ); - return; - } + if ( null == accessKey || 0 == accessKey.length ) { + response.sendError(530, "Missing AWSAccessKeyId parameter" ); + return; + } - // -> delete the specified entry and save back to disk - FileInputStream fsIn = new FileInputStream( pathToKeystore ); - KeyStore certStore = KeyStore.getInstance( "JKS" ); - certStore.load( fsIn, keystorePassword.toCharArray()); + // -> delete the specified entry and save back to disk + FileInputStream fsIn = new FileInputStream( pathToKeystore ); + KeyStore certStore = KeyStore.getInstance( "JKS" ); + certStore.load( fsIn, keystorePassword.toCharArray()); - if ( certStore.containsAlias( accessKey[0] )) { - certStore.deleteEntry( accessKey[0] ); - FileOutputStream fsOut = new FileOutputStream( pathToKeystore ); - certStore.store( fsOut, keystorePassword.toCharArray()); - - // -> dis-associate the cert's uniqueId with the Cloud API keys -/* UserCredentialsDao credentialDao = new UserCredentialsDao(); credentialDao.setCertificateId( accessKey[0], null ); - -*/ txn = Transaction.open(Transaction.AWSAPI_DB); - UserCredentialsVO user = ucDao.getByAccessKey(accessKey[0]); - user.setCertUniqueId(null); - ucDao.update(user.getId(), user); - response.setStatus(200); - endResponse(response, "User certificate deleted successfully"); - txn.commit(); - } - else response.setStatus(404); - - } catch( NoSuchObjectException e ) { - logger.error("SetCertificate exception " + e.getMessage(), e); - response.sendError(404, "SetCertificate exception " + e.getMessage()); + if ( certStore.containsAlias( accessKey[0] )) { + certStore.deleteEntry( accessKey[0] ); + FileOutputStream fsOut = new FileOutputStream( pathToKeystore ); + certStore.store( fsOut, keystorePassword.toCharArray()); + + // -> dis-associate the cert's uniqueId with the Cloud API keys + /* UserCredentialsDao credentialDao = new UserCredentialsDao(); credentialDao.setCertificateId( accessKey[0], null ); - + */ txn = Transaction.open(Transaction.AWSAPI_DB); + UserCredentialsVO user = ucDao.getByAccessKey(accessKey[0]); + user.setCertUniqueId(null); + ucDao.update(user.getId(), user); + response.setStatus(200); + endResponse(response, "User certificate deleted successfully"); + txn.commit(); + } + else response.setStatus(404); + + } catch( NoSuchObjectException e ) { + logger.error("DeleteCertificate exception " + e.getMessage(), e); + response.sendError(404, "DeleteCertificate exception " + e.getMessage()); } catch( Exception e ) { - logger.error("DeleteCertificate exception " + e.getMessage(), e); -
response.sendError(500, "DeleteCertificate exception " + e.getMessage()); + logger.error("DeleteCertificate exception " + e.getMessage(), e); + response.sendError(500, "DeleteCertificate exception " + e.getMessage()); } finally { txn.close(); } } - + /** * Allow the caller to define the mapping between the Amazon instance type strings * (e.g., m1.small, cc1.4xlarge) and the cloudstack service offering ids. Setting * an existing mapping just over writes the prevous values. */ private void setOfferMapping( HttpServletRequest request, HttpServletResponse response ) { - String amazonOffer = null; - String cloudOffer = null; - - try { - // -> all these parameters are required + String amazonOffer = null; + String cloudOffer = null; + + try { + // -> all these parameters are required amazonOffer = request.getParameter( "amazonoffer" ); - if ( null == amazonOffer ) { - response.sendError(530, "Missing amazonoffer parameter" ); - return; - } + if ( null == amazonOffer ) { + response.sendError(530, "Missing amazonoffer parameter" ); + return; + } cloudOffer = request.getParameter( "cloudoffer" ); if ( null == cloudOffer ) { - response.sendError(530, "Missing cloudoffer parameter" ); - return; + response.sendError(530, "Missing cloudoffer parameter" ); + return; } } catch( Exception e ) { - logger.error("SetOfferMapping exception " + e.getMessage(), e); - response.setStatus(500); - endResponse(response, "SetOfferMapping exception " + e.getMessage()); - return; + logger.error("SetOfferMapping exception " + e.getMessage(), e); + response.setStatus(500); + endResponse(response, "SetOfferMapping exception " + e.getMessage()); + return; + } + + // validate account is admin level + try { + CloudStackAccount currentAccount = ServiceProvider.getInstance().getEC2Engine().getCurrentAccount(); + + if (currentAccount.getAccountType() != 1) { + logger.debug("SetOfferMapping called by non-admin user!"); + response.setStatus(500); + endResponse(response, "Permission denied for non-admin user to setOfferMapping!"); + return; + } + } catch (Exception e) { + logger.error("SetOfferMapping " + e.getMessage(), e); + response.setStatus(401); + endResponse(response, e.toString()); + return; } - - // validate account is admin level - try { - CloudStackAccount currentAccount = ServiceProvider.getInstance().getEC2Engine().getCurrentAccount(); - - if (currentAccount.getAccountType() != 1) { - logger.debug("SetOfferMapping called by non-admin user!"); - response.setStatus(500); - endResponse(response, "Permission denied for non-admin user to setOfferMapping!"); - return; - } - } catch (Exception e) { - logger.error("SetOfferMapping " + e.getMessage(), e); - response.setStatus(401); - endResponse(response, e.toString()); - return; - } try { - - ofDao.setOfferMapping( amazonOffer, cloudOffer ); - + + ofDao.setOfferMapping( amazonOffer, cloudOffer ); + } catch( Exception e ) { - logger.error("SetOfferMapping " + e.getMessage(), e); - response.setStatus(401); - endResponse(response, e.toString()); - return; + logger.error("SetOfferMapping " + e.getMessage(), e); + response.setStatus(401); + endResponse(response, e.toString()); + return; } - response.setStatus(200); + response.setStatus(200); endResponse(response, "offering mapping set successfully"); } private void deleteOfferMapping( HttpServletRequest request, HttpServletResponse response ) { - String amazonOffer = null; - - try { - // -> all these parameters are required + String amazonOffer = null; + + try { + // -> all these parameters are required amazonOffer = 
request.getParameter( "amazonoffer" ); - if ( null == amazonOffer ) { - response.sendError(530, "Missing amazonoffer parameter" ); - return; - } + if ( null == amazonOffer ) { + response.sendError(530, "Missing amazonoffer parameter" ); + return; + } } catch( Exception e ) { - logger.error("DeleteOfferMapping exception " + e.getMessage(), e); - response.setStatus(500); - endResponse(response, "DeleteOfferMapping exception " + e.getMessage()); - return; + logger.error("DeleteOfferMapping exception " + e.getMessage(), e); + response.setStatus(500); + endResponse(response, "DeleteOfferMapping exception " + e.getMessage()); + return; } - - // validate account is admin level - try { + + // validate account is admin level + try { CloudStackAccount currentAccount = ServiceProvider.getInstance().getEC2Engine().getCurrentAccount(); - + if (currentAccount.getAccountType() != 1) { logger.debug("deleteOfferMapping called by non-admin user!"); response.setStatus(500); @@ -619,14 +619,14 @@ public class EC2RestServlet extends HttpServlet { } try { - ofDao.deleteOfferMapping( amazonOffer ); + ofDao.deleteOfferMapping( amazonOffer ); } catch( Exception e ) { - logger.error("DeleteOfferMapping " + e.getMessage(), e); - response.setStatus(401); - endResponse(response, e.toString()); - return; + logger.error("DeleteOfferMapping " + e.getMessage(), e); + response.setStatus(401); + endResponse(response, e.toString()); + return; } - response.setStatus(200); + response.setStatus(200); endResponse(response, "offering mapping deleted successfully"); } @@ -641,257 +641,257 @@ public class EC2RestServlet extends HttpServlet { * response XML. */ private void attachVolume( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2Volume EC2request = new EC2Volume(); - - // -> all these parameters are required + throws ADBException, XMLStreamException, IOException { + EC2Volume EC2request = new EC2Volume(); + + // -> all these parameters are required String[] volumeId = request.getParameterValues( "VolumeId" ); - if ( null != volumeId && 0 < volumeId.length ) - EC2request.setId( volumeId[0] ); - else { response.sendError(530, "Missing VolumeId parameter" ); return; } + if ( null != volumeId && 0 < volumeId.length ) + EC2request.setId( volumeId[0] ); + else { response.sendError(530, "Missing VolumeId parameter" ); return; } String[] instanceId = request.getParameterValues( "InstanceId" ); if ( null != instanceId && 0 < instanceId.length ) - EC2request.setInstanceId( instanceId[0] ); - else { response.sendError(530, "Missing InstanceId parameter" ); return; } + EC2request.setInstanceId( instanceId[0] ); + else { response.sendError(530, "Missing InstanceId parameter" ); return; } String[] device = request.getParameterValues( "Device" ); if ( null != device && 0 < device.length ) - EC2request.setDevice( device[0] ); - else { response.sendError(530, "Missing Device parameter" ); return; } - - // -> execute the request - AttachVolumeResponse EC2response = EC2SoapServiceImpl.toAttachVolumeResponse( ServiceProvider.getInstance().getEC2Engine().attachVolume( EC2request )); - serializeResponse(response, EC2response); + EC2request.setDevice( device[0] ); + else { response.sendError(530, "Missing Device parameter" ); return; } + + // -> execute the request + AttachVolumeResponse EC2response = EC2SoapServiceImpl.toAttachVolumeResponse( ServiceProvider.getInstance().getEC2Engine().attachVolume( EC2request )); + serializeResponse(response, EC2response); } - + /** * The 
    /**
     * The SOAP equivalent of this function appears to allow multiple permissions per request, yet
     * in the REST API documentation only one permission is allowed.
     */
    private void revokeSecurityGroupIngress( HttpServletRequest request, HttpServletResponse response )
-        throws ADBException, XMLStreamException, IOException {
+            throws ADBException, XMLStreamException, IOException {
        EC2AuthorizeRevokeSecurityGroup EC2request = new EC2AuthorizeRevokeSecurityGroup();

        String[] groupName = request.getParameterValues( "GroupName" );
-        if ( null != groupName && 0 < groupName.length )
-            EC2request.setName( groupName[0] );
-        else { response.sendError(530, "Missing GroupName parameter" ); return; }
+        if ( null != groupName && 0 < groupName.length )
+            EC2request.setName( groupName[0] );
+        else { response.sendError(530, "Missing GroupName parameter" ); return; }

-        EC2IpPermission perm = new EC2IpPermission();
+        EC2IpPermission perm = new EC2IpPermission();

        String[] protocol = request.getParameterValues( "IpProtocol" );
-        if ( null != protocol && 0 < protocol.length )
-            perm.setProtocol( protocol[0] );
-        else { response.sendError(530, "Missing IpProtocol parameter" ); return; }
+        if ( null != protocol && 0 < protocol.length )
+            perm.setProtocol( protocol[0] );
+        else { response.sendError(530, "Missing IpProtocol parameter" ); return; }

        String[] fromPort = request.getParameterValues( "FromPort" );
-        if ( null != fromPort && 0 < fromPort.length )
-            perm.setProtocol( fromPort[0] );
-        else { response.sendError(530, "Missing FromPort parameter" ); return; }
+        if ( null != fromPort && 0 < fromPort.length )
+            // editorial fix: the old line re-set the protocol with the port value
+            // (assumes EC2IpPermission exposes a setFromPort setter)
+            perm.setFromPort( Integer.parseInt( fromPort[0] ));
+        else { response.sendError(530, "Missing FromPort parameter" ); return; }

        String[] toPort = request.getParameterValues( "ToPort" );
-        if ( null != toPort && 0 < toPort.length )
-            perm.setProtocol( toPort[0] );
-        else { response.sendError(530, "Missing ToPort parameter" ); return; }
-
-        String[] ranges = request.getParameterValues( "CidrIp" );
-        if ( null != ranges && 0 < ranges.length)
-            perm.addIpRange( ranges[0] );
-        else { response.sendError(530, "Missing CidrIp parameter" ); return; }
-
-        String[] user = request.getParameterValues( "SourceSecurityGroupOwnerId" );
-        if ( null == user || 0 == user.length) {
-            response.sendError(530, "Missing SourceSecurityGroupOwnerId parameter" );
-            return;
-        }
-
-        String[] name = request.getParameterValues( "SourceSecurityGroupName" );
-        if ( null == name || 0 == name.length) {
-            response.sendError(530, "Missing SourceSecurityGroupName parameter" );
-            return;
-        }
+        if ( null != toPort && 0 < toPort.length )
+            // editorial fix: same port/protocol mix-up as FromPort above
+            perm.setToPort( Integer.parseInt( toPort[0] ));
+        else { response.sendError(530, "Missing ToPort parameter" ); return; }

-        EC2SecurityGroup group = new EC2SecurityGroup();
-        group.setAccount( user[0] );
-        group.setName( name[0] );
-        perm.addUser( group );
-        EC2request.addIpPermission( perm );
-
-        // -> execute the request
+        String[] ranges = request.getParameterValues( "CidrIp" );
+        if ( null != ranges && 0 < ranges.length)
+            perm.addIpRange( ranges[0] );
+        else { response.sendError(530, "Missing CidrIp parameter" ); return; }
+
+        String[] user = request.getParameterValues( "SourceSecurityGroupOwnerId" );
+        if ( null == user || 0 == user.length) {
+            response.sendError(530, "Missing SourceSecurityGroupOwnerId parameter" );
+            return;
+        }
+
+        String[] name = request.getParameterValues( "SourceSecurityGroupName" );
+        if ( null == name || 0 == name.length) {
+            response.sendError(530, "Missing SourceSecurityGroupName parameter" );
+            return;
+        }
+
+        EC2SecurityGroup group = new EC2SecurityGroup();
+        group.setAccount( user[0] );
+        group.setName( name[0] );
+        perm.addUser( group );
+        EC2request.addIpPermission( perm );
+
+        // -> execute the request
        RevokeSecurityGroupIngressResponse EC2response = EC2SoapServiceImpl.toRevokeSecurityGroupIngressResponse(
-            ServiceProvider.getInstance().getEC2Engine().revokeSecurityGroup( EC2request ));
+                ServiceProvider.getInstance().getEC2Engine().revokeSecurityGroup( EC2request ));
        serializeResponse(response, EC2response);
    }

-    private void authorizeSecurityGroupIngress( HttpServletRequest request, HttpServletResponse response )
-        throws ADBException, XMLStreamException, IOException {
-        // -> parse the complicated paramters into our standard object
+    private void authorizeSecurityGroupIngress( HttpServletRequest request, HttpServletResponse response )
+            throws ADBException, XMLStreamException, IOException {
+        // -> parse the complicated parameters into our standard object
        EC2AuthorizeRevokeSecurityGroup EC2request = new EC2AuthorizeRevokeSecurityGroup();

        String[] groupName = request.getParameterValues( "GroupName" );
-        if ( null != groupName && 0 < groupName.length )
-            EC2request.setName( groupName[0] );
-        else { response.sendError(530, "Missing GroupName parameter" ); return; }
+        if ( null != groupName && 0 < groupName.length )
+            EC2request.setName( groupName[0] );
+        else { response.sendError(530, "Missing GroupName parameter" ); return; }

-        // -> not clear how many parameters there are until we fail to get IpPermissions.n.IpProtocol
-        int nCount = 1;
-        do
-        {   EC2IpPermission perm = new EC2IpPermission();
+        // -> not clear how many parameters there are until we fail to get IpPermissions.n.IpProtocol
+        int nCount = 1;
+        do
+        {   EC2IpPermission perm = new EC2IpPermission();

-            String[] protocol = request.getParameterValues( "IpPermissions." + nCount + ".IpProtocol" );
-            if ( null != protocol && 0 < protocol.length )
-                perm.setProtocol( protocol[0] );
-            else break;
+            String[] protocol = request.getParameterValues( "IpPermissions." + nCount + ".IpProtocol" );
+            if ( null != protocol && 0 < protocol.length )
+                perm.setProtocol( protocol[0] );
+            else break;

-            String[] fromPort = request.getParameterValues( "IpPermissions." + nCount + ".FromPort" );
-            if (null != fromPort && 0 < fromPort.length) perm.setProtocol( fromPort[0] );
+            String[] fromPort = request.getParameterValues( "IpPermissions." + nCount + ".FromPort" );
+            // editorial fix: was perm.setProtocol( fromPort[0] ), which clobbered the protocol
+            if (null != fromPort && 0 < fromPort.length) perm.setFromPort( Integer.parseInt( fromPort[0] ));

-            String[] toPort = request.getParameterValues( "IpPermissions." + nCount + ".ToPort" );
-            if (null != toPort && 0 < toPort.length) perm.setProtocol( toPort[0] );
-
-            // -> list: IpPermissions.n.IpRanges.m.CidrIp
-            int mCount = 1;
-            do
-            {   String[] ranges = request.getParameterValues( "IpPermissions." + nCount + ".IpRanges." + mCount + ".CidrIp" );
-                if ( null != ranges && 0 < ranges.length)
-                    perm.addIpRange( ranges[0] );
-                else break;
-                mCount++;
-
-            } while( true );
+            String[] toPort = request.getParameterValues( "IpPermissions." + nCount + ".ToPort" );
+            // editorial fix: was perm.setProtocol( toPort[0] )
+            if (null != toPort && 0 < toPort.length) perm.setToPort( Integer.parseInt( toPort[0] ));

-            // -> list: IpPermissions.n.Groups.m.UserId and IpPermissions.n.Groups.m.GroupName
-            mCount = 1;
-            do
-            {   String[] user = request.getParameterValues( "IpPermissions." + nCount + ".Groups." + mCount + ".UserId" );
-                if ( null == user || 0 == user.length) break;
-
-                String[] name = request.getParameterValues( "IpPermissions." + nCount + ".Groups." + mCount + ".GroupName" );
-                if ( null == name || 0 == name.length) break;
+            // -> list: IpPermissions.n.IpRanges.m.CidrIp
+            int mCount = 1;
+            do
+            {   String[] ranges = request.getParameterValues( "IpPermissions." + nCount + ".IpRanges." + mCount + ".CidrIp" );
+                if ( null != ranges && 0 < ranges.length)
+                    perm.addIpRange( ranges[0] );
+                else break;
+                mCount++;

-                EC2SecurityGroup group = new EC2SecurityGroup();
-                group.setAccount( user[0] );
-                group.setName( name[0] );
-                perm.addUser( group );
-                mCount++;
-
-            } while( true );
-
-            // -> multiple IP permissions can be specified per group name
-            EC2request.addIpPermission( perm );
-            nCount++;
-
-        } while( true );
-
-        if (1 == nCount) { response.sendError(530, "At least one IpPermissions required" ); return; }
+            } while( true );

-
-        // -> execute the request
+            // -> list: IpPermissions.n.Groups.m.UserId and IpPermissions.n.Groups.m.GroupName
+            mCount = 1;
+            do
+            {   String[] user = request.getParameterValues( "IpPermissions." + nCount + ".Groups." + mCount + ".UserId" );
+                if ( null == user || 0 == user.length) break;
+
+                String[] name = request.getParameterValues( "IpPermissions." + nCount + ".Groups." + mCount + ".GroupName" );
+                if ( null == name || 0 == name.length) break;
+
+                EC2SecurityGroup group = new EC2SecurityGroup();
+                group.setAccount( user[0] );
+                group.setName( name[0] );
+                perm.addUser( group );
+                mCount++;
+
+            } while( true );
+
+            // -> multiple IP permissions can be specified per group name
+            EC2request.addIpPermission( perm );
+            nCount++;
+
+        } while( true );
+
+        if (1 == nCount) { response.sendError(530, "At least one IpPermissions required" ); return; }
+
+
+        // -> execute the request
        AuthorizeSecurityGroupIngressResponse EC2response = EC2SoapServiceImpl.toAuthorizeSecurityGroupIngressResponse(
-            ServiceProvider.getInstance().getEC2Engine().authorizeSecurityGroup( EC2request ));
+                ServiceProvider.getInstance().getEC2Engine().authorizeSecurityGroup( EC2request ));
        serializeResponse(response, EC2response);
    }
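+    /*
+     * Editorial illustration (Action name assumed; not part of the original change):
+     * the indexed REST form parsed by the loops above looks like
+     *   ?Action=AuthorizeSecurityGroupIngress&GroupName=web
+     *     &IpPermissions.1.IpProtocol=tcp
+     *     &IpPermissions.1.FromPort=80&IpPermissions.1.ToPort=80
+     *     &IpPermissions.1.IpRanges.1.CidrIp=0.0.0.0/0
+     * with nCount/mCount advancing until a lookup returns null.
+     */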
+ mCount + ".GroupName" ); - if ( null == name || 0 == name.length) break; + // -> list: IpPermissions.n.IpRanges.m.CidrIp + int mCount = 1; + do + { String[] ranges = request.getParameterValues( "IpPermissions." + nCount + ".IpRanges." + mCount + ".CidrIp" ); + if ( null != ranges && 0 < ranges.length) + perm.addIpRange( ranges[0] ); + else break; + mCount++; - EC2SecurityGroup group = new EC2SecurityGroup(); - group.setAccount( user[0] ); - group.setName( name[0] ); - perm.addUser( group ); - mCount++; - - } while( true ); - - // -> multiple IP permissions can be specified per group name - EC2request.addIpPermission( perm ); - nCount++; - - } while( true ); - - if (1 == nCount) { response.sendError(530, "At least one IpPermissions required" ); return; } + } while( true ); - - // -> execute the request + // -> list: IpPermissions.n.Groups.m.UserId and IpPermissions.n.Groups.m.GroupName + mCount = 1; + do + { String[] user = request.getParameterValues( "IpPermissions." + nCount + ".Groups." + mCount + ".UserId" ); + if ( null == user || 0 == user.length) break; + + String[] name = request.getParameterValues( "IpPermissions." + nCount + ".Groups." + mCount + ".GroupName" ); + if ( null == name || 0 == name.length) break; + + EC2SecurityGroup group = new EC2SecurityGroup(); + group.setAccount( user[0] ); + group.setName( name[0] ); + perm.addUser( group ); + mCount++; + + } while( true ); + + // -> multiple IP permissions can be specified per group name + EC2request.addIpPermission( perm ); + nCount++; + + } while( true ); + + if (1 == nCount) { response.sendError(530, "At least one IpPermissions required" ); return; } + + + // -> execute the request AuthorizeSecurityGroupIngressResponse EC2response = EC2SoapServiceImpl.toAuthorizeSecurityGroupIngressResponse( - ServiceProvider.getInstance().getEC2Engine().authorizeSecurityGroup( EC2request )); + ServiceProvider.getInstance().getEC2Engine().authorizeSecurityGroup( EC2request )); serializeResponse(response, EC2response); } - + private void detachVolume( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2Volume EC2request = new EC2Volume(); - + throws ADBException, XMLStreamException, IOException { + EC2Volume EC2request = new EC2Volume(); + String[] volumeId = request.getParameterValues( "VolumeId" ); - if ( null != volumeId && 0 < volumeId.length ) - EC2request.setId(volumeId[0]); - else { response.sendError(530, "Missing VolumeId parameter" ); return; } + if ( null != volumeId && 0 < volumeId.length ) + EC2request.setId(volumeId[0]); + else { response.sendError(530, "Missing VolumeId parameter" ); return; } String[] instanceId = request.getParameterValues( "InstanceId" ); if ( null != instanceId && 0 < instanceId.length ) - EC2request.setInstanceId(instanceId[0]); + EC2request.setInstanceId(instanceId[0]); String[] device = request.getParameterValues( "Device" ); if ( null != device && 0 < device.length ) - EC2request.setDevice( device[0] ); - - // -> execute the request - DetachVolumeResponse EC2response = EC2SoapServiceImpl.toDetachVolumeResponse( ServiceProvider.getInstance().getEC2Engine().detachVolume( EC2request )); - serializeResponse(response, EC2response); + EC2request.setDevice( device[0] ); + + // -> execute the request + DetachVolumeResponse EC2response = EC2SoapServiceImpl.toDetachVolumeResponse( ServiceProvider.getInstance().getEC2Engine().detachVolume( EC2request )); + serializeResponse(response, EC2response); } private void deleteVolume( HttpServletRequest 
request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2Volume EC2request = new EC2Volume(); - - String[] volumeId = request.getParameterValues( "VolumeId" ); - if ( null != volumeId && 0 < volumeId.length ) - EC2request.setId(volumeId[0]); - else { response.sendError(530, "Missing VolumeId parameter" ); return; } + throws ADBException, XMLStreamException, IOException { + EC2Volume EC2request = new EC2Volume(); - // -> execute the request - DeleteVolumeResponse EC2response = EC2SoapServiceImpl.toDeleteVolumeResponse( ServiceProvider.getInstance().getEC2Engine().deleteVolume( EC2request )); - serializeResponse(response, EC2response); + String[] volumeId = request.getParameterValues( "VolumeId" ); + if ( null != volumeId && 0 < volumeId.length ) + EC2request.setId(volumeId[0]); + else { response.sendError(530, "Missing VolumeId parameter" ); return; } + + // -> execute the request + DeleteVolumeResponse EC2response = EC2SoapServiceImpl.toDeleteVolumeResponse( ServiceProvider.getInstance().getEC2Engine().deleteVolume( EC2request )); + serializeResponse(response, EC2response); } private void createVolume( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2CreateVolume EC2request = new EC2CreateVolume(); - + throws ADBException, XMLStreamException, IOException { + EC2CreateVolume EC2request = new EC2CreateVolume(); + String[] zoneName = request.getParameterValues( "AvailabilityZone" ); if ( null != zoneName && 0 < zoneName.length ) - EC2request.setZoneName( zoneName[0] ); - else { response.sendError(530, "Missing AvailabilityZone parameter" ); return; } - + EC2request.setZoneName( zoneName[0] ); + else { response.sendError(530, "Missing AvailabilityZone parameter" ); return; } + String[] size = request.getParameterValues( "Size" ); String[] snapshotId = request.getParameterValues("SnapshotId"); boolean useSnapshot = false; boolean useSize = false; - - if (null != size && 0 < size.length) - useSize = true; - - if (snapshotId != null && snapshotId.length != 0) - useSnapshot = true; - - if (useSize && !useSnapshot) { - EC2request.setSize( size[0] ); - } else if (useSnapshot && !useSize) { - EC2request.setSnapshotId(snapshotId[0]); - } else if (useSize && useSnapshot) { - response.sendError(530, "Size and SnapshotId parameters are mutually exclusive" ); return; - } else { - response.sendError(530, "Size or SnapshotId has to be specified" ); return; - } - - // -> execute the request - CreateVolumeResponse EC2response = EC2SoapServiceImpl.toCreateVolumeResponse( ServiceProvider.getInstance().getEC2Engine().createVolume( EC2request )); - serializeResponse(response, EC2response); + if (null != size && 0 < size.length) + useSize = true; + + if (snapshotId != null && snapshotId.length != 0) + useSnapshot = true; + + if (useSize && !useSnapshot) { + EC2request.setSize( size[0] ); + } else if (useSnapshot && !useSize) { + EC2request.setSnapshotId(snapshotId[0]); + } else if (useSize && useSnapshot) { + response.sendError(530, "Size and SnapshotId parameters are mutually exclusive" ); return; + } else { + response.sendError(530, "Size or SnapshotId has to be specified" ); return; + } + + + // -> execute the request + CreateVolumeResponse EC2response = EC2SoapServiceImpl.toCreateVolumeResponse( ServiceProvider.getInstance().getEC2Engine().createVolume( EC2request )); + serializeResponse(response, EC2response); } private void createSecurityGroup( HttpServletRequest request, HttpServletResponse 
response ) - throws ADBException, XMLStreamException, IOException { - - String groupName, groupDescription = null; - + throws ADBException, XMLStreamException, IOException { + + String groupName, groupDescription = null; + String[] name = request.getParameterValues( "GroupName" ); - if ( null != name && 0 < name.length ) - groupName = name[0]; - else { response.sendError(530, "Missing GroupName parameter" ); return; } - + if ( null != name && 0 < name.length ) + groupName = name[0]; + else { response.sendError(530, "Missing GroupName parameter" ); return; } + String[] desc = request.getParameterValues( "GroupDescription" ); if ( null != desc && 0 < desc.length ) - groupDescription = desc[0]; - else { response.sendError(530, "Missing GroupDescription parameter" ); return; } + groupDescription = desc[0]; + else { response.sendError(530, "Missing GroupDescription parameter" ); return; } - // -> execute the request + // -> execute the request CreateSecurityGroupResponse EC2response = EC2SoapServiceImpl.toCreateSecurityGroupResponse( ServiceProvider.getInstance().getEC2Engine().createSecurityGroup( groupName, groupDescription )); serializeResponse(response, EC2response); } private void deleteSecurityGroup( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - String groupName = null; - + throws ADBException, XMLStreamException, IOException { + String groupName = null; + String[] name = request.getParameterValues( "GroupName" ); if ( null != name && 0 < name.length ) - groupName = name[0]; + groupName = name[0]; else { response.sendError(530, "Missing GroupName parameter" ); return; } // -> execute the request @@ -900,217 +900,217 @@ public class EC2RestServlet extends HttpServlet { } private void deleteSnapshot( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - String snapshotId = null; - + throws ADBException, XMLStreamException, IOException { + String snapshotId = null; + String[] snapSet = request.getParameterValues( "SnapshotId" ); - if ( null != snapSet && 0 < snapSet.length ) - snapshotId = snapSet[0]; - else { response.sendError(530, "Missing SnapshotId parameter" ); return; } - - // -> execute the request - DeleteSnapshotResponse EC2response = EC2SoapServiceImpl.toDeleteSnapshotResponse( ServiceProvider.getInstance().getEC2Engine().deleteSnapshot( snapshotId )); - serializeResponse(response, EC2response); + if ( null != snapSet && 0 < snapSet.length ) + snapshotId = snapSet[0]; + else { response.sendError(530, "Missing SnapshotId parameter" ); return; } + + // -> execute the request + DeleteSnapshotResponse EC2response = EC2SoapServiceImpl.toDeleteSnapshotResponse( ServiceProvider.getInstance().getEC2Engine().deleteSnapshot( snapshotId )); + serializeResponse(response, EC2response); } private void createSnapshot( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - String volumeId = null; - + throws ADBException, XMLStreamException, IOException { + String volumeId = null; + String[] volSet = request.getParameterValues( "VolumeId" ); - if ( null != volSet && 0 < volSet.length ) - volumeId = volSet[0]; - else { response.sendError(530, "Missing VolumeId parameter" ); return; } - - // -> execute the request - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + if ( null != volSet && 0 < volSet.length ) + volumeId = volSet[0]; + else { response.sendError(530, "Missing VolumeId 
parameter" ); return; } + + // -> execute the request + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); CreateSnapshotResponse EC2response = EC2SoapServiceImpl.toCreateSnapshotResponse( engine.createSnapshot( volumeId ), engine); serializeResponse(response, EC2response); } - + private void deregisterImage( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2Image image = new EC2Image(); - + throws ADBException, XMLStreamException, IOException { + EC2Image image = new EC2Image(); + String[] imageId = request.getParameterValues( "ImageId" ); - if ( null != imageId && 0 < imageId.length ) - image.setId( imageId[0] ); - else { response.sendError(530, "Missing ImageId parameter" ); return; } - - // -> execute the request - DeregisterImageResponse EC2response = EC2SoapServiceImpl.toDeregisterImageResponse( ServiceProvider.getInstance().getEC2Engine().deregisterImage( image )); - serializeResponse(response, EC2response); + if ( null != imageId && 0 < imageId.length ) + image.setId( imageId[0] ); + else { response.sendError(530, "Missing ImageId parameter" ); return; } + + // -> execute the request + DeregisterImageResponse EC2response = EC2SoapServiceImpl.toDeregisterImageResponse( ServiceProvider.getInstance().getEC2Engine().deregisterImage( image )); + serializeResponse(response, EC2response); } - + private void createImage( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2CreateImage EC2request = new EC2CreateImage(); - + throws ADBException, XMLStreamException, IOException { + EC2CreateImage EC2request = new EC2CreateImage(); + String[] instanceId = request.getParameterValues( "InstanceId" ); - if ( null != instanceId && 0 < instanceId.length ) - EC2request.setInstanceId( instanceId[0] ); - else { response.sendError(530, "Missing InstanceId parameter" ); return; } - + if ( null != instanceId && 0 < instanceId.length ) + EC2request.setInstanceId( instanceId[0] ); + else { response.sendError(530, "Missing InstanceId parameter" ); return; } + String[] name = request.getParameterValues( "Name" ); if ( null != name && 0 < name.length ) - EC2request.setName( name[0] ); - else { response.sendError(530, "Missing Name parameter" ); return; } + EC2request.setName( name[0] ); + else { response.sendError(530, "Missing Name parameter" ); return; } String[] description = request.getParameterValues( "Description" ); if ( null != description && 0 < description.length ) - EC2request.setDescription( description[0] ); + EC2request.setDescription( description[0] ); - // -> execute the request + // -> execute the request CreateImageResponse EC2response = EC2SoapServiceImpl.toCreateImageResponse( ServiceProvider.getInstance().getEC2Engine().createImage( EC2request )); serializeResponse(response, EC2response); } private void registerImage( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2RegisterImage EC2request = new EC2RegisterImage(); - + throws ADBException, XMLStreamException, IOException { + EC2RegisterImage EC2request = new EC2RegisterImage(); + String[] location = request.getParameterValues( "ImageLocation" ); - if ( null != location && 0 < location.length ) - EC2request.setLocation( location[0] ); - else { response.sendError(530, "Missing ImageLocation parameter" ); return; } + if ( null != location && 0 < location.length ) + EC2request.setLocation( location[0] ); + else { 
response.sendError(530, "Missing ImageLocation parameter" ); return; } String[] cloudRedfined = request.getParameterValues( "Architecture" ); - if ( null != cloudRedfined && 0 < cloudRedfined.length ) - EC2request.setArchitecture( cloudRedfined[0] ); - else { response.sendError(530, "Missing Architecture parameter" ); return; } + if ( null != cloudRedfined && 0 < cloudRedfined.length ) + EC2request.setArchitecture( cloudRedfined[0] ); + else { response.sendError(530, "Missing Architecture parameter" ); return; } String[] name = request.getParameterValues( "Name" ); if ( null != name && 0 < name.length ) - EC2request.setName( name[0] ); + EC2request.setName( name[0] ); String[] description = request.getParameterValues( "Description" ); if ( null != description && 0 < description.length ) - EC2request.setDescription( description[0] ); + EC2request.setDescription( description[0] ); - // -> execute the request + // -> execute the request RegisterImageResponse EC2response = EC2SoapServiceImpl.toRegisterImageResponse( ServiceProvider.getInstance().getEC2Engine().registerImage( EC2request )); serializeResponse(response, EC2response); } private void modifyImageAttribute( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2Image image = new EC2Image(); - - // -> its interesting to note that the SOAP API docs has description but the REST API docs do not + throws ADBException, XMLStreamException, IOException { + EC2Image image = new EC2Image(); + + // -> its interesting to note that the SOAP API docs has description but the REST API docs do not String[] imageId = request.getParameterValues( "ImageId" ); - if ( null != imageId && 0 < imageId.length ) - image.setId( imageId[0] ); - else { response.sendError(530, "Missing ImageId parameter" ); return; } + if ( null != imageId && 0 < imageId.length ) + image.setId( imageId[0] ); + else { response.sendError(530, "Missing ImageId parameter" ); return; } String[] description = request.getParameterValues( "Description" ); - if ( null != description && 0 < description.length ) - image.setDescription( description[0] ); - else { response.sendError(530, "Missing Description parameter" ); return; } + if ( null != description && 0 < description.length ) + image.setDescription( description[0] ); + else { response.sendError(530, "Missing Description parameter" ); return; } - // -> execute the request - ModifyImageAttributeResponse EC2response = EC2SoapServiceImpl.toModifyImageAttributeResponse( ServiceProvider.getInstance().getEC2Engine().modifyImageAttribute( image )); - serializeResponse(response, EC2response); + // -> execute the request + ModifyImageAttributeResponse EC2response = EC2SoapServiceImpl.toModifyImageAttributeResponse( ServiceProvider.getInstance().getEC2Engine().modifyImageAttribute( image )); + serializeResponse(response, EC2response); } private void resetImageAttribute( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2Image image = new EC2Image(); - + throws ADBException, XMLStreamException, IOException { + EC2Image image = new EC2Image(); + String[] imageId = request.getParameterValues( "ImageId" ); - if ( null != imageId && 0 < imageId.length ) - image.setId( imageId[0] ); - else { response.sendError(530, "Missing ImageId parameter" ); return; } - - // -> execute the request - image.setDescription( "" ); - ResetImageAttributeResponse EC2response = EC2SoapServiceImpl.toResetImageAttributeResponse( 
ServiceProvider.getInstance().getEC2Engine().modifyImageAttribute( image )); - serializeResponse(response, EC2response); + if ( null != imageId && 0 < imageId.length ) + image.setId( imageId[0] ); + else { response.sendError(530, "Missing ImageId parameter" ); return; } + + // -> execute the request + image.setDescription( "" ); + ResetImageAttributeResponse EC2response = EC2SoapServiceImpl.toResetImageAttributeResponse( ServiceProvider.getInstance().getEC2Engine().modifyImageAttribute( image )); + serializeResponse(response, EC2response); } private void runInstances( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2RunInstances EC2request = new EC2RunInstances(); - - // -> so in the Amazon docs for this REST call there is no userData even though there is in the SOAP docs + throws ADBException, XMLStreamException, IOException { + EC2RunInstances EC2request = new EC2RunInstances(); + + // -> so in the Amazon docs for this REST call there is no userData even though there is in the SOAP docs String[] imageId = request.getParameterValues( "ImageId" ); - if ( null != imageId && 0 < imageId.length ) - EC2request.setTemplateId( imageId[0] ); - else { response.sendError(530, "Missing ImageId parameter" ); return; } + if ( null != imageId && 0 < imageId.length ) + EC2request.setTemplateId( imageId[0] ); + else { response.sendError(530, "Missing ImageId parameter" ); return; } String[] minCount = request.getParameterValues( "MinCount" ); - if ( null != minCount && 0 < minCount.length ) - EC2request.setMinCount( Integer.parseInt( minCount[0] )); - else { response.sendError(530, "Missing MinCount parameter" ); return; } + if ( null != minCount && 0 < minCount.length ) + EC2request.setMinCount( Integer.parseInt( minCount[0] )); + else { response.sendError(530, "Missing MinCount parameter" ); return; } String[] maxCount = request.getParameterValues( "MaxCount" ); - if ( null != maxCount && 0 < maxCount.length ) - EC2request.setMaxCount( Integer.parseInt( maxCount[0] )); - else { response.sendError(530, "Missing MaxCount parameter" ); return; } + if ( null != maxCount && 0 < maxCount.length ) + EC2request.setMaxCount( Integer.parseInt( maxCount[0] )); + else { response.sendError(530, "Missing MaxCount parameter" ); return; } String[] instanceType = request.getParameterValues( "InstanceType" ); - if ( null != instanceType && 0 < instanceType.length ) - EC2request.setInstanceType( instanceType[0] ); + if ( null != instanceType && 0 < instanceType.length ) + EC2request.setInstanceType( instanceType[0] ); String[] zoneName = request.getParameterValues( "Placement.AvailabilityZone" ); - if ( null != zoneName && 0 < zoneName.length ) - EC2request.setZoneName( zoneName[0] ); - - String[] size = request.getParameterValues("size"); - if (size != null) { - EC2request.setSize(Integer.valueOf(size[0])); - } + if ( null != zoneName && 0 < zoneName.length ) + EC2request.setZoneName( zoneName[0] ); - String[] keyName = request.getParameterValues("KeyName"); - if (keyName != null) { - EC2request.setKeyName(keyName[0]); - } - - // -> execute the request - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - RunInstancesResponse EC2response = EC2SoapServiceImpl.toRunInstancesResponse( engine.runInstances( EC2request ), engine); - serializeResponse(response, EC2response); + String[] size = request.getParameterValues("size"); + if (size != null) { + EC2request.setSize(Integer.valueOf(size[0])); + } + + String[] keyName = 
request.getParameterValues("KeyName"); + if (keyName != null) { + EC2request.setKeyName(keyName[0]); + } + + // -> execute the request + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + RunInstancesResponse EC2response = EC2SoapServiceImpl.toRunInstancesResponse( engine.runInstances( EC2request ), engine); + serializeResponse(response, EC2response); } private void rebootInstances( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { + throws ADBException, XMLStreamException, IOException { EC2RebootInstances EC2request = new EC2RebootInstances(); int count = 0; // -> load in all the "InstanceId.n" parameters if any - Enumeration names = request.getParameterNames(); + Enumeration names = request.getParameterNames(); while( names.hasMoreElements()) { String key = (String)names.nextElement(); if (key.startsWith("InstanceId")) { String[] value = request.getParameterValues( key ); if (null != value && 0 < value.length) { - EC2request.addInstanceId( value[0] ); - count++; + EC2request.addInstanceId( value[0] ); + count++; } } } if (0 == count) { response.sendError(530, "Missing InstanceId parameter" ); return; } - + // -> execute the request RebootInstancesResponse EC2response = EC2SoapServiceImpl.toRebootInstancesResponse( ServiceProvider.getInstance().getEC2Engine().rebootInstances(EC2request)); serializeResponse(response, EC2response); } private void startInstances( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { + throws ADBException, XMLStreamException, IOException { EC2StartInstances EC2request = new EC2StartInstances(); int count = 0; // -> load in all the "InstanceId.n" parameters if any - Enumeration names = request.getParameterNames(); + Enumeration names = request.getParameterNames(); while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("InstanceId")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) { - EC2request.addInstanceId( value[0] ); - count++; - } - } + String key = (String)names.nextElement(); + if (key.startsWith("InstanceId")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) { + EC2request.addInstanceId( value[0] ); + count++; + } + } } if (0 == count) { response.sendError(530, "Missing InstanceId parameter" ); return; } @@ -1120,50 +1120,50 @@ public class EC2RestServlet extends HttpServlet { } private void stopInstances( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2StopInstances EC2request = new EC2StopInstances(); - int count = 0; - - // -> load in all the "InstanceId.n" parameters if any - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("InstanceId")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) { - EC2request.addInstanceId( value[0] ); - count++; - } - } - } - if (0 == count) { response.sendError(530, "Missing InstanceId parameter" ); return; } - - // -> execute the request - StopInstancesResponse EC2response = EC2SoapServiceImpl.toStopInstancesResponse( ServiceProvider.getInstance().getEC2Engine().stopInstances( EC2request )); - serializeResponse(response, EC2response); - } - - private void terminateInstances( HttpServletRequest request, HttpServletResponse 
response ) - throws ADBException, XMLStreamException, IOException { + throws ADBException, XMLStreamException, IOException { EC2StopInstances EC2request = new EC2StopInstances(); int count = 0; // -> load in all the "InstanceId.n" parameters if any - Enumeration names = request.getParameterNames(); + Enumeration names = request.getParameterNames(); while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("InstanceId")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) { - EC2request.addInstanceId( value[0] ); - count++; - } - } + String key = (String)names.nextElement(); + if (key.startsWith("InstanceId")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) { + EC2request.addInstanceId( value[0] ); + count++; + } + } + } + if (0 == count) { response.sendError(530, "Missing InstanceId parameter" ); return; } + + // -> execute the request + StopInstancesResponse EC2response = EC2SoapServiceImpl.toStopInstancesResponse( ServiceProvider.getInstance().getEC2Engine().stopInstances( EC2request )); + serializeResponse(response, EC2response); + } + + private void terminateInstances( HttpServletRequest request, HttpServletResponse response ) + throws ADBException, XMLStreamException, IOException { + EC2StopInstances EC2request = new EC2StopInstances(); + int count = 0; + + // -> load in all the "InstanceId.n" parameters if any + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) { + String key = (String)names.nextElement(); + if (key.startsWith("InstanceId")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) { + EC2request.addInstanceId( value[0] ); + count++; + } + } } if (0 == count) { response.sendError(530, "Missing InstanceId parameter" ); return; } // -> execute the request - EC2request.setDestroyInstances( true ); + EC2request.setDestroyInstances( true ); TerminateInstancesResponse EC2response = EC2SoapServiceImpl.toTermInstancesResponse( ServiceProvider.getInstance().getEC2Engine().stopInstances( EC2request )); serializeResponse(response, EC2response); } @@ -1173,100 +1173,100 @@ public class EC2RestServlet extends HttpServlet { * resulting EC2 Amazon object into XML to return to the client. 
*/ private void describeAvailabilityZones( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2DescribeAvailabilityZones EC2request = new EC2DescribeAvailabilityZones(); - - // -> load in all the "ZoneName.n" parameters if any - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("ZoneName")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) EC2request.addZone( value[0] ); - } - } - // -> execute the request - DescribeAvailabilityZonesResponse EC2response = EC2SoapServiceImpl.toDescribeAvailabilityZonesResponse( ServiceProvider.getInstance().getEC2Engine().handleRequest( EC2request )); - serializeResponse(response, EC2response); + throws ADBException, XMLStreamException, IOException { + EC2DescribeAvailabilityZones EC2request = new EC2DescribeAvailabilityZones(); + + // -> load in all the "ZoneName.n" parameters if any + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) { + String key = (String)names.nextElement(); + if (key.startsWith("ZoneName")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) EC2request.addZone( value[0] ); + } + } + // -> execute the request + DescribeAvailabilityZonesResponse EC2response = EC2SoapServiceImpl.toDescribeAvailabilityZonesResponse( ServiceProvider.getInstance().getEC2Engine().handleRequest( EC2request )); + serializeResponse(response, EC2response); } private void describeImages( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2DescribeImages EC2request = new EC2DescribeImages(); - - // -> load in all the "ImageId.n" parameters if any, and ignore all other parameters - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("ImageId")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) EC2request.addImageSet( value[0] ); - } - } - // -> execute the request - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - DescribeImagesResponse EC2response = EC2SoapServiceImpl.toDescribeImagesResponse( engine.describeImages( EC2request )); - serializeResponse(response, EC2response); + throws ADBException, XMLStreamException, IOException { + EC2DescribeImages EC2request = new EC2DescribeImages(); + + // -> load in all the "ImageId.n" parameters if any, and ignore all other parameters + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) { + String key = (String)names.nextElement(); + if (key.startsWith("ImageId")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) EC2request.addImageSet( value[0] ); + } + } + // -> execute the request + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + DescribeImagesResponse EC2response = EC2SoapServiceImpl.toDescribeImagesResponse( engine.describeImages( EC2request )); + serializeResponse(response, EC2response); } - + private void describeImageAttribute( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2DescribeImages EC2request = new EC2DescribeImages(); - - // -> only works for queries about descriptions - String[] descriptions = request.getParameterValues( 
"Description" ); - if ( null != descriptions && 0 < descriptions.length ) { - String[] value = request.getParameterValues( "ImageId" ); - EC2request.addImageSet( value[0] ); - } - else { - response.sendError(501, "Unsupported - only description supported" ); - return; - } + throws ADBException, XMLStreamException, IOException { + EC2DescribeImages EC2request = new EC2DescribeImages(); - // -> execute the request - DescribeImageAttributeResponse EC2response = EC2SoapServiceImpl.toDescribeImageAttributeResponse( ServiceProvider.getInstance().getEC2Engine().describeImages( EC2request )); - serializeResponse(response, EC2response); + // -> only works for queries about descriptions + String[] descriptions = request.getParameterValues( "Description" ); + if ( null != descriptions && 0 < descriptions.length ) { + String[] value = request.getParameterValues( "ImageId" ); + EC2request.addImageSet( value[0] ); + } + else { + response.sendError(501, "Unsupported - only description supported" ); + return; + } + + // -> execute the request + DescribeImageAttributeResponse EC2response = EC2SoapServiceImpl.toDescribeImageAttributeResponse( ServiceProvider.getInstance().getEC2Engine().describeImages( EC2request )); + serializeResponse(response, EC2response); } - + private void describeInstances( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException - { - EC2DescribeInstances EC2request = new EC2DescribeInstances(); - - // -> load in all the "InstanceId.n" parameters if any - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) - { - String key = (String)names.nextElement(); - if (key.startsWith("InstanceId")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) EC2request.addInstanceId( value[0] ); - } - } - + throws ADBException, XMLStreamException, IOException + { + EC2DescribeInstances EC2request = new EC2DescribeInstances(); + + // -> load in all the "InstanceId.n" parameters if any + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) + { + String key = (String)names.nextElement(); + if (key.startsWith("InstanceId")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) EC2request.addInstanceId( value[0] ); + } + } + // -> are there any filters with this request? 
EC2Filter[] filterSet = extractFilters( request ); if (null != filterSet) { - EC2InstanceFilterSet ifs = new EC2InstanceFilterSet(); - for( int i=0; i < filterSet.length; i++ ) ifs.addFilter( filterSet[i] ); - EC2request.setFilterSet( ifs ); + EC2InstanceFilterSet ifs = new EC2InstanceFilterSet(); + for( int i=0; i < filterSet.length; i++ ) ifs.addFilter( filterSet[i] ); + EC2request.setFilterSet( ifs ); } - // -> execute the request - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - DescribeInstancesResponse EC2response = EC2SoapServiceImpl.toDescribeInstancesResponse( engine.describeInstances( EC2request ), engine); - serializeResponse(response, EC2response); - } - + // -> execute the request + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + DescribeInstancesResponse EC2response = EC2SoapServiceImpl.toDescribeInstancesResponse( engine.describeInstances( EC2request ), engine); + serializeResponse(response, EC2response); + } + private void describeAddresses( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { + throws ADBException, XMLStreamException, IOException { EC2DescribeAddresses ec2Request = new EC2DescribeAddresses(); // -> load in all the "PublicIp.n" parameters if any - Enumeration names = request.getParameterNames(); + Enumeration names = request.getParameterNames(); while( names.hasMoreElements()) { String key = (String)names.nextElement(); if (key.startsWith("PublicIp")) { @@ -1280,38 +1280,38 @@ public class EC2RestServlet extends HttpServlet { } private void allocateAddress( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - - AllocateAddressResponse ec2Response = EC2SoapServiceImpl.toAllocateAddressResponse( engine.allocateAddress()); - - serializeResponse(response, ec2Response); + throws ADBException, XMLStreamException, IOException { + + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + + AllocateAddressResponse ec2Response = EC2SoapServiceImpl.toAllocateAddressResponse( engine.allocateAddress()); + + serializeResponse(response, ec2Response); } private void releaseAddress( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + throws ADBException, XMLStreamException, IOException { - String publicIp = request.getParameter( "PublicIp" ); - if (publicIp == null) { - response.sendError(530, "Missing PublicIp parameter"); - return; - } - - EC2ReleaseAddress ec2Request = new EC2ReleaseAddress(); - if (ec2Request != null) { - ec2Request.setPublicIp(publicIp); - } - - ReleaseAddressResponse EC2Response = EC2SoapServiceImpl.toReleaseAddressResponse( engine.releaseAddress( ec2Request )); + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - serializeResponse(response, EC2Response); + String publicIp = request.getParameter( "PublicIp" ); + if (publicIp == null) { + response.sendError(530, "Missing PublicIp parameter"); + return; + } + + EC2ReleaseAddress ec2Request = new EC2ReleaseAddress(); + if (ec2Request != null) { + ec2Request.setPublicIp(publicIp); + } + + ReleaseAddressResponse EC2Response = EC2SoapServiceImpl.toReleaseAddressResponse( engine.releaseAddress( ec2Request )); + + serializeResponse(response, EC2Response); } private void associateAddress( HttpServletRequest request, 
HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { + throws ADBException, XMLStreamException, IOException { EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); String publicIp = request.getParameter( "PublicIp" ); @@ -1324,20 +1324,20 @@ public class EC2RestServlet extends HttpServlet { response.sendError(530, "Missing InstanceId parameter" ); return; } - + EC2AssociateAddress ec2Request = new EC2AssociateAddress(); if (ec2Request != null) { - ec2Request.setInstanceId(instanceId); - ec2Request.setPublicIp(publicIp); + ec2Request.setInstanceId(instanceId); + ec2Request.setPublicIp(publicIp); } AssociateAddressResponse ec2Response = EC2SoapServiceImpl.toAssociateAddressResponse( engine.associateAddress( ec2Request )); - + serializeResponse(response, ec2Response); } private void disassociateAddress( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { + throws ADBException, XMLStreamException, IOException { EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); String publicIp = request.getParameter( "PublicIp" ); @@ -1345,145 +1345,145 @@ public class EC2RestServlet extends HttpServlet { response.sendError(530, "Missing PublicIp parameter" ); return; } - + EC2DisassociateAddress ec2Request = new EC2DisassociateAddress(); if (ec2Request != null) { - ec2Request.setPublicIp(publicIp); + ec2Request.setPublicIp(publicIp); } - + DisassociateAddressResponse ec2Response = EC2SoapServiceImpl.toDisassociateAddressResponse( engine.disassociateAddress( ec2Request ) ); - + serializeResponse(response, ec2Response); } - + private void describeSecurityGroups( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException - { - EC2DescribeSecurityGroups EC2request = new EC2DescribeSecurityGroups(); - - // -> load in all the "GroupName.n" parameters if any - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("GroupName")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) EC2request.addGroupName( value[0] ); - } - } - + throws ADBException, XMLStreamException, IOException + { + EC2DescribeSecurityGroups EC2request = new EC2DescribeSecurityGroups(); + + // -> load in all the "GroupName.n" parameters if any + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) { + String key = (String)names.nextElement(); + if (key.startsWith("GroupName")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) EC2request.addGroupName( value[0] ); + } + } + // -> are there any filters with this request? 
EC2Filter[] filterSet = extractFilters( request ); if (null != filterSet) { - EC2GroupFilterSet gfs = new EC2GroupFilterSet(); - for (EC2Filter filter : filterSet) gfs.addFilter( filter ); - EC2request.setFilterSet( gfs ); + EC2GroupFilterSet gfs = new EC2GroupFilterSet(); + for (EC2Filter filter : filterSet) gfs.addFilter( filter ); + EC2request.setFilterSet( gfs ); } - - // -> execute the request - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - - DescribeSecurityGroupsResponse EC2response = EC2SoapServiceImpl.toDescribeSecurityGroupsResponse( engine.describeSecurityGroups( EC2request )); - serializeResponse(response, EC2response); - } - - + + // -> execute the request + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + + DescribeSecurityGroupsResponse EC2response = EC2SoapServiceImpl.toDescribeSecurityGroupsResponse( engine.describeSecurityGroups( EC2request )); + serializeResponse(response, EC2response); + } + + private void describeInstanceAttribute( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException { - EC2DescribeInstances EC2request = new EC2DescribeInstances(); - String instanceType = null; - - // -> we are only handling queries about the "Attribute=instanceType" - Enumeration names = request.getParameterNames(); - while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("Attribute")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length && value[0].equalsIgnoreCase( "instanceType" )) { - instanceType = value[0]; - break; - } - } - } - if ( null != instanceType ) { - String[] value = request.getParameterValues( "InstanceId" ); - EC2request.addInstanceId( value[0] ); - } - else { - response.sendError(501, "Unsupported - only instanceType supported" ); - return; - } - - // -> execute the request - DescribeInstanceAttributeResponse EC2response = EC2SoapServiceImpl.toDescribeInstanceAttributeResponse( ServiceProvider.getInstance().getEC2Engine().describeInstances(EC2request)); - serializeResponse(response, EC2response); + throws ADBException, XMLStreamException, IOException { + EC2DescribeInstances EC2request = new EC2DescribeInstances(); + String instanceType = null; + + // -> we are only handling queries about the "Attribute=instanceType" + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) { + String key = (String)names.nextElement(); + if (key.startsWith("Attribute")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length && value[0].equalsIgnoreCase( "instanceType" )) { + instanceType = value[0]; + break; + } + } + } + if ( null != instanceType ) { + String[] value = request.getParameterValues( "InstanceId" ); + EC2request.addInstanceId( value[0] ); + } + else { + response.sendError(501, "Unsupported - only instanceType supported" ); + return; + } + + // -> execute the request + DescribeInstanceAttributeResponse EC2response = EC2SoapServiceImpl.toDescribeInstanceAttributeResponse( ServiceProvider.getInstance().getEC2Engine().describeInstances(EC2request)); + serializeResponse(response, EC2response); } - + private void describeSnapshots( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException - { - EC2DescribeSnapshots EC2request = new EC2DescribeSnapshots(); - - // -> load in all the "SnapshotId.n" parameters if any, and ignore any other parameters - Enumeration names = 
request.getParameterNames(); - while( names.hasMoreElements()) - { - String key = (String)names.nextElement(); - if (key.startsWith("SnapshotId")) { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) EC2request.addSnapshotId( value[0] ); - } - } - + throws ADBException, XMLStreamException, IOException + { + EC2DescribeSnapshots EC2request = new EC2DescribeSnapshots(); + + // -> load in all the "SnapshotId.n" parameters if any, and ignore any other parameters + Enumeration names = request.getParameterNames(); + while( names.hasMoreElements()) + { + String key = (String)names.nextElement(); + if (key.startsWith("SnapshotId")) { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) EC2request.addSnapshotId( value[0] ); + } + } + // -> are there any filters with this request? EC2Filter[] filterSet = extractFilters( request ); if (null != filterSet) { - EC2SnapshotFilterSet sfs = new EC2SnapshotFilterSet(); - for( int i=0; i < filterSet.length; i++ ) sfs.addFilter( filterSet[i] ); - EC2request.setFilterSet( sfs ); + EC2SnapshotFilterSet sfs = new EC2SnapshotFilterSet(); + for( int i=0; i < filterSet.length; i++ ) sfs.addFilter( filterSet[i] ); + EC2request.setFilterSet( sfs ); } - // -> execute the request - EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); - DescribeSnapshotsResponse EC2response = EC2SoapServiceImpl.toDescribeSnapshotsResponse( engine.handleRequest( EC2request )); - serializeResponse(response, EC2response); - } + // -> execute the request + EC2Engine engine = ServiceProvider.getInstance().getEC2Engine(); + DescribeSnapshotsResponse EC2response = EC2SoapServiceImpl.toDescribeSnapshotsResponse( engine.handleRequest( EC2request )); + serializeResponse(response, EC2response); + } + - private void describeVolumes( HttpServletRequest request, HttpServletResponse response ) - throws ADBException, XMLStreamException, IOException - { + throws ADBException, XMLStreamException, IOException + { EC2DescribeVolumes EC2request = new EC2DescribeVolumes(); // -> load in all the "VolumeId.n" parameters if any Enumeration names = request.getParameterNames(); while( names.hasMoreElements()) { - String key = (String)names.nextElement(); - if (key.startsWith("VolumeId")) - { - String[] value = request.getParameterValues( key ); - if (null != value && 0 < value.length) EC2request.addVolumeId( value[0] ); - } + String key = (String)names.nextElement(); + if (key.startsWith("VolumeId")) + { + String[] value = request.getParameterValues( key ); + if (null != value && 0 < value.length) EC2request.addVolumeId( value[0] ); + } } - + // -> are there any filters with this request? 
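The snapshot and volume handlers delegate this question to extractFilters(), reindented in the next hunk. A sketch of the wire convention it decodes — Filter.&lt;i&gt;.Name paired with Filter.&lt;i&gt;.Value.&lt;j&gt;, both indexes 1-based and stopping at the first gap; the time-filter special case (values taken literally rather than regex-encoded) is omitted here:

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    // Sketch of the Filter.<i>.Name / Filter.<i>.Value.<j> decoding performed
    // by extractFilters(); the Map stands in for the request parameters.
    public class FilterDecode {
        static Map<String, List<String>> decode(Map<String, String> q) {
            Map<String, List<String>> filters = new LinkedHashMap<String, List<String>>();
            for (int i = 1; q.containsKey("Filter." + i + ".Name"); i++) {
                List<String> values = new ArrayList<String>();
                for (int j = 1; q.containsKey("Filter." + i + ".Value." + j); j++) {
                    values.add(q.get("Filter." + i + ".Value." + j));
                }
                filters.put(q.get("Filter." + i + ".Name"), values);
            }
            return filters;
        }

        public static void main(String[] args) {
            Map<String, String> q = new LinkedHashMap<String, String>();
            q.put("Filter.1.Name", "attachment.attach-time");
            q.put("Filter.1.Value.1", "2012-06-01T00:00:00.000Z");
            q.put("Filter.2.Name", "status");
            q.put("Filter.2.Value.1", "available");
            q.put("Filter.2.Value.2", "in-use");
            System.out.println(decode(q));
            // {attachment.attach-time=[2012-06-01T00:00:00.000Z], status=[available, in-use]}
        }
    }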
EC2Filter[] filterSet = extractFilters( request ); if (null != filterSet) { - EC2VolumeFilterSet vfs = new EC2VolumeFilterSet(); - for( int i=0; i < filterSet.length; i++ ) vfs.addFilter( filterSet[i] ); - EC2request.setFilterSet( vfs ); + EC2VolumeFilterSet vfs = new EC2VolumeFilterSet(); + for( int i=0; i < filterSet.length; i++ ) vfs.addFilter( filterSet[i] ); + EC2request.setFilterSet( vfs ); } - + // -> execute the request DescribeVolumesResponse EC2response = EC2SoapServiceImpl.toDescribeVolumesResponse( ServiceProvider.getInstance().getEC2Engine().handleRequest( EC2request )); serializeResponse(response, EC2response); - } - - + } + + /** * Example of how the filters are defined in a REST request: * https:///?Action=DescribeVolumes @@ -1497,148 +1497,148 @@ public class EC2RestServlet extends HttpServlet { */ private EC2Filter[] extractFilters( HttpServletRequest request ) { - String filterName = null; - String value = null; - EC2Filter nextFilter = null; - boolean timeFilter = false; - int filterCount = 1; - int valueCount = 1; - - List filterSet = new ArrayList(); - - do - { filterName = request.getParameter( "Filter." + filterCount + ".Name" ); - if (null != filterName) - { - nextFilter = new EC2Filter(); - nextFilter.setName( filterName ); - timeFilter = (filterName.equalsIgnoreCase( "attachment.attach-time" ) || filterName.equalsIgnoreCase( "create-time" )); - valueCount = 1; - do - { - value = request.getParameter( "Filter." + filterCount + ".Value." + valueCount ); - if (null != value) - { - // -> time values are not encoded as regexes - if ( timeFilter ) - nextFilter.addValue( value ); - else nextFilter.addValueEncoded( value ); - - valueCount++; - } - } - while( null != value ); - - filterSet.add( nextFilter ); - filterCount++; - } - } - while( null != filterName ); - - if ( 1 == filterCount ) - return null; - else return filterSet.toArray(new EC2Filter[0]); + String filterName = null; + String value = null; + EC2Filter nextFilter = null; + boolean timeFilter = false; + int filterCount = 1; + int valueCount = 1; + + List filterSet = new ArrayList(); + + do + { filterName = request.getParameter( "Filter." + filterCount + ".Name" ); + if (null != filterName) + { + nextFilter = new EC2Filter(); + nextFilter.setName( filterName ); + timeFilter = (filterName.equalsIgnoreCase( "attachment.attach-time" ) || filterName.equalsIgnoreCase( "create-time" )); + valueCount = 1; + do + { + value = request.getParameter( "Filter." + filterCount + ".Value." 
+ valueCount ); + if (null != value) + { + // -> time values are not encoded as regexes + if ( timeFilter ) + nextFilter.addValue( value ); + else nextFilter.addValueEncoded( value ); + + valueCount++; + } + } + while( null != value ); + + filterSet.add( nextFilter ); + filterCount++; + } + } + while( null != filterName ); + + if ( 1 == filterCount ) + return null; + else return filterSet.toArray(new EC2Filter[0]); } - + private void describeKeyPairs(HttpServletRequest request, HttpServletResponse response) - throws ADBException, XMLStreamException, IOException { - EC2DescribeKeyPairs ec2Request = new EC2DescribeKeyPairs(); - - + throws ADBException, XMLStreamException, IOException { + EC2DescribeKeyPairs ec2Request = new EC2DescribeKeyPairs(); + + String[] keyNames = request.getParameterValues( "KeyName" ); if (keyNames != null) { - for (String keyName : keyNames) { - ec2Request.addKeyName(keyName); - } + for (String keyName : keyNames) { + ec2Request.addKeyName(keyName); + } } - EC2Filter[] filterSet = extractFilters( request ); + EC2Filter[] filterSet = extractFilters( request ); if (null != filterSet){ - EC2KeyPairFilterSet vfs = new EC2KeyPairFilterSet(); - for (EC2Filter filter : filterSet) { - vfs.addFilter(filter); - } - ec2Request.setKeyFilterSet(vfs); + EC2KeyPairFilterSet vfs = new EC2KeyPairFilterSet(); + for (EC2Filter filter : filterSet) { + vfs.addFilter(filter); + } + ec2Request.setKeyFilterSet(vfs); } - DescribeKeyPairsResponse EC2Response = EC2SoapServiceImpl.toDescribeKeyPairs( - ServiceProvider.getInstance().getEC2Engine().describeKeyPairs( ec2Request )); - serializeResponse(response, EC2Response); + DescribeKeyPairsResponse EC2Response = EC2SoapServiceImpl.toDescribeKeyPairs( + ServiceProvider.getInstance().getEC2Engine().describeKeyPairs( ec2Request )); + serializeResponse(response, EC2Response); } private void importKeyPair(HttpServletRequest request, HttpServletResponse response) - throws ADBException, XMLStreamException, IOException { - - String keyName = request.getParameter("KeyName"); - String publicKeyMaterial = request.getParameter("PublicKeyMaterial"); - if (keyName==null && publicKeyMaterial==null) { - response.sendError(530, "Missing parameter"); - return; - } + throws ADBException, XMLStreamException, IOException { - if (!publicKeyMaterial.contains(" ")) + String keyName = request.getParameter("KeyName"); + String publicKeyMaterial = request.getParameter("PublicKeyMaterial"); + if (keyName==null && publicKeyMaterial==null) { + response.sendError(530, "Missing parameter"); + return; + } + + if (!publicKeyMaterial.contains(" ")) publicKeyMaterial = new String(Base64.decodeBase64(publicKeyMaterial.getBytes())); - - - EC2ImportKeyPair ec2Request = new EC2ImportKeyPair(); - if (ec2Request != null) { - ec2Request.setKeyName(request.getParameter("KeyName")); - ec2Request.setPublicKeyMaterial(request.getParameter("PublicKeyMaterial")); - } - - ImportKeyPairResponse EC2Response = EC2SoapServiceImpl.toImportKeyPair( - ServiceProvider.getInstance().getEC2Engine().importKeyPair( ec2Request )); - serializeResponse(response, EC2Response); + + + EC2ImportKeyPair ec2Request = new EC2ImportKeyPair(); + if (ec2Request != null) { + ec2Request.setKeyName(request.getParameter("KeyName")); + ec2Request.setPublicKeyMaterial(request.getParameter("PublicKeyMaterial")); + } + + ImportKeyPairResponse EC2Response = EC2SoapServiceImpl.toImportKeyPair( + ServiceProvider.getInstance().getEC2Engine().importKeyPair( ec2Request )); + serializeResponse(response, EC2Response); } private void 
createKeyPair(HttpServletRequest request, HttpServletResponse response) - throws ADBException, XMLStreamException, IOException { - String keyName = request.getParameter("KeyName"); - if (keyName==null) { - response.sendError(530, "Missing KeyName parameter"); - return; - } - - EC2CreateKeyPair ec2Request = new EC2CreateKeyPair(); - if (ec2Request != null) { - ec2Request.setKeyName(keyName); - } - - CreateKeyPairResponse EC2Response = EC2SoapServiceImpl.toCreateKeyPair( - ServiceProvider.getInstance().getEC2Engine().createKeyPair(ec2Request)); - serializeResponse(response, EC2Response); + throws ADBException, XMLStreamException, IOException { + String keyName = request.getParameter("KeyName"); + if (keyName==null) { + response.sendError(530, "Missing KeyName parameter"); + return; + } + + EC2CreateKeyPair ec2Request = new EC2CreateKeyPair(); + if (ec2Request != null) { + ec2Request.setKeyName(keyName); + } + + CreateKeyPairResponse EC2Response = EC2SoapServiceImpl.toCreateKeyPair( + ServiceProvider.getInstance().getEC2Engine().createKeyPair(ec2Request)); + serializeResponse(response, EC2Response); } private void deleteKeyPair(HttpServletRequest request, HttpServletResponse response) - throws ADBException, XMLStreamException, IOException { - String keyName = request.getParameter("KeyName"); - if (keyName==null) { - response.sendError(530, "Missing KeyName parameter"); - return; - } - - EC2DeleteKeyPair ec2Request = new EC2DeleteKeyPair(); - ec2Request.setKeyName(keyName); - - DeleteKeyPairResponse EC2Response = EC2SoapServiceImpl.toDeleteKeyPair( - ServiceProvider.getInstance().getEC2Engine().deleteKeyPair(ec2Request)); - serializeResponse(response, EC2Response); + throws ADBException, XMLStreamException, IOException { + String keyName = request.getParameter("KeyName"); + if (keyName==null) { + response.sendError(530, "Missing KeyName parameter"); + return; + } + + EC2DeleteKeyPair ec2Request = new EC2DeleteKeyPair(); + ec2Request.setKeyName(keyName); + + DeleteKeyPairResponse EC2Response = EC2SoapServiceImpl.toDeleteKeyPair( + ServiceProvider.getInstance().getEC2Engine().deleteKeyPair(ec2Request)); + serializeResponse(response, EC2Response); } - + private void getPasswordData(HttpServletRequest request, HttpServletResponse response) - throws ADBException, XMLStreamException, IOException { - String instanceId = request.getParameter("InstanceId"); - if (instanceId==null) { - response.sendError(530, "Missing InstanceId parameter"); - return; - } - - GetPasswordDataResponse EC2Response = EC2SoapServiceImpl.toGetPasswordData( - ServiceProvider.getInstance().getEC2Engine().getPasswordData(instanceId)); - serializeResponse(response, EC2Response); + throws ADBException, XMLStreamException, IOException { + String instanceId = request.getParameter("InstanceId"); + if (instanceId==null) { + response.sendError(530, "Missing InstanceId parameter"); + return; + } + + GetPasswordDataResponse EC2Response = EC2SoapServiceImpl.toGetPasswordData( + ServiceProvider.getInstance().getEC2Engine().getPasswordData(instanceId)); + serializeResponse(response, EC2Response); } - + /** * This function implements the EC2 REST authentication algorithm. It uses the given * "AWSAccessKeyId" parameter to look up the Cloud.com account holder's secret key which is @@ -1646,107 +1646,107 @@ public class EC2RestServlet extends HttpServlet { * parameter to see if the signature has expired and if so the request fails. 
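A hedged sketch of the Signature Version 2 computation this authentication rests on — not CloudStack's EC2RestAuth implementation, whose canonicalization lives elsewhere. Per the published convention, the string-to-sign joins the HTTP method, Host header, request path, and the sorted, still-URL-encoded query string (minus the Signature parameter) with newlines, then HMACs it with the secret key; the host, path, and query values in main() are illustrative only:

    import javax.crypto.Mac;
    import javax.crypto.spec.SecretKeySpec;
    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    // Sketch of an AWS Signature Version 2 signer (HmacSHA256 variant).
    public class SigV2 {
        static String sign(String method, String host, String path,
                           String sortedEncodedQuery, String secretKey) throws Exception {
            String toSign = method + "\n" + host + "\n" + path + "\n" + sortedEncodedQuery;
            Mac mac = Mac.getInstance("HmacSHA256");
            mac.init(new SecretKeySpec(secretKey.getBytes(StandardCharsets.UTF_8), "HmacSHA256"));
            return Base64.getEncoder()
                         .encodeToString(mac.doFinal(toSign.getBytes(StandardCharsets.UTF_8)));
        }

        public static void main(String[] args) throws Exception {
            String sig = sign("GET", "ec2.example.com", "/",
                    "AWSAccessKeyId=AKEXAMPLE&Action=DescribeVolumes&SignatureMethod=HmacSHA256"
                    + "&SignatureVersion=2&Timestamp=2012-06-01T00%3A00%3A00Z&Version=2010-11-15",
                    "secret");
            // The server recomputes this and compares it with the Signature parameter.
            System.out.println(sig);
        }
    }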
*/ private boolean authenticateRequest( HttpServletRequest request, HttpServletResponse response ) - throws SignatureException, IOException, InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException, ParseException - { - String cloudSecretKey = null; - String cloudAccessKey = null; - String signature = null; - String sigMethod = null; + throws SignatureException, IOException, InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException, ParseException + { + String cloudSecretKey = null; + String cloudAccessKey = null; + String signature = null; + String sigMethod = null; - // [A] Basic parameters required for an authenticated rest request - // -> note that the Servlet engine will un-URL encode all parameters we extract via "getParameterValues()" calls + // [A] Basic parameters required for an authenticated rest request + // -> note that the Servlet engine will un-URL encode all parameters we extract via "getParameterValues()" calls String[] awsAccess = request.getParameterValues( "AWSAccessKeyId" ); - if ( null != awsAccess && 0 < awsAccess.length ) - cloudAccessKey = awsAccess[0]; - else { response.sendError(530, "Missing AWSAccessKeyId parameter" ); return false; } + if ( null != awsAccess && 0 < awsAccess.length ) + cloudAccessKey = awsAccess[0]; + else { response.sendError(530, "Missing AWSAccessKeyId parameter" ); return false; } String[] clientSig = request.getParameterValues( "Signature" ); - if ( null != clientSig && 0 < clientSig.length ) - signature = clientSig[0]; - else { response.sendError(530, "Missing Signature parameter" ); return false; } + if ( null != clientSig && 0 < clientSig.length ) + signature = clientSig[0]; + else { response.sendError(530, "Missing Signature parameter" ); return false; } String[] method = request.getParameterValues( "SignatureMethod" ); - if ( null != method && 0 < method.length ) - { - sigMethod = method[0]; - if (!sigMethod.equals( "HmacSHA256" ) && !sigMethod.equals( "HmacSHA1" )) { - response.sendError(531, "Unsupported SignatureMethod value: " + sigMethod + " expecting: HmacSHA256 or HmacSHA1" ); - return false; - } - } - else { response.sendError(530, "Missing SignatureMethod parameter" ); return false; } + if ( null != method && 0 < method.length ) + { + sigMethod = method[0]; + if (!sigMethod.equals( "HmacSHA256" ) && !sigMethod.equals( "HmacSHA1" )) { + response.sendError(531, "Unsupported SignatureMethod value: " + sigMethod + " expecting: HmacSHA256 or HmacSHA1" ); + return false; + } + } + else { response.sendError(530, "Missing SignatureMethod parameter" ); return false; } String[] version = request.getParameterValues( "Version" ); - if ( null != version && 0 < version.length ) - { - if (!version[0].equals( wsdlVersion )) { - response.sendError(531, "Unsupported Version value: " + version[0] + " expecting: " + wsdlVersion ); - return false; - } - } - else { response.sendError(530, "Missing Version parameter" ); return false; } + if ( null != version && 0 < version.length ) + { + if (!version[0].equals( wsdlVersion )) { + response.sendError(531, "Unsupported Version value: " + version[0] + " expecting: " + wsdlVersion ); + return false; + } + } + else { response.sendError(530, "Missing Version parameter" ); return false; } String[] sigVersion = request.getParameterValues( "SignatureVersion" ); - if ( null != sigVersion && 0 < sigVersion.length ) - { - if (!sigVersion[0].equals( "2" )) { - response.sendError(531, "Unsupported SignatureVersion value: " + sigVersion[0] + " expecting: 2" 
); - return false; - } - } - else { response.sendError(530, "Missing SignatureVersion parameter" ); return false; } + if ( null != sigVersion && 0 < sigVersion.length ) + { + if (!sigVersion[0].equals( "2" )) { + response.sendError(531, "Unsupported SignatureVersion value: " + sigVersion[0] + " expecting: 2" ); + return false; + } + } + else { response.sendError(530, "Missing SignatureVersion parameter" ); return false; } - // -> can have only one but not both { Expires | Timestamp } headers + // -> can have only one but not both { Expires | Timestamp } headers String[] expires = request.getParameterValues( "Expires" ); - if ( null != expires && 0 < expires.length ) - { - // -> contains the date and time at which the signature included in the request EXPIRES - if (hasSignatureExpired( expires[0] )) { - response.sendError(531, "Expires parameter indicates signature has expired: " + expires[0] ); - return false; - } - } - else - { // -> contains the date and time at which the request is SIGNED - String[] time = request.getParameterValues( "Timestamp" ); - if ( null == time || 0 == time.length ) { - response.sendError(530, "Missing Timestamp and Expires parameter, one is required" ); - return false; - } - } - - // [B] Use the cloudAccessKey to get the users secret key in the db - UserCredentialsVO cloudKeys = ucDao.getByAccessKey( cloudAccessKey ); + if ( null != expires && 0 < expires.length ) + { + // -> contains the date and time at which the signature included in the request EXPIRES + if (hasSignatureExpired( expires[0] )) { + response.sendError(531, "Expires parameter indicates signature has expired: " + expires[0] ); + return false; + } + } + else + { // -> contains the date and time at which the request is SIGNED + String[] time = request.getParameterValues( "Timestamp" ); + if ( null == time || 0 == time.length ) { + response.sendError(530, "Missing Timestamp and Expires parameter, one is required" ); + return false; + } + } - if ( null == cloudKeys ) - { - logger.debug( cloudAccessKey + " is not defined in the EC2 service - call SetUserKeys" ); - response.sendError(404, cloudAccessKey + " is not defined in the EC2 service - call SetUserKeys" ); - return false; - } - else cloudSecretKey = cloudKeys.getSecretKey(); + // [B] Use the cloudAccessKey to get the users secret key in the db + UserCredentialsVO cloudKeys = ucDao.getByAccessKey( cloudAccessKey ); - - // [C] Verify the signature - // -> getting the query-string in this way maintains its URL encoding - EC2RestAuth restAuth = new EC2RestAuth(); - restAuth.setHostHeader( request.getHeader( "Host" )); - String requestUri = request.getRequestURI(); - - // If forwarded from another basepath: - String forwardedPath = (String) request.getAttribute("javax.servlet.forward.request_uri"); - if(forwardedPath!=null){ - requestUri=forwardedPath; - } - restAuth.setHTTPRequestURI( requestUri); + if ( null == cloudKeys ) + { + logger.debug( cloudAccessKey + " is not defined in the EC2 service - call SetUserKeys" ); + response.sendError(404, cloudAccessKey + " is not defined in the EC2 service - call SetUserKeys" ); + return false; + } + else cloudSecretKey = cloudKeys.getSecretKey(); - String queryString = request.getQueryString(); - // getQueryString returns null (does it ever NOT return null for these), - // we need to construct queryString to avoid changing the auth code... - if (queryString == null) { - // construct our idea of a queryString with parameters! 
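The fallback above — rebuilding a query string when getQueryString() returns null — has this shape. The servlet decoded the parameter values, so each is re-encoded, and "Signature" itself is excluded; the servlet code keeps the container's enumeration order, while this sketch sorts only to make the output deterministic:

    import java.io.UnsupportedEncodingException;
    import java.net.URLEncoder;
    import java.util.Map;
    import java.util.StringJoiner;
    import java.util.TreeMap;

    // Sketch of rebuilding a signable query string from decoded parameters.
    public class RebuildQuery {
        static String rebuild(Map<String, String> params) throws UnsupportedEncodingException {
            StringJoiner q = new StringJoiner("&");
            for (Map.Entry<String, String> e : new TreeMap<String, String>(params).entrySet()) {
                if ("Signature".equals(e.getKey())) continue; // never sign the signature itself
                q.add(e.getKey() + "=" + URLEncoder.encode(e.getValue(), "UTF-8"));
            }
            return q.toString();
        }

        public static void main(String[] args) throws Exception {
            System.out.println(rebuild(Map.of(
                    "Action", "DescribeVolumes",
                    "Signature", "abc",
                    "Timestamp", "2012-06-01T00:00:00Z")));
            // Action=DescribeVolumes&Timestamp=2012-06-01T00%3A00%3A00Z
        }
    }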
- Enumeration params = request.getParameterNames(); - if (params != null) { + + // [C] Verify the signature + // -> getting the query-string in this way maintains its URL encoding + EC2RestAuth restAuth = new EC2RestAuth(); + restAuth.setHostHeader( request.getHeader( "Host" )); + String requestUri = request.getRequestURI(); + + // If forwarded from another basepath: + String forwardedPath = (String) request.getAttribute("javax.servlet.forward.request_uri"); + if(forwardedPath!=null){ + requestUri=forwardedPath; + } + restAuth.setHTTPRequestURI( requestUri); + + String queryString = request.getQueryString(); + // getQueryString returns null (does it ever NOT return null for these), + // we need to construct queryString to avoid changing the auth code... + if (queryString == null) { + // construct our idea of a queryString with parameters! + Enumeration params = request.getParameterNames(); + if (params != null) { while(params.hasMoreElements()) { String paramName = (String) params.nextElement(); // exclude the signature string obviously. ;) @@ -1756,16 +1756,16 @@ public class EC2RestServlet extends HttpServlet { else queryString = queryString + "&" + paramName + "=" + URLEncoder.encode(request.getParameter(paramName), "UTF-8"); } - } - } - restAuth.setQueryString(queryString); - - if ( restAuth.verifySignature( request.getMethod(), cloudSecretKey, signature, sigMethod )) { - UserContext.current().initContext( cloudAccessKey, cloudSecretKey, cloudAccessKey, "REST request", null ); - return true; - } - else throw new PermissionDeniedException("Invalid signature"); - } + } + } + restAuth.setQueryString(queryString); + + if ( restAuth.verifySignature( request.getMethod(), cloudSecretKey, signature, sigMethod )) { + UserContext.current().initContext( cloudAccessKey, cloudSecretKey, cloudAccessKey, "REST request", null ); + return true; + } + else throw new PermissionDeniedException("Invalid signature"); + } /** * We check this to reduce replay attacks. 
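hasSignatureExpired(), reindented in the next hunk, is the replay guard this javadoc mentions: the request is rejected once the supplied Expires/Timestamp instant is no longer after "now". A java.time rendering of the same comparison (the original parses with Calendar/Date via EC2RestAuth.parseDateString):

    import java.time.Instant;

    // Sketch of the expiry check: expired iff the given instant is <= now,
    // matching the original's 0 >= expiredTime.compareTo(today) test.
    public class ExpiryCheck {
        static boolean hasExpired(String iso8601, Instant now) {
            return !Instant.parse(iso8601).isAfter(now);
        }

        public static void main(String[] args) {
            Instant now = Instant.parse("2012-06-01T00:05:00Z");
            System.out.println(hasExpired("2012-06-01T00:00:00Z", now)); // true  -> reject
            System.out.println(hasExpired("2012-06-01T00:15:00Z", now)); // false -> accept
        }
    }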
@@ -1777,94 +1777,94 @@ public class EC2RestServlet extends HttpServlet { private boolean hasSignatureExpired( String timeStamp ) { Calendar cal = EC2RestAuth.parseDateString( timeStamp ); if (null == cal) return false; - + Date expiredTime = cal.getTime(); - Date today = new Date(); // -> gets set to time of creation + Date today = new Date(); // -> gets set to time of creation if ( 0 >= expiredTime.compareTo( today )) { - logger.debug( "timestamp given: [" + timeStamp + "], now: [" + today.toString() + "]" ); - return true; + logger.debug( "timestamp given: [" + timeStamp + "], now: [" + today.toString() + "]" ); + return true; } else return false; } - + private static void endResponse(HttpServletResponse response, String content) { - try { + try { byte[] data = content.getBytes(); response.setContentLength(data.length); OutputStream os = response.getOutputStream(); os.write(data); os.close(); - - } catch(Throwable e) { - logger.error("Unexpected exception " + e.getMessage(), e); - } + + } catch(Throwable e) { + logger.error("Unexpected exception " + e.getMessage(), e); + } } private void logRequest(HttpServletRequest request) { - if(logger.isInfoEnabled()) { - logger.info("EC2 Request method: " + request.getMethod()); - logger.info("Request contextPath: " + request.getContextPath()); - logger.info("Request pathInfo: " + request.getPathInfo()); - logger.info("Request pathTranslated: " + request.getPathTranslated()); - logger.info("Request queryString: " + request.getQueryString()); - logger.info("Request requestURI: " + request.getRequestURI()); - logger.info("Request requestURL: " + request.getRequestURL()); - logger.info("Request servletPath: " + request.getServletPath()); - Enumeration headers = request.getHeaderNames(); - if(headers != null) { - while(headers.hasMoreElements()) { - Object headerName = headers.nextElement(); - logger.info("Request header " + headerName + ":" + request.getHeader((String)headerName)); - } - } - - Enumeration params = request.getParameterNames(); - if(params != null) { - while(params.hasMoreElements()) { - Object paramName = params.nextElement(); - logger.info("Request parameter " + paramName + ":" + - request.getParameter((String)paramName)); - } - } - } + if(logger.isInfoEnabled()) { + logger.info("EC2 Request method: " + request.getMethod()); + logger.info("Request contextPath: " + request.getContextPath()); + logger.info("Request pathInfo: " + request.getPathInfo()); + logger.info("Request pathTranslated: " + request.getPathTranslated()); + logger.info("Request queryString: " + request.getQueryString()); + logger.info("Request requestURI: " + request.getRequestURI()); + logger.info("Request requestURL: " + request.getRequestURL()); + logger.info("Request servletPath: " + request.getServletPath()); + Enumeration headers = request.getHeaderNames(); + if(headers != null) { + while(headers.hasMoreElements()) { + Object headerName = headers.nextElement(); + logger.info("Request header " + headerName + ":" + request.getHeader((String)headerName)); + } + } + + Enumeration params = request.getParameterNames(); + if(params != null) { + while(params.hasMoreElements()) { + Object paramName = params.nextElement(); + logger.info("Request parameter " + paramName + ":" + + request.getParameter((String)paramName)); + } + } + } } - + /** - * Send out an error response according to Amazon convention. + * Send out an error response according to Amazon convention. 
*/ private void faultResponse(HttpServletResponse response, String errorCode, String errorMessage) { try { - OutputStreamWriter out = new OutputStreamWriter(response.getOutputStream()); - response.setContentType("text/xml; charset=UTF-8"); - out.write(""); - out.write(""); - out.write(errorCode); - out.write(""); - out.write(errorMessage); - out.write(""); - out.write(UUID.randomUUID().toString()); - out.write(""); - out.flush(); - out.close(); - } catch (IOException e) { - logger.error("Unexpected exception " + e.getMessage(), e); - } + OutputStreamWriter out = new OutputStreamWriter(response.getOutputStream()); + response.setContentType("text/xml; charset=UTF-8"); + out.write(""); + out.write(""); + out.write(errorCode); + out.write(""); + out.write(errorMessage); + out.write(""); + out.write(UUID.randomUUID().toString()); + out.write(""); + out.flush(); + out.close(); + } catch (IOException e) { + logger.error("Unexpected exception " + e.getMessage(), e); + } } - + /** - * Serialize Axis beans to XML output. + * Serialize Axis beans to XML output. */ private void serializeResponse(HttpServletResponse response, ADBBean EC2Response) - throws ADBException, XMLStreamException, IOException { - OutputStream os = response.getOutputStream(); - response.setStatus(200); - response.setContentType("text/xml; charset=UTF-8"); - XMLStreamWriter xmlWriter = xmlOutFactory.createXMLStreamWriter( os ); - MTOMAwareXMLSerializer MTOMWriter = new MTOMAwareXMLSerializer( xmlWriter ); - MTOMWriter.setDefaultNamespace("http://ec2.amazonaws.com/doc/" + wsdlVersion + "/"); - EC2Response.serialize( null, factory, MTOMWriter ); - xmlWriter.flush(); - xmlWriter.close(); - os.close(); + throws ADBException, XMLStreamException, IOException { + OutputStream os = response.getOutputStream(); + response.setStatus(200); + response.setContentType("text/xml; charset=UTF-8"); + XMLStreamWriter xmlWriter = xmlOutFactory.createXMLStreamWriter( os ); + MTOMAwareXMLSerializer MTOMWriter = new MTOMAwareXMLSerializer( xmlWriter ); + MTOMWriter.setDefaultNamespace("http://ec2.amazonaws.com/doc/" + wsdlVersion + "/"); + EC2Response.serialize( null, factory, MTOMWriter ); + xmlWriter.flush(); + xmlWriter.close(); + os.close(); } } diff --git a/awsapi/src/com/cloud/bridge/service/S3RestServlet.java b/awsapi/src/com/cloud/bridge/service/S3RestServlet.java index c1458a7b4af..c824fcadd5b 100644 --- a/awsapi/src/com/cloud/bridge/service/S3RestServlet.java +++ b/awsapi/src/com/cloud/bridge/service/S3RestServlet.java @@ -18,21 +18,22 @@ package com.cloud.bridge.service; import java.io.ByteArrayInputStream; import java.io.IOException; -import java.io.OutputStream; import java.io.InputStream; +import java.io.OutputStream; import java.io.UnsupportedEncodingException; import java.security.SignatureException; import java.sql.SQLException; import java.util.Enumeration; +import javax.inject.Inject; import javax.servlet.ServletConfig; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; +import javax.xml.bind.DatatypeConverter; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.bind.*; import org.apache.axis2.AxisFault; import org.apache.log4j.Logger; @@ -45,10 +46,7 @@ import com.cloud.bridge.io.MultiPartDimeInputStream; import com.cloud.bridge.model.SAcl; import com.cloud.bridge.model.UserCredentialsVO; import com.cloud.bridge.persist.dao.CloudStackConfigurationDao; -import 
com.cloud.bridge.persist.dao.CloudStackConfigurationDaoImpl; import com.cloud.bridge.persist.dao.UserCredentialsDao; - -import com.cloud.bridge.persist.dao.UserCredentialsDaoImpl; import com.cloud.bridge.service.controller.s3.S3BucketAction; import com.cloud.bridge.service.controller.s3.S3ObjectAction; import com.cloud.bridge.service.controller.s3.ServiceProvider; @@ -66,151 +64,155 @@ import com.cloud.bridge.util.ConfigurationHelper; import com.cloud.bridge.util.HeaderParam; import com.cloud.bridge.util.RestAuth; import com.cloud.bridge.util.S3SoapAuth; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; - -import net.sf.ehcache.Cache; public class S3RestServlet extends HttpServlet { - private static final long serialVersionUID = -6168996266762804877L; - public static final String ENABLE_S3_API="enable.s3.api"; - private static boolean isS3APIEnabled = false; + private static final long serialVersionUID = -6168996266762804877L; + public static final String ENABLE_S3_API="enable.s3.api"; + private static boolean isS3APIEnabled = false; - public static final Logger logger = Logger.getLogger(S3RestServlet.class); - protected final CloudStackConfigurationDao csDao = ComponentLocator.inject(CloudStackConfigurationDaoImpl.class); - protected final UserCredentialsDao ucDao = ComponentLocator.inject(UserCredentialsDaoImpl.class); - - protected void doGet(HttpServletRequest req, HttpServletResponse resp) { - processRequest( req, resp, "GET" ); - } - + public static final Logger logger = Logger.getLogger(S3RestServlet.class); + @Inject CloudStackConfigurationDao csDao; + @Inject UserCredentialsDao ucDao; + + @Override + protected void doGet(HttpServletRequest req, HttpServletResponse resp) { + processRequest( req, resp, "GET" ); + } + + @Override protected void doPost(HttpServletRequest req, HttpServletResponse resp) { - // -> DIME requests are authenticated via the SOAP auth mechanism - String type = req.getHeader( "Content-Type" ); - if ( null != type && type.equalsIgnoreCase( "application/dime" )) - processDimeRequest(req, resp); - else processRequest( req, resp, "POST" ); + // -> DIME requests are authenticated via the SOAP auth mechanism + String type = req.getHeader( "Content-Type" ); + if ( null != type && type.equalsIgnoreCase( "application/dime" )) + processDimeRequest(req, resp); + else processRequest( req, resp, "POST" ); } - + + @Override protected void doPut(HttpServletRequest req, HttpServletResponse resp) { processRequest( req, resp, "PUT" ); } - + + @Override protected void doHead(HttpServletRequest req, HttpServletResponse resp) { processRequest( req, resp, "HEAD" ); } - + + @Override protected void doOptions(HttpServletRequest req, HttpServletResponse resp) { processRequest( req, resp, "OPTIONS" ); } - + + @Override protected void doDelete( HttpServletRequest req, HttpServletResponse resp ) { processRequest( req, resp, "DELETE" ); } - + + @Override public void init( ServletConfig config ) throws ServletException { - try{ - ConfigurationHelper.preConfigureConfigPathFromServletContext(config.getServletContext()); - // check if API is enabled - String value = csDao.getConfigValue(ENABLE_S3_API); - if(value != null) { - isS3APIEnabled = Boolean.valueOf(value); - } - logger.info("S3Engine :: Configuration value is : " + value); - - }catch(Exception e){ - throw new ServletException("Error initializing awsapi: " + e.getMessage()); + try{ + 
ConfigurationHelper.preConfigureConfigPathFromServletContext(config.getServletContext()); + // check if API is enabled + String value = csDao.getConfigValue(ENABLE_S3_API); + if(value != null) { + isS3APIEnabled = Boolean.valueOf(value); + } + logger.info("S3Engine :: Configuration value is : " + value); + + }catch(Exception e){ + throw new ServletException("Error initializing awsapi: " + e.getMessage()); } - - } - - - - /** - * POST requests do not get authenticated on entry. The associated - * access key and signature headers are embedded in the message not encoded - * as HTTP headers. - */ + + } + + + + /** + * POST requests do not get authenticated on entry. The associated + * access key and signature headers are embedded in the message not encoded + * as HTTP headers. + */ private void processRequest( HttpServletRequest request, HttpServletResponse response, String method ) { Transaction txn = Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); try { - logRequest(request); - - // Our extensions to the S3 REST API for simple management actions - // are conveyed with Request parameter "Action". - // The present extensions are either to set up the user credentials - // (see the cloud-bridge-register script for more detail) or - // to report our version of this capability. - // -> unauthenticated calls, should still be done over HTTPS - String cloudAction = request.getParameter( "Action" ); - - if(!isS3APIEnabled){ - throw new RuntimeException("Amazon S3 API is disabled."); - } - - + logRequest(request); + + // Our extensions to the S3 REST API for simple management actions + // are conveyed with Request parameter "Action". + // The present extensions are either to set up the user credentials + // (see the cloud-bridge-register script for more detail) or + // to report our version of this capability. 
+ // -> unauthenticated calls, should still be done over HTTPS + String cloudAction = request.getParameter( "Action" ); + + if(!isS3APIEnabled){ + throw new RuntimeException("Amazon S3 API is disabled."); + } + + if (null != cloudAction) { - if (cloudAction.equalsIgnoreCase( "SetUserKeys" )) { - setUserKeys(request, response); - return; - } - - if (cloudAction.equalsIgnoreCase( "SetCertificate" )) - // At present a noop - return; + if (cloudAction.equalsIgnoreCase( "SetUserKeys" )) { + setUserKeys(request, response); + return; + } + + if (cloudAction.equalsIgnoreCase( "SetCertificate" )) + // At present a noop + return; + + if (cloudAction.equalsIgnoreCase( "CloudS3Version" )) { + cloudS3Version(request, response); + return; + } + } - if (cloudAction.equalsIgnoreCase( "CloudS3Version" )) { - cloudS3Version(request, response); - return; - } - } - txn.start(); - // -> authenticated calls - if ( !((method.equalsIgnoreCase( "POST" ) && !(request.getQueryString().equalsIgnoreCase("delete"))) ) ){ - S3AuthParams params = extractRequestHeaders( request ); - authenticateRequest( request, params ); - } + // -> authenticated calls + if ( !((method.equalsIgnoreCase( "POST" ) && !(request.getQueryString().equalsIgnoreCase("delete"))) ) ){ + S3AuthParams params = extractRequestHeaders( request ); + authenticateRequest( request, params ); + } - ServletAction action = routeRequest(request); - if ( action != null ) { - action.execute(request, response); - } - else { - response.setStatus(404); - endResponse(response, "File not found"); - } - txn.close(); + ServletAction action = routeRequest(request); + if ( action != null ) { + action.execute(request, response); + } + else { + response.setStatus(404); + endResponse(response, "File not found"); + } + txn.close(); } catch( InvalidBucketName e) { - logger.error("Unexpected exception " + e.getMessage(), e); - response.setStatus(400); - endResponse(response, "Invalid Bucket Name - " + e.toString()); + logger.error("Unexpected exception " + e.getMessage(), e); + response.setStatus(400); + endResponse(response, "Invalid Bucket Name - " + e.toString()); } catch(PermissionDeniedException e) { - logger.error("Unexpected exception " + e.getMessage(), e); - response.setStatus(403); - endResponse(response, "Access denied - " + e.toString()); + logger.error("Unexpected exception " + e.getMessage(), e); + response.setStatus(403); + endResponse(response, "Access denied - " + e.toString()); } catch(Throwable e) { - logger.error("Unexpected exception " + e.getMessage(), e); - response.setStatus(404); - endResponse(response, "Bad request"); - + logger.error("Unexpected exception " + e.getMessage(), e); + response.setStatus(404); + endResponse(response, "Bad request"); + } finally { - - try { - response.flushBuffer(); - } catch (IOException e) { - logger.error("Unexpected exception " + e.getMessage(), e); - } + + try { + response.flushBuffer(); + } catch (IOException e) { + logger.error("Unexpected exception " + e.getMessage(), e); + } } } - + /** * Provide an easy way to determine the version of the implementation running. 
* @@ -218,7 +220,7 @@ public class S3RestServlet extends HttpServlet { */ private void cloudS3Version( HttpServletRequest request, HttpServletResponse response ) { String version = new String( "1.04" ); - response.setStatus(200); + response.setStatus(200); endResponse(response, version); } @@ -242,236 +244,236 @@ public class S3RestServlet extends HttpServlet { */ @DB private void setUserKeys( HttpServletRequest request, HttpServletResponse response ) { - String[] accessKey = null; - String[] secretKey = null; - - try { - // -> both these parameters are required + String[] accessKey = null; + String[] secretKey = null; + + try { + // -> both these parameters are required accessKey = request.getParameterValues( "accesskey" ); - if ( null == accessKey || 0 == accessKey.length ) { - response.sendError(530, "Missing accesskey parameter" ); - return; - } + if ( null == accessKey || 0 == accessKey.length ) { + response.sendError(530, "Missing accesskey parameter" ); + return; + } secretKey = request.getParameterValues( "secretkey" ); if ( null == secretKey || 0 == secretKey.length ) { - response.sendError(530, "Missing secretkey parameter" ); - return; + response.sendError(530, "Missing secretkey parameter" ); + return; } } catch( Exception e ) { - logger.error("SetUserKeys exception " + e.getMessage(), e); - response.setStatus(500); - endResponse(response, "SetUserKeys exception " + e.getMessage()); - return; + logger.error("SetUserKeys exception " + e.getMessage(), e); + response.setStatus(500); + endResponse(response, "SetUserKeys exception " + e.getMessage()); + return; } try { // -> use the keys to see if the account actually exists - //ServiceProvider.getInstance().getEC2Engine().validateAccount( accessKey[0], secretKey[0] ); - //UserCredentialsDaoImpl credentialDao = new UserCredentialsDao(); + //ServiceProvider.getInstance().getEC2Engine().validateAccount( accessKey[0], secretKey[0] ); + //UserCredentialsDaoImpl credentialDao = new UserCredentialsDao(); Transaction txn = Transaction.open(Transaction.AWSAPI_DB); txn.start(); - UserCredentialsVO user = new UserCredentialsVO(accessKey[0], secretKey[0]); - user = ucDao.persist(user); - txn.commit(); - txn.close(); - //credentialDao.setUserKeys( accessKey[0], secretKey[0] ); - + UserCredentialsVO user = new UserCredentialsVO(accessKey[0], secretKey[0]); + user = ucDao.persist(user); + txn.commit(); + txn.close(); + //credentialDao.setUserKeys( accessKey[0], secretKey[0] ); + } catch( Exception e ) { - logger.error("SetUserKeys " + e.getMessage(), e); - response.setStatus(401); - endResponse(response, e.toString()); - return; + logger.error("SetUserKeys " + e.getMessage(), e); + response.setStatus(401); + endResponse(response, e.toString()); + return; } - response.setStatus(200); + response.setStatus(200); endResponse(response, "User keys set successfully"); } - + /** * We are using the S3AuthParams class to hide where the header values are coming * from so that the authenticateRequest call can be made from several places. 
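authenticateRequest(), next, splits an Authorization header of the form "AWS &lt;AccessKeyId&gt;:&lt;Signature&gt;"; a null header marks the request as anonymous. A sketch of that split — unlike the original, it guards against a missing colon:

    // Sketch of the S3 Authorization header parse used by authenticateRequest().
    public class S3AuthHeader {
        static String[] split(String authorization) {
            if (authorization == null) return null;     // anonymous request
            int aws = authorization.indexOf("AWS");
            if (aws == -1) return null;
            String rest = authorization.substring(aws + 3).trim();
            int colon = rest.indexOf(':');
            if (colon < 0) return null;                 // the original omits this guard
            return new String[] { rest.substring(0, colon), rest.substring(colon + 1) };
        }

        public static void main(String[] args) {
            String[] parts = split("AWS AKIDEXAMPLE:frJIUN8DYpKDtOLCwo//yllqDzg=");
            System.out.println(parts[0]); // AKIDEXAMPLE
            System.out.println(parts[1]); // frJIUN8DYpKDtOLCwo//yllqDzg=
        }
    }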
 */
    public static S3AuthParams extractRequestHeaders( HttpServletRequest request ) {
-    S3AuthParams params = new S3AuthParams();
-
-    Enumeration headers = request.getHeaderNames();
-    if (null != headers)
-    {
-        while( headers.hasMoreElements())
-        {
-            HeaderParam oneHeader = new HeaderParam();
-            String headerName = (String)headers.nextElement();
-            oneHeader.setName( headerName );
-            oneHeader.setValue( request.getHeader( headerName ));
-            params.addHeader( oneHeader );
-        }
-    }
-    return params;
+    S3AuthParams params = new S3AuthParams();
+
+    Enumeration headers = request.getHeaderNames();
+    if (null != headers)
+    {
+        while( headers.hasMoreElements())
+        {
+            HeaderParam oneHeader = new HeaderParam();
+            String headerName = (String)headers.nextElement();
+            oneHeader.setName( headerName );
+            oneHeader.setValue( request.getHeader( headerName ));
+            params.addHeader( oneHeader );
+        }
+    }
+    return params;
    }
-
+
    public static void authenticateRequest( HttpServletRequest request, S3AuthParams params )
-    throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
-    {
-    RestAuth auth = new RestAuth(ServiceProvider.getInstance().getUseSubDomain());
-    String AWSAccessKey = null;
-    String signature = null;
-    String authorization = null;
-
-    // [A] Is it an annonymous request?
-    if (null == (authorization = params.getHeader( "Authorization" ))) {
+    throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
+    {
+    RestAuth auth = new RestAuth(ServiceProvider.getInstance().getUseSubDomain());
+    String AWSAccessKey = null;
+    String signature = null;
+    String authorization = null;
+
+    // [A] Is it an anonymous request?
+    if (null == (authorization = params.getHeader( "Authorization" ))) {
        UserContext.current().initContext();
        return;
-    }
-
-    // [B] Is it an authenticated request?
-    int offset = authorization.indexOf( "AWS" );
-    if (-1 != offset) {
-        String temp = authorization.substring( offset+3 ).trim();
-        offset = temp.indexOf( ":" );
-        AWSAccessKey = temp.substring( 0, offset );
-        signature = temp.substring( offset+1 );
-    }
-
-    // [C] Calculate the signature from the request's headers
-    auth.setDateHeader( request.getHeader( "Date" ));
-    auth.setContentTypeHeader( request.getHeader( "Content-Type" ));
-    auth.setContentMD5Header( request.getHeader( "Content-MD5" ));
-    auth.setHostHeader( request.getHeader( "Host" ));
-    auth.setQueryString( request.getQueryString());
-    auth.addUriPath( request.getRequestURI());
+    }
+
+    // [B] Is it an authenticated request?
+    int offset = authorization.indexOf( "AWS" );
+    if (-1 != offset) {
+        String temp = authorization.substring( offset+3 ).trim();
+        offset = temp.indexOf( ":" );
+        AWSAccessKey = temp.substring( 0, offset );
+        signature = temp.substring( offset+1 );
+    }
+
+    // [C] Calculate the signature from the request's headers
+    auth.setDateHeader( request.getHeader( "Date" ));
+    auth.setContentTypeHeader( request.getHeader( "Content-Type" ));
+    auth.setContentMD5Header( request.getHeader( "Content-MD5" ));
+    auth.setHostHeader( request.getHeader( "Host" ));
+    auth.setQueryString( request.getQueryString());
+    auth.addUriPath( request.getRequestURI());
+
+    // -> are there any Amazon-specific (i.e. 'x-amz-') headers?
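The x-amz- scan flagged in the comment above feeds RestAuth.addAmazonHeader(). Under the published S3 signing rules these headers enter the string-to-sign lower-cased and sorted by name; where exactly CloudStack's RestAuth performs that step is not visible in this hunk, so the sketch below only illustrates the convention:

    import java.util.Map;
    import java.util.StringJoiner;
    import java.util.TreeMap;

    // Sketch of canonicalizing x-amz-* headers for the S3 string-to-sign:
    // lower-cased names, trimmed values, one "name:value" line each, sorted.
    public class AmzHeaders {
        static String canonicalize(Map<String, String> headers) {
            TreeMap<String, String> amz = new TreeMap<String, String>();
            for (Map.Entry<String, String> e : headers.entrySet()) {
                String name = e.getKey().toLowerCase();
                if (name.startsWith("x-amz-")) amz.put(name, e.getValue().trim());
            }
            StringJoiner out = new StringJoiner("\n");
            for (Map.Entry<String, String> e : amz.entrySet())
                out.add(e.getKey() + ":" + e.getValue());
            return out.toString();
        }

        public static void main(String[] args) {
            System.out.println(canonicalize(Map.of(
                    "X-Amz-Meta-Tag", "v1",
                    "Content-Type", "text/plain",
                    "x-amz-acl", "private")));
            // x-amz-acl:private
            // x-amz-meta-tag:v1
        }
    }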
+ HeaderParam[] headers = params.getHeaders(); + for( int i=0; null != headers && i < headers.length; i++ ) + { + String headerName = headers[i].getName(); + String ignoreCase = headerName.toLowerCase(); + if (ignoreCase.startsWith( "x-amz-" )) + auth.addAmazonHeader( headerName + ":" + headers[i].getValue()); + } + + UserInfo info = ServiceProvider.getInstance().getUserInfo(AWSAccessKey); + if (info == null) throw new PermissionDeniedException("Unable to authenticate access key: " + AWSAccessKey); + + try { + if (auth.verifySignature( request.getMethod(), info.getSecretKey(), signature )) { + UserContext.current().initContext(AWSAccessKey, info.getSecretKey(), AWSAccessKey, info.getDescription(), request); + return; + } + + + } catch (SignatureException e) { + throw new PermissionDeniedException(e); + + } catch (UnsupportedEncodingException e) { + throw new PermissionDeniedException(e); + } + throw new PermissionDeniedException("Invalid signature"); + } + + - // -> are their any Amazon specific (i.e. 'x-amz-' ) headers? - HeaderParam[] headers = params.getHeaders(); - for( int i=0; null != headers && i < headers.length; i++ ) - { - String headerName = headers[i].getName(); - String ignoreCase = headerName.toLowerCase(); - if (ignoreCase.startsWith( "x-amz-" )) - auth.addAmazonHeader( headerName + ":" + headers[i].getValue()); - } - - UserInfo info = ServiceProvider.getInstance().getUserInfo(AWSAccessKey); - if (info == null) throw new PermissionDeniedException("Unable to authenticate access key: " + AWSAccessKey); - - try { - if (auth.verifySignature( request.getMethod(), info.getSecretKey(), signature )) { - UserContext.current().initContext(AWSAccessKey, info.getSecretKey(), AWSAccessKey, info.getDescription(), request); - return; - } - - - } catch (SignatureException e) { - throw new PermissionDeniedException(e); - - } catch (UnsupportedEncodingException e) { - throw new PermissionDeniedException(e); - } - throw new PermissionDeniedException("Invalid signature"); - } - - - private ServletAction routeRequest(HttpServletRequest request) { - // URL routing for S3 REST calls. - String pathInfo = request.getPathInfo(); - String bucketName = null; - String key = null; - - String serviceEndpoint = ServiceProvider.getInstance().getServiceEndpoint(); - String host = request.getHeader("Host"); - - // Check for unrecognized forms of URI information in request - - if ( ( pathInfo == null ) || ( pathInfo.indexOf('/') != 0 ) ) - if ( "POST".equalsIgnoreCase(request.getMethod()) ) - // Case where request is POST operation with no pathinfo - // This is the POST alternative to PUT described at s3.amazonaws.com API doc page 141 - { - return routePlainPostRequest (request); - } - - - // Irrespective of whether the requester is using subdomain or full host naming of path expressions - // to buckets, wherever the request is made up of a service endpoint followed by a /, in AWS S3 this always - // conveys a ListAllMyBuckets command - - if ( (serviceEndpoint.equalsIgnoreCase( host )) && (pathInfo.equalsIgnoreCase("/")) ) { - request.setAttribute(S3Constants.BUCKET_ATTR_KEY, "/"); - return new S3BucketAction(); // for ListAllMyBuckets - } + // URL routing for S3 REST calls. 
+ String pathInfo = request.getPathInfo(); + String bucketName = null; + String key = null; - // Because there is a leading / at position 0 of pathInfo, now subtract this to process the remainder - pathInfo = pathInfo.substring(1); - - if (ServiceProvider.getInstance().getUseSubDomain()) - - { - // -> verify the format of the bucket name - int endPos = host.indexOf( ServiceProvider.getInstance().getMasterDomain()); - if ( endPos > 0 ) - { - bucketName = host.substring(0, endPos); - S3Engine.verifyBucketName( bucketName, false ); - request.setAttribute(S3Constants.BUCKET_ATTR_KEY, bucketName); - } - else request.setAttribute(S3Constants.BUCKET_ATTR_KEY, ""); - - if (pathInfo == null || pathInfo.equalsIgnoreCase("/")) - { - return new S3BucketAction(); - } - else { - String objectKey = pathInfo.substring(1); - request.setAttribute(S3Constants.OBJECT_ATTR_KEY, objectKey); - return new S3ObjectAction(); - } - } - - else - - { - - int endPos = pathInfo.indexOf('/'); // Subsequent / character? - - if (endPos < 1) - { - bucketName = pathInfo; - S3Engine.verifyBucketName( bucketName, false ); - request.setAttribute(S3Constants.BUCKET_ATTR_KEY, bucketName); - return new S3BucketAction(); - } - else - { - bucketName = pathInfo.substring(0, endPos); - key = pathInfo.substring(endPos + 1); - S3Engine.verifyBucketName( bucketName, false ); - - if (!key.isEmpty()) - { - request.setAttribute(S3Constants.BUCKET_ATTR_KEY, bucketName); - request.setAttribute(S3Constants.OBJECT_ATTR_KEY, pathInfo.substring(endPos + 1)); - return new S3ObjectAction(); - } - else { - request.setAttribute(S3Constants.BUCKET_ATTR_KEY, bucketName); - return new S3BucketAction(); - } - } - } + String serviceEndpoint = ServiceProvider.getInstance().getServiceEndpoint(); + String host = request.getHeader("Host"); + + // Check for unrecognized forms of URI information in request + + if ( ( pathInfo == null ) || ( pathInfo.indexOf('/') != 0 ) ) + if ( "POST".equalsIgnoreCase(request.getMethod()) ) + // Case where request is POST operation with no pathinfo + // This is the POST alternative to PUT described at s3.amazonaws.com API doc page 141 + { + return routePlainPostRequest (request); + } + + + // Irrespective of whether the requester is using subdomain or full host naming of path expressions + // to buckets, wherever the request is made up of a service endpoint followed by a /, in AWS S3 this always + // conveys a ListAllMyBuckets command + + if ( (serviceEndpoint.equalsIgnoreCase( host )) && (pathInfo.equalsIgnoreCase("/")) ) { + request.setAttribute(S3Constants.BUCKET_ATTR_KEY, "/"); + return new S3BucketAction(); // for ListAllMyBuckets + } + + // Because there is a leading / at position 0 of pathInfo, now subtract this to process the remainder + pathInfo = pathInfo.substring(1); + + if (ServiceProvider.getInstance().getUseSubDomain()) + + { + // -> verify the format of the bucket name + int endPos = host.indexOf( ServiceProvider.getInstance().getMasterDomain()); + if ( endPos > 0 ) + { + bucketName = host.substring(0, endPos); + S3Engine.verifyBucketName( bucketName, false ); + request.setAttribute(S3Constants.BUCKET_ATTR_KEY, bucketName); + } + else request.setAttribute(S3Constants.BUCKET_ATTR_KEY, ""); + + if (pathInfo == null || pathInfo.equalsIgnoreCase("/")) + { + return new S3BucketAction(); + } + else { + String objectKey = pathInfo.substring(1); + request.setAttribute(S3Constants.OBJECT_ATTR_KEY, objectKey); + return new S3ObjectAction(); + } + } + + else + + { + + int endPos = pathInfo.indexOf('/'); // Subsequent / 
character? + + if (endPos < 1) + { + bucketName = pathInfo; + S3Engine.verifyBucketName( bucketName, false ); + request.setAttribute(S3Constants.BUCKET_ATTR_KEY, bucketName); + return new S3BucketAction(); + } + else + { + bucketName = pathInfo.substring(0, endPos); + key = pathInfo.substring(endPos + 1); + S3Engine.verifyBucketName( bucketName, false ); + + if (!key.isEmpty()) + { + request.setAttribute(S3Constants.BUCKET_ATTR_KEY, bucketName); + request.setAttribute(S3Constants.OBJECT_ATTR_KEY, pathInfo.substring(endPos + 1)); + return new S3ObjectAction(); + } + else { + request.setAttribute(S3Constants.BUCKET_ATTR_KEY, bucketName); + return new S3BucketAction(); + } + } + } } - - + + public static void endResponse(HttpServletResponse response, String content) { - try { + try { byte[] data = content.getBytes(); response.setContentLength(data.length); OutputStream os = response.getOutputStream(); os.write(data); os.close(); - } catch(Throwable e) { - logger.error("Unexpected exception " + e.getMessage(), e); - } + } catch(Throwable e) { + logger.error("Unexpected exception " + e.getMessage(), e); + } } public static void writeResponse(HttpServletResponse response, String content) throws IOException { @@ -479,61 +481,61 @@ public class S3RestServlet extends HttpServlet { OutputStream os = response.getOutputStream(); os.write(data); } - + public static void writeResponse(HttpServletResponse response, InputStream is) throws IOException { - byte[] data = new byte[4096]; - int length = 0; - while((length = is.read(data)) > 0) { - response.getOutputStream().write(data, 0, length); - } + byte[] data = new byte[4096]; + int length = 0; + while((length = is.read(data)) > 0) { + response.getOutputStream().write(data, 0, length); + } } // Route for the case where request is POST operation with no pathinfo // This is the POST alternative to PUT described at s3.amazonaws.com API doc, Amazon Simple // Storage Service API Reference API Version 2006-03-01 page 141. // The purpose of the plain POST operation is to add an object to a specified bucket using HTML forms. - -private S3ObjectAction routePlainPostRequest (HttpServletRequest request) + + private S3ObjectAction routePlainPostRequest (HttpServletRequest request) { - // TODO - Remove the unnecessary fields below - // Obtain the mandatory fields from the HTML form or otherwise fail with a logger message - String keyString = request.getParameter("key"); - String metatagString = request.getParameter("x-amz-meta-tag"); - String bucketString = request.getParameter("Bucket"); - String aclString = request.getParameter("acl"); - String fileString = request.getParameter("file"); - - String accessKeyString = request.getParameter("AWSAccessKeyId"); - String signatureString = request.getParameter("Signature"); + // TODO - Remove the unnecessary fields below + // Obtain the mandatory fields from the HTML form or otherwise fail with a logger message + String keyString = request.getParameter("key"); + String metatagString = request.getParameter("x-amz-meta-tag"); + String bucketString = request.getParameter("Bucket"); + String aclString = request.getParameter("acl"); + String fileString = request.getParameter("file"); - // Obtain the discretionary fields from the HTML form - String policyKeyString = request.getParameter("Policy"); - String metauuidString = request.getParameter("x-amz-meta-uuid"); - String redirectString = request.getParameter("redirect"); - - // if none of the above are null then ... 
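The two bucket-addressing styles routeRequest() distinguishes above reduce to the sketch below: virtual-hosted style carries the bucket in the Host header ("media.s3.example.com"), path style carries it as the first path segment ("/media/a/b.txt"). Here masterDomain stands in for ServiceProvider.getMasterDomain(), and the example host names are illustrative:

    // Sketch of virtual-hosted vs. path-style S3 bucket/key routing.
    public class BucketRouting {
        static String[] route(String host, String pathInfo, String masterDomain) {
            int end = host.indexOf(masterDomain);
            if (end > 0) {                                  // virtual-hosted style
                String bucket = host.substring(0, end);
                String key = pathInfo.length() > 1 ? pathInfo.substring(1) : "";
                return new String[] { bucket, key };
            }
            String p = pathInfo.substring(1);               // drop the leading '/'
            int slash = p.indexOf('/');                     // path style
            return slash < 1 ? new String[] { p, "" }
                             : new String[] { p.substring(0, slash), p.substring(slash + 1) };
        }

        public static void main(String[] args) {
            String[] a = route("media.s3.example.com", "/a/b.txt", ".s3.example.com");
            String[] b = route("s3.example.com", "/media/a/b.txt", ".s3.example.com");
            System.out.println(a[0] + " | " + a[1]); // media | a/b.txt
            System.out.println(b[0] + " | " + b[1]); // media | a/b.txt
        }
    }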
- request.setAttribute(S3Constants.BUCKET_ATTR_KEY, bucketString); - request.setAttribute(S3Constants.OBJECT_ATTR_KEY, keyString); - request.setAttribute(S3Constants.PLAIN_POST_ACCESS_KEY, accessKeyString); - request.setAttribute(S3Constants.PLAIN_POST_SIGNATURE, signatureString); - - // -> authenticated calls - try { - // S3AuthParams params = extractRequestHeaders( request ); - S3AuthParams params = new S3AuthParams(); - HeaderParam headerParam1 = new HeaderParam("accessKey", accessKeyString); - params.addHeader(headerParam1); - HeaderParam headerParam2 = new HeaderParam("secretKey", signatureString); - params.addHeader(headerParam2); - authenticateRequest( request, params ); - } - catch (Exception e) - { logger.warn("Authentication details insufficient"); } + String accessKeyString = request.getParameter("AWSAccessKeyId"); + String signatureString = request.getParameter("Signature"); + + // Obtain the discretionary fields from the HTML form + String policyKeyString = request.getParameter("Policy"); + String metauuidString = request.getParameter("x-amz-meta-uuid"); + String redirectString = request.getParameter("redirect"); + + // if none of the above are null then ... + request.setAttribute(S3Constants.BUCKET_ATTR_KEY, bucketString); + request.setAttribute(S3Constants.OBJECT_ATTR_KEY, keyString); + request.setAttribute(S3Constants.PLAIN_POST_ACCESS_KEY, accessKeyString); + request.setAttribute(S3Constants.PLAIN_POST_SIGNATURE, signatureString); + + // -> authenticated calls + try { + // S3AuthParams params = extractRequestHeaders( request ); + S3AuthParams params = new S3AuthParams(); + HeaderParam headerParam1 = new HeaderParam("accessKey", accessKeyString); + params.addHeader(headerParam1); + HeaderParam headerParam2 = new HeaderParam("secretKey", signatureString); + params.addHeader(headerParam2); + authenticateRequest( request, params ); + } + catch (Exception e) + { logger.warn("Authentication details insufficient"); } + + return new S3ObjectAction(); + + } - return new S3ObjectAction(); - - } - /** * A DIME request is really a SOAP request that we are dealing with, and so its * authentication is the SOAP authentication approach. 
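toEnginePutObjectRequest(), further below, pulls each SOAP field out of the first DIME body part with namespace-aware DOM lookups. A minimal sketch of that extraction pattern; getTextContent() stands in for the original's getFirstChild().getNodeValue() chain:

    import java.io.ByteArrayInputStream;
    import java.nio.charset.StandardCharsets;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.NodeList;

    // Sketch of namespace-aware DOM field extraction from a SOAP body:
    // parse, then read the text of the first element with a given local name.
    public class SoapField {
        static String firstText(Document doc, String ns, String localName) {
            NodeList hits = doc.getElementsByTagNameNS(ns, localName);
            return hits.getLength() == 0 ? null : hits.item(0).getTextContent();
        }

        public static void main(String[] args) throws Exception {
            String ns = "http://s3.amazonaws.com/doc/2006-03-01/";
            String xml = "<PutObject xmlns='" + ns + "'>"
                       + "<Bucket>media</Bucket><Key>a/b.txt</Key></PutObject>";
            DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
            dbf.setNamespaceAware(true); // required, as in the original
            Document doc = dbf.newDocumentBuilder()
                    .parse(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));
            System.out.println(firstText(doc, ns, "Bucket")); // media
            System.out.println(firstText(doc, ns, "Key"));    // a/b.txt
        }
    }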
Since Axis2 does not handle @@ -543,17 +545,17 @@ private S3ObjectAction routePlainPostRequest (HttpServletRequest request) * @param response */ private void processDimeRequest(HttpServletRequest request, HttpServletResponse response) { - S3PutObjectRequest putRequest = null; - S3PutObjectResponse putResponse = null; - int bytesRead = 0; - + S3PutObjectRequest putRequest = null; + S3PutObjectResponse putResponse = null; + int bytesRead = 0; + S3Engine engine = new S3Engine(); - + try { - logRequest(request); - + logRequest(request); + MultiPartDimeInputStream ds = new MultiPartDimeInputStream( request.getInputStream()); - + // -> the first stream MUST be the SOAP party if (ds.nextInputStream()) { @@ -564,26 +566,26 @@ private S3ObjectAction routePlainPostRequest (HttpServletRequest request) ByteArrayInputStream bis = new ByteArrayInputStream( buffer, 0, bytesRead ); putRequest = toEnginePutObjectRequest( bis ); } - + // -> we only need to support a DIME message with two bodyparts if (null != putRequest && ds.nextInputStream()) { - InputStream is = ds.getInputStream(); - putRequest.setData( is ); + InputStream is = ds.getInputStream(); + putRequest.setData( is ); } // -> need to do SOAP level auth here, on failure return the SOAP fault StringBuffer xml = new StringBuffer(); String AWSAccessKey = putRequest.getAccessKey(); - UserInfo info = ServiceProvider.getInstance().getUserInfo(AWSAccessKey); - try - { S3SoapAuth.verifySignature( putRequest.getSignature(), "PutObject", putRequest.getRawTimestamp(), AWSAccessKey, info.getSecretKey()); - - } catch( AxisFault e ) { - String reason = e.toString(); - int start = reason.indexOf( ".AxisFault:" ); - if (-1 != start) reason = reason.substring( start+11 ); - + UserInfo info = ServiceProvider.getInstance().getUserInfo(AWSAccessKey); + try + { S3SoapAuth.verifySignature( putRequest.getSignature(), "PutObject", putRequest.getRawTimestamp(), AWSAccessKey, info.getSecretKey()); + + } catch( AxisFault e ) { + String reason = e.toString(); + int start = reason.indexOf( ".AxisFault:" ); + if (-1 != start) reason = reason.substring( start+11 ); + xml.append( "" ); xml.append( "\n" ); xml.append( "\n" ); @@ -592,15 +594,15 @@ private S3ObjectAction routePlainPostRequest (HttpServletRequest request) xml.append( "" ).append( reason ).append( "\n" ); xml.append( "\n" ); xml.append( "" ); - - endResponse(response, xml.toString()); - return; - } - - // -> PutObject S3 Bucket Policy would be done in the engine.handleRequest() call - UserContext.current().initContext( AWSAccessKey, info.getSecretKey(), AWSAccessKey, "S3 DIME request", request ); + + endResponse(response, xml.toString()); + return; + } + + // -> PutObject S3 Bucket Policy would be done in the engine.handleRequest() call + UserContext.current().initContext( AWSAccessKey, info.getSecretKey(), AWSAccessKey, "S3 DIME request", request ); putResponse = engine.handleRequest( putRequest ); - + xml.append( "" ); xml.append( "" ); xml.append( "" ); @@ -610,24 +612,24 @@ private S3ObjectAction routePlainPostRequest (HttpServletRequest request) xml.append( "").append( DatatypeConverter.printDateTime(putResponse.getLastModified())).append( "" ); xml.append( "" ); xml.append( "" ); - - endResponse(response, xml.toString()); + + endResponse(response, xml.toString()); } catch(PermissionDeniedException e) { - logger.error("Unexpected exception " + e.getMessage(), e); - response.setStatus(403); - endResponse(response, "Access denied"); + logger.error("Unexpected exception " + e.getMessage(), e); + 
response.setStatus(403); + endResponse(response, "Access denied"); } catch(Throwable e) { - logger.error("Unexpected exception " + e.getMessage(), e); + logger.error("Unexpected exception " + e.getMessage(), e); } finally { } } - + /** * Convert the SOAP XML we extract from the DIME message into our local object. * Here Axis2 is not parsing the SOAP for us. I tried to use the Amazon PutObject @@ -637,240 +639,240 @@ private S3ObjectAction routePlainPostRequest (HttpServletRequest request) * @return * @throws Exception */ - public static S3PutObjectRequest toEnginePutObjectRequest( InputStream is ) throws Exception - { - DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); - dbf.setNamespaceAware( true ); - - DocumentBuilder db = dbf.newDocumentBuilder(); - Document doc = db.parse( is ); - Node parent = null; - Node contents = null; - NodeList children = null; - String temp = null; - String element = null; - int count = 0; + public static S3PutObjectRequest toEnginePutObjectRequest( InputStream is ) throws Exception + { + DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); + dbf.setNamespaceAware( true ); - S3PutObjectRequest request = new S3PutObjectRequest(); + DocumentBuilder db = dbf.newDocumentBuilder(); + Document doc = db.parse( is ); + Node parent = null; + Node contents = null; + NodeList children = null; + String temp = null; + String element = null; + int count = 0; - // [A] Pull out the simple nodes first - NodeList part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Bucket" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - request.setBucketName( contents.getFirstChild().getNodeValue()); - } - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Key" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - request.setKey( contents.getFirstChild().getNodeValue()); - } - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "ContentLength" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - { - String length = contents.getFirstChild().getNodeValue(); - if (null != length) request.setContentLength( Long.decode( length )); - } - } - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "AWSAccessKeyId" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - request.setAccessKey( contents.getFirstChild().getNodeValue()); - } - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Signature" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - request.setSignature( contents.getFirstChild().getNodeValue()); - } - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Timestamp" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - request.setRawTimestamp( contents.getFirstChild().getNodeValue()); - } - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "StorageClass" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - request.setStorageClass( contents.getFirstChild().getNodeValue()); - } - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Credential" ); - if (null != part) - { - if (null != (contents = part.item( 0 ))) - request.setCredential( contents.getFirstChild().getNodeValue()); - } - - - // [B] Get a list of all 'Metadata' elements - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Metadata" ); - if (null != part) - { - count = part.getLength(); - S3MetaDataEntry[] metaEntry = new 
S3MetaDataEntry[ count ]; - - for( int i=0; i < count; i++ ) - { - parent = part.item(i); - metaEntry[i] = new S3MetaDataEntry(); + S3PutObjectRequest request = new S3PutObjectRequest(); - // -> get a list of all the children elements of the 'Metadata' parent element - if (null != (children = parent.getChildNodes())) - { - int numChildren = children.getLength(); - for( int j=0; j < numChildren; j++ ) - { - contents = children.item( j ); - element = contents.getNodeName().trim(); - if ( element.endsWith( "Name" )) - { - temp = contents.getFirstChild().getNodeValue(); - if (null != temp) metaEntry[i].setName( temp ); - } - else if (element.endsWith( "Value" )) - { - temp = contents.getFirstChild().getNodeValue(); - if (null != temp) metaEntry[i].setValue( temp ); - } - } - } - } - request.setMetaEntries( metaEntry ); - } + // [A] Pull out the simple nodes first + NodeList part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Bucket" ); + if (null != part) + { + if (null != (contents = part.item( 0 ))) + request.setBucketName( contents.getFirstChild().getNodeValue()); + } + part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Key" ); + if (null != part) + { + if (null != (contents = part.item( 0 ))) + request.setKey( contents.getFirstChild().getNodeValue()); + } + part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "ContentLength" ); + if (null != part) + { + if (null != (contents = part.item( 0 ))) + { + String length = contents.getFirstChild().getNodeValue(); + if (null != length) request.setContentLength( Long.decode( length )); + } + } + part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "AWSAccessKeyId" ); + if (null != part) + { + if (null != (contents = part.item( 0 ))) + request.setAccessKey( contents.getFirstChild().getNodeValue()); + } + part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Signature" ); + if (null != part) + { + if (null != (contents = part.item( 0 ))) + request.setSignature( contents.getFirstChild().getNodeValue()); + } + part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Timestamp" ); + if (null != part) + { + if (null != (contents = part.item( 0 ))) + request.setRawTimestamp( contents.getFirstChild().getNodeValue()); + } + part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "StorageClass" ); + if (null != part) + { + if (null != (contents = part.item( 0 ))) + request.setStorageClass( contents.getFirstChild().getNodeValue()); + } + part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Credential" ); + if (null != part) + { + if (null != (contents = part.item( 0 ))) + request.setCredential( contents.getFirstChild().getNodeValue()); + } - // [C] Get a list of all Grant elements in an AccessControlList - part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Grant" ); - if (null != part) - { - S3AccessControlList engineAcl = new S3AccessControlList(); - count = part.getLength(); + // [B] Get a list of all 'Metadata' elements + part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Metadata" ); + if (null != part) + { + count = part.getLength(); + S3MetaDataEntry[] metaEntry = new S3MetaDataEntry[ count ]; + for( int i=0; i < count; i++ ) { - parent = part.item(i); - S3Grant engineGrant = new S3Grant(); + parent = part.item(i); + metaEntry[i] = new S3MetaDataEntry(); - // -> get a list of all the children elements of the 'Grant' parent element - if (null != (children = parent.getChildNodes())) - { - int numChildren = 
children.getLength(); - for( int j=0; j < numChildren; j++ ) - { - contents = children.item( j ); - element = contents.getNodeName().trim(); - if ( element.endsWith( "Grantee" )) - { - NamedNodeMap attbs = contents.getAttributes(); - if (null != attbs) - { - Node type = attbs.getNamedItemNS( "http://www.w3.org/2001/XMLSchema-instance", "type" ); - if ( null != type ) - temp = type.getFirstChild().getNodeValue().trim(); - else temp = null; - - if ( null != temp && temp.equalsIgnoreCase( "CanonicalUser" )) - { - engineGrant.setGrantee(SAcl.GRANTEE_USER); - engineGrant.setCanonicalUserID( getChildNodeValue( contents, "ID" )); - } - else throw new UnsupportedOperationException( "Missing http://www.w3.org/2001/XMLSchema-instance:type value" ); - } - } - else if (element.endsWith( "Permission" )) - { - temp = contents.getFirstChild().getNodeValue().trim(); - if (temp.equalsIgnoreCase("READ" )) engineGrant.setPermission(SAcl.PERMISSION_READ); - else if (temp.equalsIgnoreCase("WRITE" )) engineGrant.setPermission(SAcl.PERMISSION_WRITE); - else if (temp.equalsIgnoreCase("READ_ACP" )) engineGrant.setPermission(SAcl.PERMISSION_READ_ACL); - else if (temp.equalsIgnoreCase("WRITE_ACP" )) engineGrant.setPermission(SAcl.PERMISSION_WRITE_ACL); - else if (temp.equalsIgnoreCase("FULL_CONTROL")) engineGrant.setPermission(SAcl.PERMISSION_FULL); - else throw new UnsupportedOperationException( "Unsupported permission: " + temp ); - } - } - engineAcl.addGrant( engineGrant ); - } + // -> get a list of all the children elements of the 'Metadata' parent element + if (null != (children = parent.getChildNodes())) + { + int numChildren = children.getLength(); + for( int j=0; j < numChildren; j++ ) + { + contents = children.item( j ); + element = contents.getNodeName().trim(); + if ( element.endsWith( "Name" )) + { + temp = contents.getFirstChild().getNodeValue(); + if (null != temp) metaEntry[i].setName( temp ); + } + else if (element.endsWith( "Value" )) + { + temp = contents.getFirstChild().getNodeValue(); + if (null != temp) metaEntry[i].setValue( temp ); + } + } + } } - request.setAcl( engineAcl ); - } - return request; - } - - /** - * Have to deal with XML with and without namespaces. - */ - public static NodeList getElement( Document doc, String namespace, String tagName ) - { - NodeList part = doc.getElementsByTagNameNS( namespace, tagName ); - if (null == part || 0 == part.getLength()) part = doc.getElementsByTagName( tagName ); - - return part; - } + request.setMetaEntries( metaEntry ); + } - /** - * Looking for the value of a specific child of the given parent node. 
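Editor's sketch: the getElement(...) helper above implements exactly the fallback its javadoc describes — real S3 clients qualify elements with the 2006-03-01 namespace, while hand-built payloads often omit it. A standalone demonstration of the same two-step lookup follows; the demo class and sample XML are made up for illustration.

    import java.io.ByteArrayInputStream;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.NodeList;

    public class NsTolerantLookupDemo {
        static final String NS = "http://s3.amazonaws.com/doc/2006-03-01/";

        // Same fallback idea as getElement(...): try the namespace-qualified
        // lookup first, then fall back to the unqualified tag name.
        static NodeList find(Document doc, String tag) {
            NodeList hits = doc.getElementsByTagNameNS(NS, tag);
            if (hits == null || hits.getLength() == 0) hits = doc.getElementsByTagName(tag);
            return hits;
        }

        public static void main(String[] args) throws Exception {
            String qualified = "<r xmlns='" + NS + "'><Bucket>b1</Bucket></r>";
            String plain     = "<r><Bucket>b2</Bucket></r>";
            for (String xml : new String[] { qualified, plain }) {
                DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
                dbf.setNamespaceAware(true);
                Document doc = dbf.newDocumentBuilder()
                        .parse(new ByteArrayInputStream(xml.getBytes("UTF-8")));
                // Prints b1 for the namespaced document, b2 for the plain one.
                System.out.println(find(doc, "Bucket").item(0).getTextContent());
            }
        }
    }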
- * - * @param parent - * @param childName - * @return - */ - private static String getChildNodeValue( Node parent, String childName ) - { - NodeList children = null; - Node element = null; + // [C] Get a list of all Grant elements in an AccessControlList + part = getElement( doc, "http://s3.amazonaws.com/doc/2006-03-01/", "Grant" ); + if (null != part) + { + S3AccessControlList engineAcl = new S3AccessControlList(); + + count = part.getLength(); + for( int i=0; i < count; i++ ) + { + parent = part.item(i); + S3Grant engineGrant = new S3Grant(); + + // -> get a list of all the children elements of the 'Grant' parent element + if (null != (children = parent.getChildNodes())) + { + int numChildren = children.getLength(); + for( int j=0; j < numChildren; j++ ) + { + contents = children.item( j ); + element = contents.getNodeName().trim(); + if ( element.endsWith( "Grantee" )) + { + NamedNodeMap attbs = contents.getAttributes(); + if (null != attbs) + { + Node type = attbs.getNamedItemNS( "http://www.w3.org/2001/XMLSchema-instance", "type" ); + if ( null != type ) + temp = type.getFirstChild().getNodeValue().trim(); + else temp = null; + + if ( null != temp && temp.equalsIgnoreCase( "CanonicalUser" )) + { + engineGrant.setGrantee(SAcl.GRANTEE_USER); + engineGrant.setCanonicalUserID( getChildNodeValue( contents, "ID" )); + } + else throw new UnsupportedOperationException( "Missing http://www.w3.org/2001/XMLSchema-instance:type value" ); + } + } + else if (element.endsWith( "Permission" )) + { + temp = contents.getFirstChild().getNodeValue().trim(); + if (temp.equalsIgnoreCase("READ" )) engineGrant.setPermission(SAcl.PERMISSION_READ); + else if (temp.equalsIgnoreCase("WRITE" )) engineGrant.setPermission(SAcl.PERMISSION_WRITE); + else if (temp.equalsIgnoreCase("READ_ACP" )) engineGrant.setPermission(SAcl.PERMISSION_READ_ACL); + else if (temp.equalsIgnoreCase("WRITE_ACP" )) engineGrant.setPermission(SAcl.PERMISSION_WRITE_ACL); + else if (temp.equalsIgnoreCase("FULL_CONTROL")) engineGrant.setPermission(SAcl.PERMISSION_FULL); + else throw new UnsupportedOperationException( "Unsupported permission: " + temp ); + } + } + engineAcl.addGrant( engineGrant ); + } + } + request.setAcl( engineAcl ); + } + return request; + } + + /** + * Have to deal with XML with and without namespaces. + */ + public static NodeList getElement( Document doc, String namespace, String tagName ) + { + NodeList part = doc.getElementsByTagNameNS( namespace, tagName ); + if (null == part || 0 == part.getLength()) part = doc.getElementsByTagName( tagName ); + + return part; + } + + /** + * Looking for the value of a specific child of the given parent node. 
+ * + * @param parent + * @param childName + * @return + */ + private static String getChildNodeValue( Node parent, String childName ) + { + NodeList children = null; + Node element = null; + + if (null != (children = parent.getChildNodes())) + { + int numChildren = children.getLength(); + for( int i=0; i < numChildren; i++ ) + { + if (null != (element = children.item( i ))) + { + // -> name may have a namespace on it + String name = element.getNodeName().trim(); + if ( name.endsWith( childName )) + { + String value = element.getFirstChild().getNodeValue(); + if (null != value) value = value.trim(); + return value; + } + } + } + } + return null; + } - if (null != (children = parent.getChildNodes())) - { - int numChildren = children.getLength(); - for( int i=0; i < numChildren; i++ ) - { - if (null != (element = children.item( i ))) - { - // -> name may have a namespace on it - String name = element.getNodeName().trim(); - if ( name.endsWith( childName )) - { - String value = element.getFirstChild().getNodeValue(); - if (null != value) value = value.trim(); - return value; - } - } - } - } - return null; - } - private void logRequest(HttpServletRequest request) { - if(logger.isInfoEnabled()) { - logger.info("Request method: " + request.getMethod()); - logger.info("Request contextPath: " + request.getContextPath()); - logger.info("Request pathInfo: " + request.getPathInfo()); - logger.info("Request pathTranslated: " + request.getPathTranslated()); - logger.info("Request queryString: " + request.getQueryString()); - logger.info("Request requestURI: " + request.getRequestURI()); - logger.info("Request requestURL: " + request.getRequestURL()); - logger.info("Request servletPath: " + request.getServletPath()); - Enumeration headers = request.getHeaderNames(); - if(headers != null) { - while(headers.hasMoreElements()) { - Object headerName = headers.nextElement(); - logger.info("Request header " + headerName + ":" + request.getHeader((String)headerName)); - } - } - - Enumeration params = request.getParameterNames(); - if(params != null) { - while(params.hasMoreElements()) { - Object paramName = params.nextElement(); - logger.info("Request parameter " + paramName + ":" + - request.getParameter((String)paramName)); - } - } - logger.info( "- End of request -" ); - } + if(logger.isInfoEnabled()) { + logger.info("Request method: " + request.getMethod()); + logger.info("Request contextPath: " + request.getContextPath()); + logger.info("Request pathInfo: " + request.getPathInfo()); + logger.info("Request pathTranslated: " + request.getPathTranslated()); + logger.info("Request queryString: " + request.getQueryString()); + logger.info("Request requestURI: " + request.getRequestURI()); + logger.info("Request requestURL: " + request.getRequestURL()); + logger.info("Request servletPath: " + request.getServletPath()); + Enumeration headers = request.getHeaderNames(); + if(headers != null) { + while(headers.hasMoreElements()) { + Object headerName = headers.nextElement(); + logger.info("Request header " + headerName + ":" + request.getHeader((String)headerName)); + } + } + + Enumeration params = request.getParameterNames(); + if(params != null) { + while(params.hasMoreElements()) { + Object paramName = params.nextElement(); + logger.info("Request parameter " + paramName + ":" + + request.getParameter((String)paramName)); + } + } + logger.info( "- End of request -" ); + } } } diff --git a/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java 
b/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java index 8f77916f750..c98de34a698 100644 --- a/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java +++ b/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java @@ -21,19 +21,16 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; -import java.io.PrintWriter; import java.io.Reader; import java.io.StringWriter; import java.io.Writer; -import java.util.ArrayList; import java.text.SimpleDateFormat; import java.util.Calendar; -import java.util.List; +import javax.inject.Inject; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.xml.bind.DatatypeConverter; - import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.stream.XMLStreamException; @@ -52,24 +49,17 @@ import com.cloud.bridge.io.MTOMAwareResultStreamWriter; import com.cloud.bridge.model.BucketPolicyVO; import com.cloud.bridge.model.SAcl; import com.cloud.bridge.model.SAclVO; -import com.cloud.bridge.model.SBucket; import com.cloud.bridge.model.SBucketVO; -import com.cloud.bridge.model.SHost; import com.cloud.bridge.persist.dao.BucketPolicyDao; -import com.cloud.bridge.persist.dao.BucketPolicyDaoImpl; import com.cloud.bridge.persist.dao.MultipartLoadDao; -import com.cloud.bridge.persist.dao.SAclDaoImpl; import com.cloud.bridge.persist.dao.SBucketDao; -import com.cloud.bridge.persist.dao.SBucketDaoImpl; import com.cloud.bridge.service.S3Constants; import com.cloud.bridge.service.S3RestServlet; -import com.cloud.bridge.service.controller.s3.ServiceProvider; import com.cloud.bridge.service.UserContext; import com.cloud.bridge.service.core.s3.S3AccessControlList; import com.cloud.bridge.service.core.s3.S3AccessControlPolicy; -import com.cloud.bridge.service.core.s3.S3AuthParams; -import com.cloud.bridge.service.core.s3.S3BucketAdapter; import com.cloud.bridge.service.core.s3.S3BucketPolicy; +import com.cloud.bridge.service.core.s3.S3BucketPolicy.PolicyAccess; import com.cloud.bridge.service.core.s3.S3CanonicalUser; import com.cloud.bridge.service.core.s3.S3CreateBucketConfiguration; import com.cloud.bridge.service.core.s3.S3CreateBucketRequest; @@ -86,1083 +76,1077 @@ import com.cloud.bridge.service.core.s3.S3ListBucketObjectEntry; import com.cloud.bridge.service.core.s3.S3ListBucketRequest; import com.cloud.bridge.service.core.s3.S3ListBucketResponse; import com.cloud.bridge.service.core.s3.S3MultipartUpload; +import com.cloud.bridge.service.core.s3.S3PolicyAction.PolicyActions; +import com.cloud.bridge.service.core.s3.S3PolicyCondition.ConditionKeys; import com.cloud.bridge.service.core.s3.S3PolicyContext; import com.cloud.bridge.service.core.s3.S3Response; import com.cloud.bridge.service.core.s3.S3SetBucketAccessControlPolicyRequest; -import com.cloud.bridge.service.core.s3.S3BucketPolicy.PolicyAccess; -import com.cloud.bridge.service.core.s3.S3PolicyAction.PolicyActions; -import com.cloud.bridge.service.core.s3.S3PolicyCondition.ConditionKeys; -import com.cloud.bridge.service.exception.InvalidBucketName; import com.cloud.bridge.service.exception.InvalidRequestContentException; import com.cloud.bridge.service.exception.NetworkIOException; import com.cloud.bridge.service.exception.NoSuchObjectException; import com.cloud.bridge.service.exception.ObjectAlreadyExistsException; -import com.cloud.bridge.service.exception.OutOfServiceException; import 
com.cloud.bridge.service.exception.PermissionDeniedException; import com.cloud.bridge.util.Converter; -import com.cloud.bridge.util.DateHelper; -import com.cloud.bridge.util.HeaderParam; +import com.cloud.bridge.util.OrderedPair; import com.cloud.bridge.util.PolicyParser; import com.cloud.bridge.util.StringHelper; -import com.cloud.bridge.util.OrderedPair; -import com.cloud.bridge.util.Triple; import com.cloud.bridge.util.XSerializer; import com.cloud.bridge.util.XSerializerXmlAdapter; import com.cloud.bridge.util.XmlHelper; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.Transaction; public class S3BucketAction implements ServletAction { protected final static Logger logger = Logger.getLogger(S3BucketAction.class); - protected final BucketPolicyDao bPolicyDao = ComponentLocator.inject(BucketPolicyDaoImpl.class); - protected final SBucketDao bucketDao = ComponentLocator.inject(SBucketDaoImpl.class); - + @Inject BucketPolicyDao bPolicyDao; + @Inject SBucketDao bucketDao; + private DocumentBuilderFactory dbf = null; - public S3BucketAction() { - dbf = DocumentBuilderFactory.newInstance(); - dbf.setNamespaceAware( true ); + public S3BucketAction() { + dbf = DocumentBuilderFactory.newInstance(); + dbf.setNamespaceAware( true ); - } - - public void execute(HttpServletRequest request, HttpServletResponse response) - throws IOException, XMLStreamException - { - String method = request.getMethod(); - String queryString = request.getQueryString(); - - if ( method.equalsIgnoreCase("PUT")) - { - if ( queryString != null && queryString.length() > 0 ) - { - if ( queryString.startsWith("acl")) { - executePutBucketAcl(request, response); - return; - } - else if (queryString.startsWith("versioning")) { - executePutBucketVersioning(request, response); - return; - } - else if (queryString.startsWith("policy")) { - executePutBucketPolicy(request, response); - return; - } - else if (queryString.startsWith("logging")) { - executePutBucketLogging(request, response); - return; - } - else if (queryString.startsWith("website")) { - executePutBucketWebsite(request, response); - return; - } - } - executePutBucket(request, response); - } - else if(method.equalsIgnoreCase("GET") || method.equalsIgnoreCase("HEAD")) - { - if (queryString != null && queryString.length() > 0) - { - if ( queryString.startsWith("acl")) { - executeGetBucketAcl(request, response); - return; - } - else if (queryString.startsWith("versioning")) { - executeGetBucketVersioning(request, response); - return; - } - else if (queryString.contains("versions")) { - executeGetBucketObjectVersions(request, response); - return; - } - else if (queryString.startsWith("location")) { - executeGetBucketLocation(request, response); - return; - } - else if (queryString.startsWith("uploads")) { - executeListMultipartUploads(request, response); - return; - } - else if (queryString.startsWith("policy")) { - executeGetBucketPolicy(request, response); - return; - } - else if (queryString.startsWith("logging")) { - executeGetBucketLogging(request, response); - return; - } - else if (queryString.startsWith("website")) { - executeGetBucketWebsite(request, response); - return; - } - } - - String bucketAtr = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - if ( bucketAtr.equals( "/" )) - executeGetAllBuckets(request, response); - else executeGetBucket(request, response); - } - else if (method.equalsIgnoreCase("DELETE")) - { - if (queryString != null && queryString.length() > 0) - { - if ( queryString.startsWith("policy")) { - 
executeDeleteBucketPolicy(request, response); - return; - } - else if (queryString.startsWith("website")) { - executeDeleteBucketWebsite(request, response); - return; - } + } - } - executeDeleteBucket(request, response); - } - else if ( (method.equalsIgnoreCase("POST")) && (queryString.equalsIgnoreCase("delete")) ) - { - executeMultiObjectDelete(request, response); - } - else throw new IllegalArgumentException("Unsupported method in REST request"); - } - - - -private void executeMultiObjectDelete(HttpServletRequest request, HttpServletResponse response) throws IOException{ + @Override + public void execute(HttpServletRequest request, HttpServletResponse response) + throws IOException, XMLStreamException + { + String method = request.getMethod(); + String queryString = request.getQueryString(); - int contentLength = request.getContentLength(); - StringBuffer xmlDeleteResponse = null; - boolean quite = true; - - if(contentLength > 0) - { - InputStream is = null; - String versionID =null; - try { - is = request.getInputStream(); - String xml = StringHelper.stringFromStream(is); - String elements[] = {"Key","VersionId"}; - Document doc = XmlHelper.parse(xml); - Node node = XmlHelper.getRootNode(doc); - - if(node == null) { - System.out.println("Invalid XML document, no root element"); - return; - } - - xmlDeleteResponse = new StringBuffer("" + - ""); - - String bucket = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + if ( method.equalsIgnoreCase("PUT")) + { + if ( queryString != null && queryString.length() > 0 ) + { + if ( queryString.startsWith("acl")) { + executePutBucketAcl(request, response); + return; + } + else if (queryString.startsWith("versioning")) { + executePutBucketVersioning(request, response); + return; + } + else if (queryString.startsWith("policy")) { + executePutBucketPolicy(request, response); + return; + } + else if (queryString.startsWith("logging")) { + executePutBucketLogging(request, response); + return; + } + else if (queryString.startsWith("website")) { + executePutBucketWebsite(request, response); + return; + } + } + executePutBucket(request, response); + } + else if(method.equalsIgnoreCase("GET") || method.equalsIgnoreCase("HEAD")) + { + if (queryString != null && queryString.length() > 0) + { + if ( queryString.startsWith("acl")) { + executeGetBucketAcl(request, response); + return; + } + else if (queryString.startsWith("versioning")) { + executeGetBucketVersioning(request, response); + return; + } + else if (queryString.contains("versions")) { + executeGetBucketObjectVersions(request, response); + return; + } + else if (queryString.startsWith("location")) { + executeGetBucketLocation(request, response); + return; + } + else if (queryString.startsWith("uploads")) { + executeListMultipartUploads(request, response); + return; + } + else if (queryString.startsWith("policy")) { + executeGetBucketPolicy(request, response); + return; + } + else if (queryString.startsWith("logging")) { + executeGetBucketLogging(request, response); + return; + } + else if (queryString.startsWith("website")) { + executeGetBucketWebsite(request, response); + return; + } + } - S3DeleteObjectRequest engineRequest = new S3DeleteObjectRequest(); - engineRequest.setBucketName( bucket ); - is.close(); - - doc.getDocumentElement().normalize(); - NodeList qList = doc.getElementsByTagName("Quiet"); + String bucketAtr = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + if ( bucketAtr.equals( "/" )) + executeGetAllBuckets(request, response); + else executeGetBucket(request, 
response); + } + else if (method.equalsIgnoreCase("DELETE")) + { + if (queryString != null && queryString.length() > 0) + { + if ( queryString.startsWith("policy")) { + executeDeleteBucketPolicy(request, response); + return; + } + else if (queryString.startsWith("website")) { + executeDeleteBucketWebsite(request, response); + return; + } - if (qList.getLength() == 1 ) { - Node qNode= qList.item(0); - if ( qNode.getFirstChild().getNodeValue().equalsIgnoreCase("true") == false ) - quite = false; - - logger.debug("Quite value :" + qNode.getFirstChild().getNodeValue()); - } - - NodeList objList = doc.getElementsByTagName("Object"); - - for (int i = 0; i < objList.getLength(); i++) { + } + executeDeleteBucket(request, response); + } + else if ( (method.equalsIgnoreCase("POST")) && (queryString.equalsIgnoreCase("delete")) ) + { + executeMultiObjectDelete(request, response); + } + else throw new IllegalArgumentException("Unsupported method in REST request"); + } - Node key = objList.item(i); - NodeList key_data = key.getChildNodes(); - - if (key.getNodeType() == Node.ELEMENT_NODE) { - Element eElement = (Element) key; - String key_name = getTagValue(elements[0], eElement); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key_name); - if (key_data.getLength() == 2) { - versionID = getTagValue(elements[1], eElement); - engineRequest.setVersion(versionID); - } - S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); - int resultCode = engineResponse.getResultCode(); - String resutlDesc = engineResponse.getResultDescription(); - if(resultCode == 204) { - if (quite) { // show response depending on quite/verbose - xmlDeleteResponse.append(""+key_name+""); - if (resutlDesc != null) - xmlDeleteResponse.append(resutlDesc); - xmlDeleteResponse.append(""); - } - } - else { - logger.debug("Error in delete ::" + key_name + " eng response:: " + engineResponse.getResultDescription()); - xmlDeleteResponse.append(""+key_name+"" ); - if (resutlDesc != null) - xmlDeleteResponse.append(resutlDesc); - xmlDeleteResponse.append(""); - } - - - } - } - - String version = engineRequest.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - - - } catch (IOException e) { - logger.error("Unable to read request data due to " + e.getMessage(), e); - throw new NetworkIOException(e); - - } finally { - if(is != null) is.close(); - } + private void executeMultiObjectDelete(HttpServletRequest request, HttpServletResponse response) throws IOException{ - xmlDeleteResponse.append(""); - - } - response.setStatus(200); - response.setContentType("text/xml; charset=UTF-8"); - S3RestServlet.endResponse(response, xmlDeleteResponse.toString()); - - } + int contentLength = request.getContentLength(); + StringBuffer xmlDeleteResponse = null; + boolean quite = true; - private String getTagValue(String sTag, Element eElement) { - - NodeList nlList = eElement.getElementsByTagName(sTag).item(0).getChildNodes(); - Node nValue = (Node) nlList.item(0); - return nValue.getNodeValue(); - } - - - /** - * In order to support a policy on the "s3:CreateBucket" action we must be able to set and get - * policies before a bucket is actually created. 
- * - * @param request - * @param response - * @throws IOException - */ - private void executePutBucketPolicy(HttpServletRequest request, HttpServletResponse response) throws IOException - { - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String policy = streamToString( request.getInputStream()); - - // [A] Is there an owner of an existing policy or bucket? + if(contentLength > 0) + { + InputStream is = null; + String versionID =null; + try { + is = request.getInputStream(); + String xml = StringHelper.stringFromStream(is); + String elements[] = {"Key","VersionId"}; + Document doc = XmlHelper.parse(xml); + Node node = XmlHelper.getRootNode(doc); + + if(node == null) { + System.out.println("Invalid XML document, no root element"); + return; + } + + xmlDeleteResponse = new StringBuffer("" + + ""); + + String bucket = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + + S3DeleteObjectRequest engineRequest = new S3DeleteObjectRequest(); + engineRequest.setBucketName( bucket ); + is.close(); + + doc.getDocumentElement().normalize(); + NodeList qList = doc.getElementsByTagName("Quiet"); + + if (qList.getLength() == 1 ) { + Node qNode= qList.item(0); + if ( qNode.getFirstChild().getNodeValue().equalsIgnoreCase("true") == false ) + quite = false; + + logger.debug("Quite value :" + qNode.getFirstChild().getNodeValue()); + } + + NodeList objList = doc.getElementsByTagName("Object"); + + for (int i = 0; i < objList.getLength(); i++) { + + Node key = objList.item(i); + NodeList key_data = key.getChildNodes(); + + if (key.getNodeType() == Node.ELEMENT_NODE) { + Element eElement = (Element) key; + String key_name = getTagValue(elements[0], eElement); + engineRequest.setBucketName(bucket); + engineRequest.setKey(key_name); + + if (key_data.getLength() == 2) { + versionID = getTagValue(elements[1], eElement); + engineRequest.setVersion(versionID); + } + + S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); + int resultCode = engineResponse.getResultCode(); + String resutlDesc = engineResponse.getResultDescription(); + if(resultCode == 204) { + if (quite) { // show response depending on quite/verbose + xmlDeleteResponse.append(""+key_name+""); + if (resutlDesc != null) + xmlDeleteResponse.append(resutlDesc); + xmlDeleteResponse.append(""); + } + } + else { + logger.debug("Error in delete ::" + key_name + " eng response:: " + engineResponse.getResultDescription()); + xmlDeleteResponse.append(""+key_name+"" ); + if (resutlDesc != null) + xmlDeleteResponse.append(resutlDesc); + xmlDeleteResponse.append(""); + } + + + } + } + + String version = engineRequest.getVersion(); + if (null != version) response.addHeader( "x-amz-version-id", version ); + + + } catch (IOException e) { + logger.error("Unable to read request data due to " + e.getMessage(), e); + throw new NetworkIOException(e); + + } finally { + if(is != null) is.close(); + } + + xmlDeleteResponse.append(""); + + } + response.setStatus(200); + response.setContentType("text/xml; charset=UTF-8"); + S3RestServlet.endResponse(response, xmlDeleteResponse.toString()); + + } + + private String getTagValue(String sTag, Element eElement) { + + NodeList nlList = eElement.getElementsByTagName(sTag).item(0).getChildNodes(); + Node nValue = nlList.item(0); + return nValue.getNodeValue(); + } + + + /** + * In order to support a policy on the "s3:CreateBucket" action we must be able to set and get + * policies before a bucket is actually created. 
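Editor's sketch: the executeMultiObjectDelete(...) added above is driven by the POSTed Delete document — an optional Quiet flag plus one Object element per key, each with a Key and an optional VersionId (the new code's quite and resutlDesc identifiers read as typos for quiet and resultDesc; the sketch uses the intended spellings). A self-contained illustration of that request shape and parse, with made-up key names:

    import java.io.ByteArrayInputStream;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;
    import org.w3c.dom.NodeList;

    public class MultiDeleteParseDemo {
        public static void main(String[] args) throws Exception {
            // Shape of an S3 multi-object delete body (values are illustrative).
            String body = "<Delete><Quiet>true</Quiet>"
                    + "<Object><Key>photo.jpg</Key></Object>"
                    + "<Object><Key>doc.txt</Key><VersionId>v2</VersionId></Object>"
                    + "</Delete>";
            Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder()
                    .parse(new ByteArrayInputStream(body.getBytes("UTF-8")));
            // Quiet mode suppresses per-key <Deleted> entries for successes.
            NodeList quietNodes = doc.getElementsByTagName("Quiet");
            boolean quiet = quietNodes.getLength() == 1
                    && "true".equalsIgnoreCase(quietNodes.item(0).getTextContent());
            NodeList objects = doc.getElementsByTagName("Object");
            for (int i = 0; i < objects.getLength(); i++) {
                Element obj = (Element) objects.item(i);
                String key = obj.getElementsByTagName("Key").item(0).getTextContent();
                NodeList vid = obj.getElementsByTagName("VersionId");
                String versionId = vid.getLength() > 0 ? vid.item(0).getTextContent() : null;
                System.out.println("delete key=" + key + " versionId=" + versionId + " quiet=" + quiet);
            }
        }
    }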
+ * + * @param request + * @param response + * @throws IOException + */ + private void executePutBucketPolicy(HttpServletRequest request, HttpServletResponse response) throws IOException + { + String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String policy = streamToString( request.getInputStream()); + + // [A] Is there an owner of an existing policy or bucket? SBucketVO bucket = bucketDao.getByName( bucketName ); String owner = null; - + if ( null != bucket ) { owner = bucket.getOwnerCanonicalId(); } else { try { - owner = bPolicyDao.getByName(bucketName).getOwnerCanonicalID(); - } - catch( Exception e ) {} + owner = bPolicyDao.getByName(bucketName).getOwnerCanonicalID(); + } + catch( Exception e ) {} } - - // [B] "The bucket owner by default has permissions to attach bucket policies to their buckets using PUT Bucket policy." - // -> the bucket owner may want to restrict the IP address from where this can be executed - String client = UserContext.current().getCanonicalUserId(); - S3PolicyContext context = new S3PolicyContext( - PolicyActions.PutBucketPolicy, bucketName); - - switch (S3Engine.verifyPolicy(context)) { - case ALLOW: - break; - case DEFAULT_DENY: - if (null != owner && !client.equals(owner)) { - response.setStatus(405); - return; - } - break; - case DENY: - response.setStatus(403); - return; - } - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); - // [B] Place the policy into the database over writting an existing policy - try { - // -> first make sure that the policy is valid by parsing it - PolicyParser parser = new PolicyParser(); - S3BucketPolicy sbp = parser.parse( policy, bucketName ); - bPolicyDao.deletePolicy(bucketName); - - if (null != policy && !policy.isEmpty()) { - BucketPolicyVO bpolicy = new BucketPolicyVO(bucketName, client, policy); - bpolicy = bPolicyDao.persist(bpolicy); - //policyDao.addPolicy( bucketName, client, policy ); - } - - if (null != sbp) ServiceProvider.getInstance().setBucketPolicy( bucketName, sbp ); - response.setStatus(200); - txn.commit(); - txn.close(); - } - catch( PermissionDeniedException e ) { - logger.error("Put Bucket Policy failed due to " + e.getMessage(), e); - throw e; - } - catch( ParseException e ) { - logger.error("Put Bucket Policy failed due to " + e.getMessage(), e); - throw new PermissionDeniedException( e.toString()); - } - catch( Exception e ) { - logger.error("Put Bucket Policy failed due to " + e.getMessage(), e); - response.setStatus(500); - } - } - - private void executeGetBucketPolicy(HttpServletRequest request, HttpServletResponse response) - { - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + // [B] "The bucket owner by default has permissions to attach bucket policies to their buckets using PUT Bucket policy." + // -> the bucket owner may want to restrict the IP address from where this can be executed + String client = UserContext.current().getCanonicalUserId(); + S3PolicyContext context = new S3PolicyContext( + PolicyActions.PutBucketPolicy, bucketName); - // [A] Is there an owner of an existing policy or bucket? 
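Editor's sketch: the access check in executePutBucketPolicy above follows a three-way pattern — an explicit policy ALLOW proceeds, an explicit DENY returns 403, and DEFAULT_DENY (no applicable statement) falls back to the owner check and returns 405 for a non-owner. A compact restatement of that decision logic; PolicyAccess here is a local stand-in enum, not the project's class, and the status-code contract mirrors only what the handler above does.

    public class PolicyGateSketch {
        enum PolicyAccess { ALLOW, DEFAULT_DENY, DENY }

        // Returns an HTTP status: 200 to proceed, 405 or 403 to refuse.
        static int gate(PolicyAccess verdict, String caller, String owner) {
            switch (verdict) {
            case ALLOW:
                return 200;   // an attached policy explicitly allows the action
            case DEFAULT_DENY:
                // No applicable policy statement: fall back to the owner check,
                // matching "if (null != owner && !client.equals(owner)) 405" above.
                return (owner == null || caller.equals(owner)) ? 200 : 405;
            case DENY:
            default:
                return 403;   // an attached policy explicitly denies the action
            }
        }

        public static void main(String[] args) {
            System.out.println(gate(PolicyAccess.DEFAULT_DENY, "alice", "bob"));   // 405
            System.out.println(gate(PolicyAccess.DEFAULT_DENY, "alice", "alice")); // 200
            System.out.println(gate(PolicyAccess.DENY, "alice", "alice"));         // 403
        }
    }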
- SBucketVO bucket = bucketDao.getByName(bucketName); - String owner = null; + switch (S3Engine.verifyPolicy(context)) { + case ALLOW: + break; - if (null != bucket) { - owner = bucket.getOwnerCanonicalId(); - } else { - try { - owner = bPolicyDao.getByName(bucketName).getOwnerCanonicalID(); - } catch (Exception e) { - } - } + case DEFAULT_DENY: + if (null != owner && !client.equals(owner)) { + response.setStatus(405); + return; + } + break; + case DENY: + response.setStatus(403); + return; + } + Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + // [B] Place the policy into the database over writting an existing policy + try { + // -> first make sure that the policy is valid by parsing it + PolicyParser parser = new PolicyParser(); + S3BucketPolicy sbp = parser.parse( policy, bucketName ); + bPolicyDao.deletePolicy(bucketName); - // [B] - // "The bucket owner by default has permissions to retrieve bucket policies using GET Bucket policy." - // -> the bucket owner may want to restrict the IP address from where - // this can be executed - String client = UserContext.current().getCanonicalUserId(); - S3PolicyContext context = new S3PolicyContext( - PolicyActions.GetBucketPolicy, bucketName); - switch (S3Engine.verifyPolicy(context)) { - case ALLOW: - break; + if (null != policy && !policy.isEmpty()) { + BucketPolicyVO bpolicy = new BucketPolicyVO(bucketName, client, policy); + bpolicy = bPolicyDao.persist(bpolicy); + //policyDao.addPolicy( bucketName, client, policy ); + } - case DEFAULT_DENY: - if (null != owner && !client.equals(owner)) { - response.setStatus(405); - return; - } - break; + if (null != sbp) ServiceProvider.getInstance().setBucketPolicy( bucketName, sbp ); + response.setStatus(200); + txn.commit(); + txn.close(); + } + catch( PermissionDeniedException e ) { + logger.error("Put Bucket Policy failed due to " + e.getMessage(), e); + throw e; + } + catch( ParseException e ) { + logger.error("Put Bucket Policy failed due to " + e.getMessage(), e); + throw new PermissionDeniedException( e.toString()); + } + catch( Exception e ) { + logger.error("Put Bucket Policy failed due to " + e.getMessage(), e); + response.setStatus(500); + } + } - case DENY: - response.setStatus(403); - return; - } + private void executeGetBucketPolicy(HttpServletRequest request, HttpServletResponse response) + { + String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - // [B] Pull the policy from the database if one exists - try { - String policy = bPolicyDao.getByName(bucketName).getPolicy(); - if (null == policy) { - response.setStatus(404); - } else { - response.setStatus(200); - response.setContentType("application/json"); - S3RestServlet.endResponse(response, policy); - } - } catch (Exception e) { - logger.error("Get Bucket Policy failed due to " + e.getMessage(), e); - response.setStatus(500); - } + // [A] Is there an owner of an existing policy or bucket? + SBucketVO bucket = bucketDao.getByName(bucketName); + String owner = null; + + if (null != bucket) { + owner = bucket.getOwnerCanonicalId(); + } else { + try { + owner = bPolicyDao.getByName(bucketName).getOwnerCanonicalID(); + } catch (Exception e) { + } + } + + // [B] + // "The bucket owner by default has permissions to retrieve bucket policies using GET Bucket policy." 
+ // -> the bucket owner may want to restrict the IP address from where + // this can be executed + String client = UserContext.current().getCanonicalUserId(); + S3PolicyContext context = new S3PolicyContext( + PolicyActions.GetBucketPolicy, bucketName); + switch (S3Engine.verifyPolicy(context)) { + case ALLOW: + break; + + case DEFAULT_DENY: + if (null != owner && !client.equals(owner)) { + response.setStatus(405); + return; + } + break; + + case DENY: + response.setStatus(403); + return; + } + + // [B] Pull the policy from the database if one exists + try { + String policy = bPolicyDao.getByName(bucketName).getPolicy(); + if (null == policy) { + response.setStatus(404); + } else { + response.setStatus(200); + response.setContentType("application/json"); + S3RestServlet.endResponse(response, policy); + } + } catch (Exception e) { + logger.error("Get Bucket Policy failed due to " + e.getMessage(), e); + response.setStatus(500); + } } private void executeDeleteBucketPolicy(HttpServletRequest request, - HttpServletResponse response) { - String bucketName = (String) request - .getAttribute(S3Constants.BUCKET_ATTR_KEY); + HttpServletResponse response) { + String bucketName = (String) request + .getAttribute(S3Constants.BUCKET_ATTR_KEY); - SBucketVO bucket = bucketDao.getByName(bucketName); - if (bucket != null) { - String client = UserContext.current().getCanonicalUserId(); - if (!client.equals(bucket.getOwnerCanonicalId())) { - response.setStatus(405); - return; - } - } + SBucketVO bucket = bucketDao.getByName(bucketName); + if (bucket != null) { + String client = UserContext.current().getCanonicalUserId(); + if (!client.equals(bucket.getOwnerCanonicalId())) { + response.setStatus(405); + return; + } + } - try { + try { - String policy = bPolicyDao.getByName(bucketName).getPolicy(); - if (null == policy) { - response.setStatus(204); - } else { - ServiceProvider.getInstance().deleteBucketPolicy(bucketName); - bPolicyDao.deletePolicy(bucketName); - response.setStatus(200); - } - } catch (Exception e) { - logger.error( - "Delete Bucket Policy failed due to " + e.getMessage(), e); - response.setStatus(500); - } + String policy = bPolicyDao.getByName(bucketName).getPolicy(); + if (null == policy) { + response.setStatus(204); + } else { + ServiceProvider.getInstance().deleteBucketPolicy(bucketName); + bPolicyDao.deletePolicy(bucketName); + response.setStatus(200); + } + } catch (Exception e) { + logger.error( + "Delete Bucket Policy failed due to " + e.getMessage(), e); + response.setStatus(500); + } } public void executeGetAllBuckets(HttpServletRequest request, - HttpServletResponse response) throws IOException, - XMLStreamException { - Calendar cal = Calendar.getInstance(); - cal.set(1970, 1, 1); - S3ListAllMyBucketsRequest engineRequest = new S3ListAllMyBucketsRequest(); - engineRequest.setAccessKey(UserContext.current().getAccessKey()); - engineRequest.setRequestTimestamp(cal); - engineRequest.setSignature(""); + HttpServletResponse response) throws IOException, + XMLStreamException { + Calendar cal = Calendar.getInstance(); + cal.set(1970, 1, 1); + S3ListAllMyBucketsRequest engineRequest = new S3ListAllMyBucketsRequest(); + engineRequest.setAccessKey(UserContext.current().getAccessKey()); + engineRequest.setRequestTimestamp(cal); + engineRequest.setSignature(""); - S3ListAllMyBucketsResponse engineResponse = ServiceProvider - .getInstance().getS3Engine().handleRequest(engineRequest); + S3ListAllMyBucketsResponse engineResponse = ServiceProvider + 
.getInstance().getS3Engine().handleRequest(engineRequest); - // To allow the all buckets list to be serialized via Axiom classes - ListAllMyBucketsResponse allBuckets = S3SerializableServiceImplementation - .toListAllMyBucketsResponse(engineResponse); + // To allow the all buckets list to be serialized via Axiom classes + ListAllMyBucketsResponse allBuckets = S3SerializableServiceImplementation + .toListAllMyBucketsResponse(engineResponse); - OutputStream outputStream = response.getOutputStream(); - response.setStatus(200); - response.setContentType("application/xml"); - // The content-type literally should be "application/xml; charset=UTF-8" - // but any compliant JVM supplies utf-8 by default + OutputStream outputStream = response.getOutputStream(); + response.setStatus(200); + response.setContentType("application/xml"); + // The content-type literally should be "application/xml; charset=UTF-8" + // but any compliant JVM supplies utf-8 by default - // MTOMAwareResultStreamWriter resultWriter = new - // MTOMAwareResultStreamWriter ("ListAllMyBucketsResult", outputStream - // ); - // resultWriter.startWrite(); - // resultWriter.writeout(allBuckets); - // resultWriter.stopWrite(); - StringBuffer xml = new StringBuffer(); - xml.append(""); - xml.append(""); - xml.append(""); - xml.append(engineResponse.getOwner().getID()).append(""); - xml.append("") - .append(engineResponse.getOwner().getDisplayName()) - .append(""); - xml.append("").append(""); - SimpleDateFormat sdf = new SimpleDateFormat( - "yyyy-MM-dd'T'HH:mm:ss.SSSZ"); - for (S3ListAllMyBucketsEntry entry : engineResponse.getBuckets()) { - xml.append("").append("").append(entry.getName()) - .append(""); - xml.append("") - .append(sdf.format(entry.getCreationDate().getTime())) - .append(""); - xml.append(""); - } - xml.append("").append(""); - response.setStatus(200); - response.setContentType("text/xml; charset=UTF-8"); - S3RestServlet.endResponse(response, xml.toString()); - - } + // MTOMAwareResultStreamWriter resultWriter = new + // MTOMAwareResultStreamWriter ("ListAllMyBucketsResult", outputStream + // ); + // resultWriter.startWrite(); + // resultWriter.writeout(allBuckets); + // resultWriter.stopWrite(); + StringBuffer xml = new StringBuffer(); + xml.append(""); + xml.append(""); + xml.append(""); + xml.append(engineResponse.getOwner().getID()).append(""); + xml.append("") + .append(engineResponse.getOwner().getDisplayName()) + .append(""); + xml.append("").append(""); + SimpleDateFormat sdf = new SimpleDateFormat( + "yyyy-MM-dd'T'HH:mm:ss.SSSZ"); + for (S3ListAllMyBucketsEntry entry : engineResponse.getBuckets()) { + xml.append("").append("").append(entry.getName()) + .append(""); + xml.append("") + .append(sdf.format(entry.getCreationDate().getTime())) + .append(""); + xml.append(""); + } + xml.append("").append(""); + response.setStatus(200); + response.setContentType("text/xml; charset=UTF-8"); + S3RestServlet.endResponse(response, xml.toString()); - public void executeGetBucket(HttpServletRequest request, HttpServletResponse response) - throws IOException, XMLStreamException - { - S3ListBucketRequest engineRequest = new S3ListBucketRequest(); - engineRequest.setBucketName((String) request - .getAttribute(S3Constants.BUCKET_ATTR_KEY)); - engineRequest.setDelimiter(request.getParameter("delimiter")); - engineRequest.setMarker(request.getParameter("marker")); - engineRequest.setPrefix(request.getParameter("prefix")); + } - int maxKeys = Converter.toInt(request.getParameter("max-keys"), 1000); - 
engineRequest.setMaxKeys(maxKeys); - try { - S3ListBucketResponse engineResponse = ServiceProvider.getInstance() - .getS3Engine().listBucketContents(engineRequest, false); + public void executeGetBucket(HttpServletRequest request, HttpServletResponse response) + throws IOException, XMLStreamException + { + S3ListBucketRequest engineRequest = new S3ListBucketRequest(); + engineRequest.setBucketName((String) request + .getAttribute(S3Constants.BUCKET_ATTR_KEY)); + engineRequest.setDelimiter(request.getParameter("delimiter")); + engineRequest.setMarker(request.getParameter("marker")); + engineRequest.setPrefix(request.getParameter("prefix")); - // To allow the all list buckets result to be serialized via Axiom - // classes - ListBucketResponse oneBucket = S3SerializableServiceImplementation - .toListBucketResponse(engineResponse); + int maxKeys = Converter.toInt(request.getParameter("max-keys"), 1000); + engineRequest.setMaxKeys(maxKeys); + try { + S3ListBucketResponse engineResponse = ServiceProvider.getInstance() + .getS3Engine().listBucketContents(engineRequest, false); - OutputStream outputStream = response.getOutputStream(); - response.setStatus(200); - response.setContentType("application/xml"); - // The content-type literally should be - // "application/xml; charset=UTF-8" - // but any compliant JVM supplies utf-8 by default; + // To allow the all list buckets result to be serialized via Axiom + // classes + ListBucketResponse oneBucket = S3SerializableServiceImplementation + .toListBucketResponse(engineResponse); - MTOMAwareResultStreamWriter resultWriter = new MTOMAwareResultStreamWriter( - "ListBucketResult", outputStream); - resultWriter.startWrite(); - resultWriter.writeout(oneBucket); - resultWriter.stopWrite(); - } catch (NoSuchObjectException nsoe) { - response.setStatus(404); - response.setContentType("application/xml"); + OutputStream outputStream = response.getOutputStream(); + response.setStatus(200); + response.setContentType("application/xml"); + // The content-type literally should be + // "application/xml; charset=UTF-8" + // but any compliant JVM supplies utf-8 by default; - StringBuffer xmlError = new StringBuffer(); - xmlError.append("") - .append("NoSuchBucketThe specified bucket does not exist") - .append("") - .append((String) request - .getAttribute(S3Constants.BUCKET_ATTR_KEY)) - .append("") - .append("1DEADBEEF9") // TODO - .append("abCdeFgHiJ1k2LmN3op4q56r7st89") // TODO - .append(""); - S3RestServlet.endResponse(response, xmlError.toString()); + MTOMAwareResultStreamWriter resultWriter = new MTOMAwareResultStreamWriter( + "ListBucketResult", outputStream); + resultWriter.startWrite(); + resultWriter.writeout(oneBucket); + resultWriter.stopWrite(); + } catch (NoSuchObjectException nsoe) { + response.setStatus(404); + response.setContentType("application/xml"); - } + StringBuffer xmlError = new StringBuffer(); + xmlError.append("") + .append("NoSuchBucketThe specified bucket does not exist") + .append("") + .append((String) request + .getAttribute(S3Constants.BUCKET_ATTR_KEY)) + .append("") + .append("1DEADBEEF9") // TODO + .append("abCdeFgHiJ1k2LmN3op4q56r7st89") // TODO + .append(""); + S3RestServlet.endResponse(response, xmlError.toString()); - } - - public void executeGetBucketAcl(HttpServletRequest request, HttpServletResponse response) - throws IOException, XMLStreamException - { - S3GetBucketAccessControlPolicyRequest engineRequest = new S3GetBucketAccessControlPolicyRequest(); - Calendar cal = Calendar.getInstance(); - cal.set( 1970, 1, 1 ); - 
engineRequest.setAccessKey(UserContext.current().getAccessKey()); - engineRequest.setRequestTimestamp( cal ); - engineRequest.setSignature( "" ); // TODO - Consider providing signature in a future release which allows additional user description - engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY)); + } - S3AccessControlPolicy engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - - // To allow the bucket acl policy result to be serialized via Axiom classes - GetBucketAccessControlPolicyResponse onePolicy = S3SerializableServiceImplementation.toGetBucketAccessControlPolicyResponse( engineResponse ); + } - OutputStream outputStream = response.getOutputStream(); - response.setStatus(200); - response.setContentType("application/xml"); - // The content-type literally should be "application/xml; charset=UTF-8" - // but any compliant JVM supplies utf-8 by default; - - MTOMAwareResultStreamWriter resultWriter = new MTOMAwareResultStreamWriter ("GetBucketAccessControlPolicyResult", outputStream ); - resultWriter.startWrite(); - resultWriter.writeout(onePolicy); - resultWriter.stopWrite(); + public void executeGetBucketAcl(HttpServletRequest request, HttpServletResponse response) + throws IOException, XMLStreamException + { + S3GetBucketAccessControlPolicyRequest engineRequest = new S3GetBucketAccessControlPolicyRequest(); + Calendar cal = Calendar.getInstance(); + cal.set( 1970, 1, 1 ); + engineRequest.setAccessKey(UserContext.current().getAccessKey()); + engineRequest.setRequestTimestamp( cal ); + engineRequest.setSignature( "" ); // TODO - Consider providing signature in a future release which allows additional user description + engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY)); - - } - - public void executeGetBucketVersioning(HttpServletRequest request, HttpServletResponse response) throws IOException - { - // [A] Does the bucket exist? 
- String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String versioningStatus = null; - - if (null == bucketName) { - logger.error( "executeGetBucketVersioning - no bucket name given" ); - response.setStatus( 400 ); - return; - } - - SBucketVO sbucket = bucketDao.getByName( bucketName ); - if (sbucket == null) { - response.setStatus( 404 ); - return; - } - - // [B] The owner may want to restrict the IP address at which this can be performed - String client = UserContext.current().getCanonicalUserId(); - if (!client.equals( sbucket.getOwnerCanonicalId())) - throw new PermissionDeniedException( "Access Denied - only the owner can read bucket versioning" ); + S3AccessControlPolicy engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - S3PolicyContext context = new S3PolicyContext( PolicyActions.GetBucketVersioning, bucketName ); - if (PolicyAccess.DENY == S3Engine.verifyPolicy( context )) { - response.setStatus(403); - return; - } + // To allow the bucket acl policy result to be serialized via Axiom classes + GetBucketAccessControlPolicyResponse onePolicy = S3SerializableServiceImplementation.toGetBucketAccessControlPolicyResponse( engineResponse ); + + OutputStream outputStream = response.getOutputStream(); + response.setStatus(200); + response.setContentType("application/xml"); + // The content-type literally should be "application/xml; charset=UTF-8" + // but any compliant JVM supplies utf-8 by default; + + MTOMAwareResultStreamWriter resultWriter = new MTOMAwareResultStreamWriter ("GetBucketAccessControlPolicyResult", outputStream ); + resultWriter.startWrite(); + resultWriter.writeout(onePolicy); + resultWriter.stopWrite(); - // [C] - switch( sbucket.getVersioningStatus()) { - default: - case 0: versioningStatus = ""; break; - case 1: versioningStatus = "Enabled"; break; - case 2: versioningStatus = "Suspended"; break; - } + } - StringBuffer xml = new StringBuffer(); + public void executeGetBucketVersioning(HttpServletRequest request, HttpServletResponse response) throws IOException + { + // [A] Does the bucket exist? 
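Editor's sketch: executeGetBucketVersioning below maps the stored status code (0 never configured, 1 enabled, 2 suspended) onto the response body. The xml.append("") literals in this copy of the patch have lost their angle-bracket content; the tag names in this sketch are the standard S3 VersioningConfiguration ones, which is an assumption about what the stripped strings contained.

    public class VersioningXmlSketch {
        // Maps the stored status code the way the handler below does.
        static String statusText(int code) {
            switch (code) {
            case 1:  return "Enabled";
            case 2:  return "Suspended";
            default: return "";   // never configured: S3 returns an empty configuration
            }
        }

        static String toXml(int code) {
            String status = statusText(code);
            StringBuffer xml = new StringBuffer();
            xml.append("<?xml version=\"1.0\" encoding=\"utf-8\"?>");
            xml.append("<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">");
            // An empty configuration carries no Status element at all.
            if (status.length() > 0) xml.append("<Status>").append(status).append("</Status>");
            xml.append("</VersioningConfiguration>");
            return xml.toString();
        }

        public static void main(String[] args) {
            for (int code = 0; code <= 2; code++) System.out.println(toXml(code));
        }
    }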
+ String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String versioningStatus = null; + + if (null == bucketName) { + logger.error( "executeGetBucketVersioning - no bucket name given" ); + response.setStatus( 400 ); + return; + } + + SBucketVO sbucket = bucketDao.getByName( bucketName ); + if (sbucket == null) { + response.setStatus( 404 ); + return; + } + + // [B] The owner may want to restrict the IP address at which this can be performed + String client = UserContext.current().getCanonicalUserId(); + if (!client.equals( sbucket.getOwnerCanonicalId())) + throw new PermissionDeniedException( "Access Denied - only the owner can read bucket versioning" ); + + S3PolicyContext context = new S3PolicyContext( PolicyActions.GetBucketVersioning, bucketName ); + if (PolicyAccess.DENY == S3Engine.verifyPolicy( context )) { + response.setStatus(403); + return; + } + + + // [C] + switch( sbucket.getVersioningStatus()) { + default: + case 0: versioningStatus = ""; break; + case 1: versioningStatus = "Enabled"; break; + case 2: versioningStatus = "Suspended"; break; + } + + StringBuffer xml = new StringBuffer(); xml.append( "" ); xml.append( "" ); if (0 < versioningStatus.length()) xml.append( "" ).append( versioningStatus ).append( "" ); xml.append( "" ); - - response.setStatus(200); - response.setContentType("text/xml; charset=UTF-8"); - S3RestServlet.endResponse(response, xml.toString()); - } - - public void executeGetBucketObjectVersions(HttpServletRequest request, HttpServletResponse response) throws IOException - { - S3ListBucketRequest engineRequest = new S3ListBucketRequest(); - String keyMarker = request.getParameter("key-marker"); - String versionIdMarker = request.getParameter("version-id-marker"); - - engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY)); - engineRequest.setDelimiter(request.getParameter("delimiter")); - engineRequest.setMarker( keyMarker ); - engineRequest.setPrefix(request.getParameter("prefix")); - engineRequest.setVersionIdMarker( versionIdMarker ); - - int maxKeys = Converter.toInt(request.getParameter("max-keys"), 1000); - engineRequest.setMaxKeys(maxKeys); - S3ListBucketResponse engineResponse = ServiceProvider.getInstance().getS3Engine().listBucketContents( engineRequest, true ); - - // -> the SOAP version produces different XML - StringBuffer xml = new StringBuffer(); + + response.setStatus(200); + response.setContentType("text/xml; charset=UTF-8"); + S3RestServlet.endResponse(response, xml.toString()); + } + + public void executeGetBucketObjectVersions(HttpServletRequest request, HttpServletResponse response) throws IOException + { + S3ListBucketRequest engineRequest = new S3ListBucketRequest(); + String keyMarker = request.getParameter("key-marker"); + String versionIdMarker = request.getParameter("version-id-marker"); + + engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY)); + engineRequest.setDelimiter(request.getParameter("delimiter")); + engineRequest.setMarker( keyMarker ); + engineRequest.setPrefix(request.getParameter("prefix")); + engineRequest.setVersionIdMarker( versionIdMarker ); + + int maxKeys = Converter.toInt(request.getParameter("max-keys"), 1000); + engineRequest.setMaxKeys(maxKeys); + S3ListBucketResponse engineResponse = ServiceProvider.getInstance().getS3Engine().listBucketContents( engineRequest, true ); + + // -> the SOAP version produces different XML + StringBuffer xml = new StringBuffer(); xml.append( "" ); xml.append( "" ); xml.append( "" 
+        StringBuffer xml = new StringBuffer();
         xml.append( "<?xml version=\"1.0\" encoding=\"utf-8\"?>" );
         xml.append( "<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">" );
         if (0 < versioningStatus.length()) xml.append( "<Status>" ).append( versioningStatus ).append( "</Status>" );
         xml.append( "</VersioningConfiguration>" );
-
-        response.setStatus(200);
-        response.setContentType("text/xml; charset=UTF-8");
-        S3RestServlet.endResponse(response, xml.toString());
-    }
-
-    public void executeGetBucketObjectVersions(HttpServletRequest request, HttpServletResponse response) throws IOException
-    {
-        S3ListBucketRequest engineRequest = new S3ListBucketRequest();
-        String keyMarker = request.getParameter("key-marker");
-        String versionIdMarker = request.getParameter("version-id-marker");
-
-        engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY));
-        engineRequest.setDelimiter(request.getParameter("delimiter"));
-        engineRequest.setMarker( keyMarker );
-        engineRequest.setPrefix(request.getParameter("prefix"));
-        engineRequest.setVersionIdMarker( versionIdMarker );
-
-        int maxKeys = Converter.toInt(request.getParameter("max-keys"), 1000);
-        engineRequest.setMaxKeys(maxKeys);
-        S3ListBucketResponse engineResponse = ServiceProvider.getInstance().getS3Engine().listBucketContents( engineRequest, true );
-
-        // -> the SOAP version produces different XML
-        StringBuffer xml = new StringBuffer();
+
+        response.setStatus(200);
+        response.setContentType("text/xml; charset=UTF-8");
+        S3RestServlet.endResponse(response, xml.toString());
+    }
+
+    public void executeGetBucketObjectVersions(HttpServletRequest request, HttpServletResponse response) throws IOException
+    {
+        S3ListBucketRequest engineRequest = new S3ListBucketRequest();
+        String keyMarker = request.getParameter("key-marker");
+        String versionIdMarker = request.getParameter("version-id-marker");
+
+        engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY));
+        engineRequest.setDelimiter(request.getParameter("delimiter"));
+        engineRequest.setMarker( keyMarker );
+        engineRequest.setPrefix(request.getParameter("prefix"));
+        engineRequest.setVersionIdMarker( versionIdMarker );
+
+        int maxKeys = Converter.toInt(request.getParameter("max-keys"), 1000);
+        engineRequest.setMaxKeys(maxKeys);
+        S3ListBucketResponse engineResponse = ServiceProvider.getInstance().getS3Engine().listBucketContents( engineRequest, true );
+
+        // -> the SOAP version produces different XML
+        StringBuffer xml = new StringBuffer();
         xml.append( "<?xml version=\"1.0\" encoding=\"utf-8\"?>" );
         xml.append( "<ListVersionsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">" );
         xml.append( "<Name>" ).append( engineResponse.getBucketName()).append( "</Name>" );
-
+
         if ( null == keyMarker )
-             xml.append( "<KeyMarker/>" );
+            xml.append( "<KeyMarker/>" );
         else xml.append( "<KeyMarker>" ).append( keyMarker ).append( "</KeyMarker>" );
 
         if ( null == versionIdMarker )
-             xml.append( "<VersionIdMarker/>" );
+            xml.append( "<VersionIdMarker/>" );
         else xml.append( "<VersionIdMarker>" ).append( versionIdMarker ).append( "</VersionIdMarker>" );
 
         xml.append( "<MaxKeys>" ).append( engineResponse.getMaxKeys()).append( "</MaxKeys>" );
         xml.append( "<IsTruncated>" ).append( engineResponse.isTruncated()).append( "</IsTruncated>" );
-
+
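+        // Each entry below serializes either as a <Version> element (Key, VersionId,
+        // IsLatest, LastModified, ETag, Size, StorageClass, Owner) or, for delete
+        // markers, as a shorter <DeleteMarker> element without ETag/Size/StorageClass.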
         S3ListBucketObjectEntry[] versions = engineResponse.getContents();
         for( int i=0; null != versions && i < versions.length; i++ )
         {
-            S3CanonicalUser owner = versions[i].getOwner();
-            boolean isDeletionMarker = versions[i].getIsDeletionMarker();
-            String displayName = owner.getDisplayName();
-            String id = owner.getID();
-
-            if ( isDeletionMarker )
-            {
-                xml.append( "<DeleteMarker>" );
-                xml.append( "<Key>" ).append( versions[i].getKey()).append( "</Key>" );
-                xml.append( "<VersionId>" ).append( versions[i].getVersion()).append( "</VersionId>" );
-                xml.append( "<IsLatest>" ).append( versions[i].getIsLatest()).append( "</IsLatest>" );
-                xml.append( "<LastModified>" ).append( DatatypeConverter.printDateTime( versions[i].getLastModified())).append( "</LastModified>" );
-            }
-            else
-            {   xml.append( "<Version>" );
-                xml.append( "<Key>" ).append( versions[i].getKey()).append( "</Key>" );
-                xml.append( "<VersionId>" ).append( versions[i].getVersion()).append( "</VersionId>" );
-                xml.append( "<IsLatest>" ).append( versions[i].getIsLatest()).append( "</IsLatest>" );
-                xml.append( "<LastModified>" ).append( DatatypeConverter.printDateTime( versions[i].getLastModified())).append( "</LastModified>" );
-                xml.append( "<ETag>" ).append( versions[i].getETag()).append( "</ETag>" );
-                xml.append( "<Size>" ).append( versions[i].getSize()).append( "</Size>" );
-                xml.append( "<StorageClass>" ).append( versions[i].getStorageClass()).append( "</StorageClass>" );
-            }
-
-            xml.append( "<Owner>" );
-            xml.append( "<ID>" ).append( id ).append( "</ID>" );
-            if ( null == displayName )
-                 xml.append( "<DisplayName/>" );
-            else xml.append( "<DisplayName>" ).append( owner.getDisplayName()).append( "</DisplayName>" );
-            xml.append( "</Owner>" );
-
-            if ( isDeletionMarker )
-                 xml.append( "</DeleteMarker>" );
-            else xml.append( "</Version>" );
+            S3CanonicalUser owner = versions[i].getOwner();
+            boolean isDeletionMarker = versions[i].getIsDeletionMarker();
+            String displayName = owner.getDisplayName();
+            String id = owner.getID();
+
+            if ( isDeletionMarker )
+            {
+                xml.append( "<DeleteMarker>" );
+                xml.append( "<Key>" ).append( versions[i].getKey()).append( "</Key>" );
+                xml.append( "<VersionId>" ).append( versions[i].getVersion()).append( "</VersionId>" );
+                xml.append( "<IsLatest>" ).append( versions[i].getIsLatest()).append( "</IsLatest>" );
+                xml.append( "<LastModified>" ).append( DatatypeConverter.printDateTime( versions[i].getLastModified())).append( "</LastModified>" );
+            }
+            else
+            {   xml.append( "<Version>" );
+                xml.append( "<Key>" ).append( versions[i].getKey()).append( "</Key>" );
+                xml.append( "<VersionId>" ).append( versions[i].getVersion()).append( "</VersionId>" );
+                xml.append( "<IsLatest>" ).append( versions[i].getIsLatest()).append( "</IsLatest>" );
+                xml.append( "<LastModified>" ).append( DatatypeConverter.printDateTime( versions[i].getLastModified())).append( "</LastModified>" );
+                xml.append( "<ETag>" ).append( versions[i].getETag()).append( "</ETag>" );
+                xml.append( "<Size>" ).append( versions[i].getSize()).append( "</Size>" );
+                xml.append( "<StorageClass>" ).append( versions[i].getStorageClass()).append( "</StorageClass>" );
+            }
+
+            xml.append( "<Owner>" );
+            xml.append( "<ID>" ).append( id ).append( "</ID>" );
+            if ( null == displayName )
+                 xml.append( "<DisplayName/>" );
+            else xml.append( "<DisplayName>" ).append( owner.getDisplayName()).append( "</DisplayName>" );
+            xml.append( "</Owner>" );
+
+            if ( isDeletionMarker )
+                 xml.append( "</DeleteMarker>" );
+            else xml.append( "</Version>" );
         }
         xml.append( "</ListVersionsResult>" );
-
-        response.setStatus(200);
-        response.setContentType("text/xml; charset=UTF-8");
-        S3RestServlet.endResponse(response, xml.toString());
-    }
-
-    public void executeGetBucketLogging(HttpServletRequest request, HttpServletResponse response) throws IOException {
-        // TODO -- Review this in future. Currently this is a beta feature of S3
-        response.setStatus(405);
-    }
-
-    public void executeGetBucketLocation(HttpServletRequest request, HttpServletResponse response) throws IOException {
-        // TODO - This is a fakery! We don't actually store location in backend
-        StringBuffer xml = new StringBuffer();
-        xml.append( "<?xml version=\"1.0\" encoding=\"utf-8\"?>" );
-        xml.append( "<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">" );
-        // This is the real fakery
-        xml.append( "us-west-2" );
-        xml.append( "</LocationConstraint>" );
-        response.setStatus(200);
-        response.setContentType("text/xml; charset=UTF-8");
-        S3RestServlet.endResponse(response, xml.toString());
-    }
-    public void executeGetBucketWebsite(HttpServletRequest request, HttpServletResponse response) throws IOException {
-        response.setStatus(405);
-    }
+        response.setStatus(200);
+        response.setContentType("text/xml; charset=UTF-8");
+        S3RestServlet.endResponse(response, xml.toString());
+    }
-    public void executeDeleteBucketWebsite(HttpServletRequest request, HttpServletResponse response) throws IOException {
-        response.setStatus(405);
-    }
+    public void executeGetBucketLogging(HttpServletRequest request, HttpServletResponse response) throws IOException {
+        // TODO -- Review this in future. Currently this is a beta feature of S3
+        response.setStatus(405);
+    }
-    public void executePutBucket(HttpServletRequest request, HttpServletResponse response) throws IOException
-    {
-        int contentLength = request.getContentLength();
-        Object objectInContent = null;
-
-        if(contentLength > 0)
-        {
-            InputStream is = null;
-            try {
-                is = request.getInputStream();
-                String xml = StringHelper.stringFromStream(is);
-                Class.forName("com.cloud.bridge.service.core.s3.S3CreateBucketConfiguration");
-                XSerializer serializer = new XSerializer(new XSerializerXmlAdapter());
-                objectInContent = serializer.serializeFrom(xml);
-                if(objectInContent != null && !(objectInContent instanceof S3CreateBucketConfiguration)) {
-                    throw new InvalidRequestContentException("Invalid request content in create-bucket: " + xml);
-                }
-                is.close();
-
-            } catch (IOException e) {
-                logger.error("Unable to read request data due to " + e.getMessage(), e);
-                throw new NetworkIOException(e);
-
-            } catch (ClassNotFoundException e) {
-                logger.error("In a normal world this should never never happen:" + e.getMessage(), e);
-                throw new RuntimeException("A required class was not found in the classpath:" + e.getMessage());
-            }
-            finally {
-                if(is != null) is.close();
-            }
-        }
-
-        S3CreateBucketRequest engineRequest = new S3CreateBucketRequest();
-        engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY));
-        engineRequest.setConfig((S3CreateBucketConfiguration)objectInContent);
-        try {
-            S3CreateBucketResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest);
-            response.addHeader("Location", "/" + engineResponse.getBucketName());
-            response.setContentLength(0);
-            response.setStatus(200);
-            response.flushBuffer();
-        } catch (ObjectAlreadyExistsException oaee) {
-            response.setStatus(409);
-            String xml = "<Error><Code>OperationAborted</Code><Message>A conflicting conditional operation is currently in progress against this resource. Please try again.</Message></Error>";
Please try again.."; - response.setContentType("text/xml; charset=UTF-8"); - S3RestServlet.endResponse(response, xml.toString()); - } - } - - public void executePutBucketAcl(HttpServletRequest request, HttpServletResponse response) throws IOException - { - // [A] Determine that there is an applicable bucket which might have an ACL set + public void executeGetBucketLocation(HttpServletRequest request, HttpServletResponse response) throws IOException { + // TODO - This is a fakery! We don't actually store location in backend + StringBuffer xml = new StringBuffer(); + xml.append( "" ); + xml.append( "" ); + // This is the real fakery + xml.append( "us-west-2" ); + xml.append( "" ); + response.setStatus(200); + response.setContentType("text/xml; charset=UTF-8"); + S3RestServlet.endResponse(response, xml.toString()); + } - String bucketName = (String) request - .getAttribute(S3Constants.BUCKET_ATTR_KEY); - SBucketVO bucket = bucketDao.getByName(bucketName); - String owner = null; - if (null != bucket) - owner = bucket.getOwnerCanonicalId(); - if (null == owner) { - logger.error("ACL update failed since " + bucketName - + " does not exist"); - throw new IOException("ACL update failed"); - } + public void executeGetBucketWebsite(HttpServletRequest request, HttpServletResponse response) throws IOException { + response.setStatus(405); + } - // [B] Obtain the grant request which applies to the acl request string. - // This latter is supplied as the value of the x-amz-acl header. + public void executeDeleteBucketWebsite(HttpServletRequest request, HttpServletResponse response) throws IOException { + response.setStatus(405); + } - S3SetBucketAccessControlPolicyRequest engineRequest = new S3SetBucketAccessControlPolicyRequest(); - S3Grant grantRequest = new S3Grant(); - S3AccessControlList aclRequest = new S3AccessControlList(); + public void executePutBucket(HttpServletRequest request, HttpServletResponse response) throws IOException + { + int contentLength = request.getContentLength(); + Object objectInContent = null; - String aclRequestString = request.getHeader("x-amz-acl"); - OrderedPair accessControlsForBucketOwner = SAclVO.getCannedAccessControls(aclRequestString, "SBucket"); - grantRequest.setPermission(accessControlsForBucketOwner.getFirst()); - grantRequest.setGrantee(accessControlsForBucketOwner.getSecond()); - grantRequest.setCanonicalUserID(owner); - aclRequest.addGrant(grantRequest); - engineRequest.setAcl(aclRequest); - engineRequest.setBucketName(bucketName); + if(contentLength > 0) + { + InputStream is = null; + try { + is = request.getInputStream(); + String xml = StringHelper.stringFromStream(is); + Class.forName("com.cloud.bridge.service.core.s3.S3CreateBucketConfiguration"); + XSerializer serializer = new XSerializer(new XSerializerXmlAdapter()); + objectInContent = serializer.serializeFrom(xml); + if(objectInContent != null && !(objectInContent instanceof S3CreateBucketConfiguration)) { + throw new InvalidRequestContentException("Invalid request content in create-bucket: " + xml); + } + is.close(); - // [C] Allow an S3Engine to handle the - // S3SetBucketAccessControlPolicyRequest - S3Response engineResponse = ServiceProvider.getInstance().getS3Engine() - .handleRequest(engineRequest); - response.setStatus(engineResponse.getResultCode()); + } catch (IOException e) { + logger.error("Unable to read request data due to " + e.getMessage(), e); + throw new NetworkIOException(e); - } - - public void executePutBucketVersioning(HttpServletRequest request, HttpServletResponse response) 
+        if(contentLength > 0)
+        {
+            InputStream is = null;
+            try {
+                is = request.getInputStream();
+                String xml = StringHelper.stringFromStream(is);
+                Class.forName("com.cloud.bridge.service.core.s3.S3CreateBucketConfiguration");
+                XSerializer serializer = new XSerializer(new XSerializerXmlAdapter());
+                objectInContent = serializer.serializeFrom(xml);
+                if(objectInContent != null && !(objectInContent instanceof S3CreateBucketConfiguration)) {
+                    throw new InvalidRequestContentException("Invalid request content in create-bucket: " + xml);
+                }
+                is.close();
-        // [C] Allow an S3Engine to handle the
-        // S3SetBucketAccessControlPolicyRequest
-        S3Response engineResponse = ServiceProvider.getInstance().getS3Engine()
-                .handleRequest(engineRequest);
-        response.setStatus(engineResponse.getResultCode());
+            } catch (IOException e) {
+                logger.error("Unable to read request data due to " + e.getMessage(), e);
+                throw new NetworkIOException(e);
-    }
-
-    public void executePutBucketVersioning(HttpServletRequest request, HttpServletResponse response) throws IOException
-    {
-        String bucketName = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
-        String versioningStatus = null;
-        Node item = null;
+            } catch (ClassNotFoundException e) {
+                logger.error("In a normal world this should never happen:" + e.getMessage(), e);
+                throw new RuntimeException("A required class was not found in the classpath:" + e.getMessage());
+            }
+            finally {
+                if(is != null) is.close();
+            }
+        }
-        if (null == bucketName) {
-            logger.error("executePutBucketVersioning - no bucket name given");
-            response.setStatus(400);
-            return;
-        }
+        S3CreateBucketRequest engineRequest = new S3CreateBucketRequest();
+        engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY));
+        engineRequest.setConfig((S3CreateBucketConfiguration)objectInContent);
+        try {
+            S3CreateBucketResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest);
+            response.addHeader("Location", "/" + engineResponse.getBucketName());
+            response.setContentLength(0);
+            response.setStatus(200);
+            response.flushBuffer();
+        } catch (ObjectAlreadyExistsException oaee) {
+            response.setStatus(409);
+            String xml = "<Error><Code>OperationAborted</Code><Message>A conflicting conditional operation is currently in progress against this resource. Please try again.</Message></Error>";
+            response.setContentType("text/xml; charset=UTF-8");
+            S3RestServlet.endResponse(response, xml.toString());
+        }
+    }
-        // -> is the XML as defined?
-        try {
-            DocumentBuilder db = dbf.newDocumentBuilder();
-            Document restXML = db.parse(request.getInputStream());
-            NodeList match = S3RestServlet.getElement(restXML,
-                    "http://s3.amazonaws.com/doc/2006-03-01/", "Status");
-            if (0 < match.getLength()) {
-                item = match.item(0);
-                versioningStatus = new String(item.getFirstChild()
-                        .getNodeValue());
-            } else {
-                logger.error("executePutBucketVersioning - cannot find Status tag in XML body");
-                response.setStatus(400);
-                return;
-            }
-        } catch (Exception e) {
-            logger.error(
-                    "executePutBucketVersioning - failed to parse XML due to "
-                            + e.getMessage(), e);
-            response.setStatus(400);
-            return;
-        }
+    public void executePutBucketAcl(HttpServletRequest request, HttpServletResponse response) throws IOException
+    {
+        // [A] Determine that there is an applicable bucket which might have an ACL set
-        try {
-            // Irrespective of what the ACLs say only the owner can turn on
-            // versioning on a bucket.
-            // The bucket owner may want to restrict the IP address from which
-            // this can occur.
-
-            SBucketVO sbucket = bucketDao.getByName(bucketName);
+        String bucketName = (String) request
+                .getAttribute(S3Constants.BUCKET_ATTR_KEY);
+        SBucketVO bucket = bucketDao.getByName(bucketName);
+        String owner = null;
+        if (null != bucket)
+            owner = bucket.getOwnerCanonicalId();
+        if (null == owner) {
+            logger.error("ACL update failed since " + bucketName
+                    + " does not exist");
+            throw new IOException("ACL update failed");
+        }
-            String client = UserContext.current().getCanonicalUserId();
-            if (!client.equals(sbucket.getOwnerCanonicalId()))
-                throw new PermissionDeniedException(
-                        "Access Denied - only the owner can turn on versioing on a bucket");
+        // [B] Obtain the grant request which applies to the acl request string.
+        // This latter is supplied as the value of the x-amz-acl header.
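+        // The x-amz-acl header carries one of the standard S3 canned ACLs, e.g.
+        // "private", "public-read", "public-read-write" or "authenticated-read".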
-            S3PolicyContext context = new S3PolicyContext(
-                    PolicyActions.PutBucketVersioning, bucketName);
-            if (PolicyAccess.DENY == S3Engine.verifyPolicy(context)) {
-                response.setStatus(403);
-                return;
-            }
+        S3SetBucketAccessControlPolicyRequest engineRequest = new S3SetBucketAccessControlPolicyRequest();
+        S3Grant grantRequest = new S3Grant();
+        S3AccessControlList aclRequest = new S3AccessControlList();
-            if (versioningStatus.equalsIgnoreCase("Enabled"))
-                sbucket.setVersioningStatus(1);
-            else if (versioningStatus.equalsIgnoreCase("Suspended"))
-                sbucket.setVersioningStatus(2);
-            else {
-                logger.error("executePutBucketVersioning - unknown state: ["
-                        + versioningStatus + "]");
-                response.setStatus(400);
-                return;
-            }
-            bucketDao.update(sbucket.getId(), sbucket);
+        String aclRequestString = request.getHeader("x-amz-acl");
+        OrderedPair<Integer,Integer> accessControlsForBucketOwner = SAclVO.getCannedAccessControls(aclRequestString, "SBucket");
+        grantRequest.setPermission(accessControlsForBucketOwner.getFirst());
+        grantRequest.setGrantee(accessControlsForBucketOwner.getSecond());
+        grantRequest.setCanonicalUserID(owner);
+        aclRequest.addGrant(grantRequest);
+        engineRequest.setAcl(aclRequest);
+        engineRequest.setBucketName(bucketName);
-        } catch (PermissionDeniedException e) {
-            logger.error(
-                    "executePutBucketVersioning - failed due to "
-                            + e.getMessage(), e);
-            throw e;
+        // [C] Allow an S3Engine to handle the
+        // S3SetBucketAccessControlPolicyRequest
+        S3Response engineResponse = ServiceProvider.getInstance().getS3Engine()
+                .handleRequest(engineRequest);
+        response.setStatus(engineResponse.getResultCode());
-        } catch (Exception e) {
-            logger.error(
-                    "executePutBucketVersioning - failed due to "
-                            + e.getMessage(), e);
-            response.setStatus(500);
-            return;
-        }
-        response.setStatus(200);
-    }
-
-    public void executePutBucketLogging(HttpServletRequest request, HttpServletResponse response) throws IOException {
-        // TODO -- Review this in future. Currently this is a S3 beta feature
-        response.setStatus(501);
-    }
-
-    public void executePutBucketWebsite(HttpServletRequest request, HttpServletResponse response) throws IOException {
-        // TODO -- LoPri - Undertake checks on Put Bucket Website
-        // Tested using configuration \nAllowOverride FileInfo AuthConfig Limit... in httpd.conf
-        // Need some way of using AllowOverride to allow use of .htaccess and then pushing .httaccess file to bucket subdirectory of mount point
-        // Currently has noop effect in the sense that a running apachectl process sees the directory contents without further action
-        response.setStatus(200);
-    }
+    }
-    public void executeDeleteBucket(HttpServletRequest request, HttpServletResponse response) throws IOException
-    {
-        S3DeleteBucketRequest engineRequest = new S3DeleteBucketRequest();
-        engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY));
-        S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest);
-        response.setStatus(engineResponse.getResultCode());
-        response.flushBuffer();
-    }
-
-    /**
-     * Multipart upload is a complex operation with all the options defined by Amazon. Part of the functionality is
-     * provided by the query done against the database. The CommonPrefixes functionality is done the same way
-     * as done in the listBucketContents function (i.e., by iterating though the list to decide which output
-     * element each key is placed).
-     *
-     * @param request
-     * @param response
-     * @throws IOException
-     */
-    public void executeListMultipartUploads(HttpServletRequest request, HttpServletResponse response) throws IOException
-    {
-        // [A] Obtain parameters and do basic bucket verification
-        String bucketName = (String) request
-                .getAttribute(S3Constants.BUCKET_ATTR_KEY);
-        String delimiter = request.getParameter("delimiter");
-        String keyMarker = request.getParameter("key-marker");
-        String prefix = request.getParameter("prefix");
-        int maxUploads = 1000;
-        int nextUploadId = 0;
-        String nextKey = null;
-        boolean isTruncated = false;
-        S3MultipartUpload[] uploads = null;
-        S3MultipartUpload onePart = null;
-        String temp = request.getParameter("max-uploads");
-        if (null != temp) {
-            maxUploads = Integer.parseInt(temp);
-            if (maxUploads > 1000 || maxUploads < 0)
-                maxUploads = 1000;
-        }
+    public void executePutBucketVersioning(HttpServletRequest request, HttpServletResponse response) throws IOException
+    {
+        String bucketName = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
+        String versioningStatus = null;
+        Node item = null;
-        // -> upload-id-marker is ignored unless key-marker is also specified
-        String uploadIdMarker = request.getParameter("upload-id-marker");
-        if (null == keyMarker)
-            uploadIdMarker = null;
+        if (null == bucketName) {
+            logger.error("executePutBucketVersioning - no bucket name given");
+            response.setStatus(400);
+            return;
+        }
-        // -> does the bucket exist, we may need it to verify access permissions
-        SBucketVO bucket = bucketDao.getByName(bucketName);
-        if (bucket == null) {
-            logger.error("listMultipartUpload failed since " + bucketName
-                    + " does not exist");
-            response.setStatus(404);
-            return;
-        }
+        // -> is the XML as defined?
+        try {
+            DocumentBuilder db = dbf.newDocumentBuilder();
+            Document restXML = db.parse(request.getInputStream());
+            NodeList match = S3RestServlet.getElement(restXML,
+                    "http://s3.amazonaws.com/doc/2006-03-01/", "Status");
+            if (0 < match.getLength()) {
+                item = match.item(0);
+                versioningStatus = new String(item.getFirstChild()
+                        .getNodeValue());
+            } else {
+                logger.error("executePutBucketVersioning - cannot find Status tag in XML body");
+                response.setStatus(400);
+                return;
+            }
+        } catch (Exception e) {
+            logger.error(
+                    "executePutBucketVersioning - failed to parse XML due to "
+                            + e.getMessage(), e);
+            response.setStatus(400);
+            return;
+        }
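+        // The parsed body is the standard S3 versioning request form (illustrative):
+        //
+        //   <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+        //       <Status>Enabled</Status>   <!-- or Suspended -->
+        //   </VersioningConfiguration>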
-        S3PolicyContext context = new S3PolicyContext(
-                PolicyActions.ListBucketMultipartUploads, bucketName);
-        context.setEvalParam(ConditionKeys.Prefix, prefix);
-        context.setEvalParam(ConditionKeys.Delimiter, delimiter);
-        S3Engine.verifyAccess(context, "SBucket", bucket.getId(),
-                SAcl.PERMISSION_READ);
+        try {
+            // Irrespective of what the ACLs say only the owner can turn on
+            // versioning on a bucket.
+            // The bucket owner may want to restrict the IP address from which
+            // this can occur.
+
-        // [B] Query the multipart table to get the list of current uploads
-        try {
-            MultipartLoadDao uploadDao = new MultipartLoadDao();
-            OrderedPair<S3MultipartUpload[], Boolean> result = uploadDao
-                    .getInitiatedUploads(bucketName, maxUploads, prefix,
-                            keyMarker, uploadIdMarker);
-            uploads = result.getFirst();
-            isTruncated = result.getSecond().booleanValue();
-        } catch (Exception e) {
-            logger.error(
-                    "List Multipart Uploads failed due to " + e.getMessage(), e);
-            response.setStatus(500);
-        }
+            SBucketVO sbucket = bucketDao.getByName(bucketName);
-        StringBuffer xml = new StringBuffer();
-        xml.append("<?xml version=\"1.0\" encoding=\"utf-8\"?>");
-        xml.append("<ListMultipartUploadsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">");
-        xml.append("<Bucket>").append(bucketName).append("</Bucket>");
-        xml.append("<KeyMarker>").append((null == keyMarker ? "" : keyMarker))
-                .append("</KeyMarker>");
-        xml.append("<UploadIdMarker>")
-                .append((null == uploadIdMarker ? "" : uploadIdMarker))
-                .append("</UploadIdMarker>");
+            String client = UserContext.current().getCanonicalUserId();
+            if (!client.equals(sbucket.getOwnerCanonicalId()))
+                throw new PermissionDeniedException(
+                        "Access Denied - only the owner can turn on versioning on a bucket");
-        // [C] Construct the contents of the <Upload> element
-        StringBuffer partsList = new StringBuffer();
-        for (int i = 0; i < uploads.length; i++) {
-            onePart = uploads[i];
-            if (null == onePart)
-                break;
+            S3PolicyContext context = new S3PolicyContext(
+                    PolicyActions.PutBucketVersioning, bucketName);
+            if (PolicyAccess.DENY == S3Engine.verifyPolicy(context)) {
+                response.setStatus(403);
+                return;
+            }
-            if (delimiter != null && !delimiter.isEmpty()) {
-                // -> is this available only in the CommonPrefixes element?
-                if (StringHelper.substringInBetween(onePart.getKey(), prefix,
-                        delimiter) != null)
-                    continue;
-            }
+            if (versioningStatus.equalsIgnoreCase("Enabled"))
+                sbucket.setVersioningStatus(1);
+            else if (versioningStatus.equalsIgnoreCase("Suspended"))
+                sbucket.setVersioningStatus(2);
+            else {
+                logger.error("executePutBucketVersioning - unknown state: ["
+                        + versioningStatus + "]");
+                response.setStatus(400);
+                return;
+            }
+            bucketDao.update(sbucket.getId(), sbucket);
-            nextKey = onePart.getKey();
-            nextUploadId = onePart.getId();
-            partsList.append("<Upload>");
-            partsList.append("<Key>").append(nextKey).append("</Key>");
-            partsList.append("<UploadId>").append(nextUploadId)
-                    .append("</UploadId>");
-            partsList.append("<Initiator>");
-            partsList.append("<ID>").append(onePart.getAccessKey())
-                    .append("</ID>");
-            partsList.append("<DisplayName></DisplayName>");
-            partsList.append("</Initiator>");
-            partsList.append("<Owner>");
-            partsList.append("<ID>").append(onePart.getAccessKey())
-                    .append("</ID>");
-            partsList.append("<DisplayName></DisplayName>");
-            partsList.append("</Owner>");
-            partsList.append("<StorageClass>STANDARD</StorageClass>");
-            partsList
-                    .append("<Initiated>")
-                    .append(DatatypeConverter.printDateTime(onePart
-                            .getLastModified())).append("</Initiated>");
-            partsList.append("</Upload>");
-        }
+        } catch (PermissionDeniedException e) {
+            logger.error(
+                    "executePutBucketVersioning - failed due to "
+                            + e.getMessage(), e);
+            throw e;
-        // [D] Construct the contents of the <CommonPrefixes> elements (if any)
-        for (int i = 0; i < uploads.length; i++) {
-            onePart = uploads[i];
-            if (null == onePart)
-                break;
+        } catch (Exception e) {
+            logger.error(
+                    "executePutBucketVersioning - failed due to "
+                            + e.getMessage(), e);
+            response.setStatus(500);
+            return;
+        }
+        response.setStatus(200);
+    }
-            if (delimiter != null && !delimiter.isEmpty()) {
-                String subName = StringHelper.substringInBetween(
-                        onePart.getKey(), prefix, delimiter);
-                if (subName != null) {
-                    partsList.append("<CommonPrefixes>");
-                    partsList.append("<Prefix>");
-                    if (prefix != null && prefix.length() > 0)
-                        partsList.append(prefix + delimiter + subName);
-                    else
-                        partsList.append(subName);
-                    partsList.append("</Prefix>");
-                    partsList.append("</CommonPrefixes>");
-                }
-            }
-        }
+    public void executePutBucketLogging(HttpServletRequest request, HttpServletResponse response) throws IOException {
+        // TODO -- Review this in future. Currently this is an S3 beta feature
+        response.setStatus(501);
+    }
-        // [D] Finish off the response
-        xml.append("<NextKeyMarker>").append((null == nextKey ? "" : nextKey))
-                .append("</NextKeyMarker>");
-        xml.append("<NextUploadIdMarker>")
-                .append((0 == nextUploadId ? "" : nextUploadId))
-                .append("</NextUploadIdMarker>");
-        xml.append("<MaxUploads>").append(maxUploads).append("</MaxUploads>");
-        xml.append("<IsTruncated>").append(isTruncated)
-                .append("</IsTruncated>");
+    public void executePutBucketWebsite(HttpServletRequest request, HttpServletResponse response) throws IOException {
+        // TODO -- LoPri - Undertake checks on Put Bucket Website
+        // Tested using configuration \nAllowOverride FileInfo AuthConfig Limit... in httpd.conf
+        // Need some way of using AllowOverride to allow use of .htaccess and then pushing the .htaccess file to bucket subdirectory of mount point
+        // Currently has noop effect in the sense that a running apachectl process sees the directory contents without further action
+        response.setStatus(200);
+    }
-        xml.append(partsList.toString());
-        xml.append("</ListMultipartUploadsResult>");
+    public void executeDeleteBucket(HttpServletRequest request, HttpServletResponse response) throws IOException
+    {
+        S3DeleteBucketRequest engineRequest = new S3DeleteBucketRequest();
+        engineRequest.setBucketName((String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY));
+        S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest);
+        response.setStatus(engineResponse.getResultCode());
+        response.flushBuffer();
+    }
-        response.setStatus(200);
-        response.setContentType("text/xml; charset=UTF-8");
-        S3RestServlet.endResponse(response, xml.toString());
-    }
-
-    private String streamToString( InputStream is ) throws IOException
-    {
-        int n = 0;
-
-        if ( null != is )
-        {
-            Writer writer = new StringWriter();
-            char[] buffer = new char[1024];
-            try {
-                Reader reader = new BufferedReader( new InputStreamReader(is, "UTF-8"));
-                while ((n = reader.read(buffer)) != -1) writer.write(buffer, 0, n);
-            }
-            finally {
-                is.close();
-            }
-            return writer.toString();
-        }
-        else return null;
+    /**
+     * Multipart upload is a complex operation with all the options defined by Amazon. Part of the functionality is
+     * provided by the query done against the database. The CommonPrefixes functionality is done the same way
+     * as in the listBucketContents function (i.e., by iterating through the list to decide in which output
+     * element each key is placed).
+     *
+     * @param request
+     * @param response
+     * @throws IOException
+     */
+    public void executeListMultipartUploads(HttpServletRequest request, HttpServletResponse response) throws IOException
+    {
+        // [A] Obtain parameters and do basic bucket verification
+        String bucketName = (String) request
+                .getAttribute(S3Constants.BUCKET_ATTR_KEY);
+        String delimiter = request.getParameter("delimiter");
+        String keyMarker = request.getParameter("key-marker");
+        String prefix = request.getParameter("prefix");
+        int maxUploads = 1000;
+        int nextUploadId = 0;
+        String nextKey = null;
+        boolean isTruncated = false;
+        S3MultipartUpload[] uploads = null;
+        S3MultipartUpload onePart = null;
+        String temp = request.getParameter("max-uploads");
+        if (null != temp) {
+            maxUploads = Integer.parseInt(temp);
+            if (maxUploads > 1000 || maxUploads < 0)
+                maxUploads = 1000;
+        }
+
+        // -> upload-id-marker is ignored unless key-marker is also specified
+        String uploadIdMarker = request.getParameter("upload-id-marker");
+        if (null == keyMarker)
+            uploadIdMarker = null;
+
+        // -> does the bucket exist, we may need it to verify access permissions
+        SBucketVO bucket = bucketDao.getByName(bucketName);
+        if (bucket == null) {
+            logger.error("listMultipartUpload failed since " + bucketName
+                    + " does not exist");
+            response.setStatus(404);
+            return;
+        }
+
+        S3PolicyContext context = new S3PolicyContext(
+                PolicyActions.ListBucketMultipartUploads, bucketName);
+        context.setEvalParam(ConditionKeys.Prefix, prefix);
+        context.setEvalParam(ConditionKeys.Delimiter, delimiter);
+        S3Engine.verifyAccess(context, "SBucket", bucket.getId(),
+                SAcl.PERMISSION_READ);
+
+        // [B] Query the multipart table to get the list of current uploads
+        try {
+            MultipartLoadDao uploadDao = new MultipartLoadDao();
+            OrderedPair<S3MultipartUpload[], Boolean> result = uploadDao
+                    .getInitiatedUploads(bucketName, maxUploads, prefix,
+                            keyMarker, uploadIdMarker);
+            uploads = result.getFirst();
+            isTruncated = result.getSecond().booleanValue();
+        } catch (Exception e) {
+            logger.error(
+                    "List Multipart Uploads failed due to " + e.getMessage(), e);
+            response.setStatus(500);
+        }
+
+        StringBuffer xml = new StringBuffer();
+        xml.append("<?xml version=\"1.0\" encoding=\"utf-8\"?>");
+        xml.append("<ListMultipartUploadsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">");
+        xml.append("<Bucket>").append(bucketName).append("</Bucket>");
+        xml.append("<KeyMarker>").append((null == keyMarker ? "" : keyMarker))
+                .append("</KeyMarker>");
+        xml.append("<UploadIdMarker>")
+                .append((null == uploadIdMarker ? "" : uploadIdMarker))
+                .append("</UploadIdMarker>");
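+        // Rough shape of the document assembled here (illustrative; the marker and
+        // count elements are appended first, then the collected entries):
+        //
+        //   <ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+        //       <Bucket>b</Bucket> <KeyMarker/> <UploadIdMarker/>
+        //       <NextKeyMarker/> <NextUploadIdMarker/>
+        //       <MaxUploads>1000</MaxUploads> <IsTruncated>false</IsTruncated>
+        //       <Upload><Key>k</Key><UploadId>42</UploadId>...</Upload>
+        //       <CommonPrefixes><Prefix>p</Prefix></CommonPrefixes>
+        //   </ListMultipartUploadsResult>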
+        // [C] Construct the contents of the <Upload> element
+        StringBuffer partsList = new StringBuffer();
+        for (int i = 0; i < uploads.length; i++) {
+            onePart = uploads[i];
+            if (null == onePart)
+                break;
+
+            if (delimiter != null && !delimiter.isEmpty()) {
+                // -> is this available only in the CommonPrefixes element?
+                if (StringHelper.substringInBetween(onePart.getKey(), prefix,
+                        delimiter) != null)
+                    continue;
+            }
+
+            nextKey = onePart.getKey();
+            nextUploadId = onePart.getId();
+            partsList.append("<Upload>");
+            partsList.append("<Key>").append(nextKey).append("</Key>");
+            partsList.append("<UploadId>").append(nextUploadId)
+                    .append("</UploadId>");
+            partsList.append("<Initiator>");
+            partsList.append("<ID>").append(onePart.getAccessKey())
+                    .append("</ID>");
+            partsList.append("<DisplayName></DisplayName>");
+            partsList.append("</Initiator>");
+            partsList.append("<Owner>");
+            partsList.append("<ID>").append(onePart.getAccessKey())
+                    .append("</ID>");
+            partsList.append("<DisplayName></DisplayName>");
+            partsList.append("</Owner>");
+            partsList.append("<StorageClass>STANDARD</StorageClass>");
+            partsList
+                    .append("<Initiated>")
+                    .append(DatatypeConverter.printDateTime(onePart
+                            .getLastModified())).append("</Initiated>");
+            partsList.append("</Upload>");
+        }
+
+        // [D] Construct the contents of the <CommonPrefixes> elements (if any)
+        for (int i = 0; i < uploads.length; i++) {
+            onePart = uploads[i];
+            if (null == onePart)
+                break;
+
+            if (delimiter != null && !delimiter.isEmpty()) {
+                String subName = StringHelper.substringInBetween(
+                        onePart.getKey(), prefix, delimiter);
+                if (subName != null) {
+                    partsList.append("<CommonPrefixes>");
+                    partsList.append("<Prefix>");
+                    if (prefix != null && prefix.length() > 0)
+                        partsList.append(prefix + delimiter + subName);
+                    else
+                        partsList.append(subName);
+                    partsList.append("</Prefix>");
+                    partsList.append("</CommonPrefixes>");
+                }
+            }
+        }
+
+        // [D] Finish off the response
+        xml.append("<NextKeyMarker>").append((null == nextKey ? "" : nextKey))
+                .append("</NextKeyMarker>");
+        xml.append("<NextUploadIdMarker>")
+                .append((0 == nextUploadId ? "" : nextUploadId))
+                .append("</NextUploadIdMarker>");
+        xml.append("<MaxUploads>").append(maxUploads).append("</MaxUploads>");
+        xml.append("<IsTruncated>").append(isTruncated)
+                .append("</IsTruncated>");
+
+        xml.append(partsList.toString());
+        xml.append("</ListMultipartUploadsResult>");
+
+        response.setStatus(200);
+        response.setContentType("text/xml; charset=UTF-8");
+        S3RestServlet.endResponse(response, xml.toString());
+    }
+
+    private String streamToString( InputStream is ) throws IOException
+    {
+        int n = 0;
+
+        if ( null != is )
+        {
+            Writer writer = new StringWriter();
+            char[] buffer = new char[1024];
+            try {
+                Reader reader = new BufferedReader( new InputStreamReader(is, "UTF-8"));
+                while ((n = reader.read(buffer)) != -1) writer.write(buffer, 0, n);
+            }
+            finally {
+                is.close();
+            }
+            return writer.toString();
+        }
+        else return null;
     }
 }
diff --git a/awsapi/src/com/cloud/bridge/service/controller/s3/S3ObjectAction.java b/awsapi/src/com/cloud/bridge/service/controller/s3/S3ObjectAction.java
index ee4cec65e41..89ccf59916c 100644
--- a/awsapi/src/com/cloud/bridge/service/controller/s3/S3ObjectAction.java
+++ b/awsapi/src/com/cloud/bridge/service/controller/s3/S3ObjectAction.java
@@ -28,6 +28,7 @@ import java.util.List;
 import java.util.UUID;
 
 import javax.activation.DataHandler;
+import javax.inject.Inject;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import javax.xml.bind.DatatypeConverter;
@@ -46,11 +47,9 @@ import com.amazon.s3.GetObjectAccessControlPolicyResponse;
 import com.cloud.bridge.io.MTOMAwareResultStreamWriter;
 import com.cloud.bridge.model.SAcl;
 import com.cloud.bridge.model.SAclVO;
-import com.cloud.bridge.model.SBucket;
 import com.cloud.bridge.model.SBucketVO;
 import com.cloud.bridge.persist.dao.MultipartLoadDao;
 import com.cloud.bridge.persist.dao.SBucketDao;
-import com.cloud.bridge.persist.dao.SBucketDaoImpl;
 import com.cloud.bridge.service.S3Constants;
 import com.cloud.bridge.service.S3RestServlet;
 import com.cloud.bridge.service.UserContext;
@@ -68,1200 +67,1198 @@ import com.cloud.bridge.service.core.s3.S3GetObjectResponse;
 import
com.cloud.bridge.service.core.s3.S3Grant; import com.cloud.bridge.service.core.s3.S3MetaDataEntry; import com.cloud.bridge.service.core.s3.S3MultipartPart; +import com.cloud.bridge.service.core.s3.S3PolicyAction.PolicyActions; import com.cloud.bridge.service.core.s3.S3PolicyContext; import com.cloud.bridge.service.core.s3.S3PutObjectInlineRequest; import com.cloud.bridge.service.core.s3.S3PutObjectInlineResponse; -import com.cloud.bridge.service.core.s3.S3PutObjectRequest; import com.cloud.bridge.service.core.s3.S3Response; -import com.cloud.bridge.service.core.s3.S3SetBucketAccessControlPolicyRequest; import com.cloud.bridge.service.core.s3.S3SetObjectAccessControlPolicyRequest; -import com.cloud.bridge.service.core.s3.S3PolicyAction.PolicyActions; import com.cloud.bridge.service.exception.PermissionDeniedException; import com.cloud.bridge.util.Converter; import com.cloud.bridge.util.DateHelper; import com.cloud.bridge.util.HeaderParam; -import com.cloud.bridge.util.ServletRequestDataSource; import com.cloud.bridge.util.OrderedPair; -import com.cloud.utils.component.ComponentLocator; +import com.cloud.bridge.util.ServletRequestDataSource; public class S3ObjectAction implements ServletAction { protected final static Logger logger = Logger.getLogger(S3ObjectAction.class); - protected final SBucketDao bucketDao = ComponentLocator.inject(SBucketDaoImpl.class); + @Inject SBucketDao bucketDao; private DocumentBuilderFactory dbf = null; - - public S3ObjectAction() { - dbf = DocumentBuilderFactory.newInstance(); - dbf.setNamespaceAware( true ); - } + public S3ObjectAction() { + dbf = DocumentBuilderFactory.newInstance(); + dbf.setNamespaceAware( true ); - public void execute(HttpServletRequest request, HttpServletResponse response) - throws IOException, XMLStreamException - { - String method = request.getMethod(); - String queryString = request.getQueryString(); - String copy = null; - - response.addHeader( "x-amz-request-id", UUID.randomUUID().toString()); - - if ( method.equalsIgnoreCase( "GET" )) - { - if ( queryString != null && queryString.length() > 0 ) - { - if (queryString.contains("acl")) executeGetObjectAcl(request, response); - else if (queryString.contains("uploadId")) executeListUploadParts(request, response); - else executeGetObject(request, response); - } - else executeGetObject(request, response); - } - else if (method.equalsIgnoreCase( "PUT" )) - { - if ( queryString != null && queryString.length() > 0 ) - { - if (queryString.contains("acl")) executePutObjectAcl(request, response); - else if (queryString.contains("partNumber")) executeUploadPart(request, response); - else executePutObject(request, response); - } - else if ( null != (copy = request.getHeader( "x-amz-copy-source" ))) - { - executeCopyObject(request, response, copy.trim()); - } - else executePutObject(request, response); - } - else if (method.equalsIgnoreCase( "DELETE" )) - { - if ( queryString != null && queryString.length() > 0 ) - { - if (queryString.contains("uploadId")) executeAbortMultipartUpload(request, response); - else executeDeleteObject(request, response); - } - else executeDeleteObject(request, response); - } - else if (method.equalsIgnoreCase( "HEAD" )) - { - executeHeadObject(request, response); - } - else if (method.equalsIgnoreCase( "POST" )) - { - if ( queryString != null && queryString.length() > 0 ) - { - if (queryString.contains("uploads")) executeInitiateMultipartUpload(request, response); - else if (queryString.contains("uploadId")) executeCompleteMultipartUpload(request, response); - } - else 
if ( request.getAttribute(S3Constants.PLAIN_POST_ACCESS_KEY) !=null ) - executePlainPostObject (request, response); - // TODO - Having implemented the request, now provide an informative HTML page response - else - executePostObject(request, response); - } - else throw new IllegalArgumentException( "Unsupported method in REST request"); - } + } - - private void executeCopyObject(HttpServletRequest request, HttpServletResponse response, String copy) - throws IOException, XMLStreamException - { + @Override + public void execute(HttpServletRequest request, HttpServletResponse response) + throws IOException, XMLStreamException + { + String method = request.getMethod(); + String queryString = request.getQueryString(); + String copy = null; + + response.addHeader( "x-amz-request-id", UUID.randomUUID().toString()); + + if ( method.equalsIgnoreCase( "GET" )) + { + if ( queryString != null && queryString.length() > 0 ) + { + if (queryString.contains("acl")) executeGetObjectAcl(request, response); + else if (queryString.contains("uploadId")) executeListUploadParts(request, response); + else executeGetObject(request, response); + } + else executeGetObject(request, response); + } + else if (method.equalsIgnoreCase( "PUT" )) + { + if ( queryString != null && queryString.length() > 0 ) + { + if (queryString.contains("acl")) executePutObjectAcl(request, response); + else if (queryString.contains("partNumber")) executeUploadPart(request, response); + else executePutObject(request, response); + } + else if ( null != (copy = request.getHeader( "x-amz-copy-source" ))) + { + executeCopyObject(request, response, copy.trim()); + } + else executePutObject(request, response); + } + else if (method.equalsIgnoreCase( "DELETE" )) + { + if ( queryString != null && queryString.length() > 0 ) + { + if (queryString.contains("uploadId")) executeAbortMultipartUpload(request, response); + else executeDeleteObject(request, response); + } + else executeDeleteObject(request, response); + } + else if (method.equalsIgnoreCase( "HEAD" )) + { + executeHeadObject(request, response); + } + else if (method.equalsIgnoreCase( "POST" )) + { + if ( queryString != null && queryString.length() > 0 ) + { + if (queryString.contains("uploads")) executeInitiateMultipartUpload(request, response); + else if (queryString.contains("uploadId")) executeCompleteMultipartUpload(request, response); + } + else if ( request.getAttribute(S3Constants.PLAIN_POST_ACCESS_KEY) !=null ) + executePlainPostObject (request, response); + // TODO - Having implemented the request, now provide an informative HTML page response + else + executePostObject(request, response); + } + else throw new IllegalArgumentException( "Unsupported method in REST request"); + } + + + private void executeCopyObject(HttpServletRequest request, HttpServletResponse response, String copy) + throws IOException, XMLStreamException + { S3CopyObjectRequest engineRequest = new S3CopyObjectRequest(); String versionId = null; - - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String)request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - String sourceBucketName = null; - String sourceKey = null; - // [A] Parse the x-amz-copy-source header into usable pieces - // Check to find a ?versionId= value if any - int index = copy.indexOf( '?' 
); - if (-1 != index) - { - versionId = copy.substring( index+1 ); - if (versionId.startsWith( "versionId=" )) engineRequest.setVersion( versionId.substring( 10 )); - copy = copy.substring( 0, index ); - } - - // The value of copy should look like: "bucket-name/object-name" - index = copy.indexOf( '/' ); - - // In case it looks like "/bucket-name/object-name" discard a leading '/' if it exists - if ( 0 == index ) - { - copy = copy.substring(1); - index = copy.indexOf( '/' ); - } - - if ( -1 == index ) - throw new IllegalArgumentException( "Invalid x-amz-copy-source header value [" + copy + "]" ); - - sourceBucketName = copy.substring( 0, index ); - sourceKey = copy.substring( index+1 ); - - - // [B] Set the object used in the SOAP request so it can do the bulk of the work for us + String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String)request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + String sourceBucketName = null; + String sourceKey = null; + + // [A] Parse the x-amz-copy-source header into usable pieces + // Check to find a ?versionId= value if any + int index = copy.indexOf( '?' ); + if (-1 != index) + { + versionId = copy.substring( index+1 ); + if (versionId.startsWith( "versionId=" )) engineRequest.setVersion( versionId.substring( 10 )); + copy = copy.substring( 0, index ); + } + + // The value of copy should look like: "bucket-name/object-name" + index = copy.indexOf( '/' ); + + // In case it looks like "/bucket-name/object-name" discard a leading '/' if it exists + if ( 0 == index ) + { + copy = copy.substring(1); + index = copy.indexOf( '/' ); + } + + if ( -1 == index ) + throw new IllegalArgumentException( "Invalid x-amz-copy-source header value [" + copy + "]" ); + + sourceBucketName = copy.substring( 0, index ); + sourceKey = copy.substring( index+1 ); + + + // [B] Set the object used in the SOAP request so it can do the bulk of the work for us engineRequest.setSourceBucketName( sourceBucketName ); engineRequest.setSourceKey( sourceKey ); engineRequest.setDestinationBucketName( bucketName ); engineRequest.setDestinationKey( key ); - + engineRequest.setDataDirective( request.getHeader( "x-amz-metadata-directive" )); - engineRequest.setMetaEntries( extractMetaData( request )); - engineRequest.setCannedAccess( request.getHeader( "x-amz-acl" )); - engineRequest.setConditions( conditionalRequest( request, true )); - - - // [C] Do the actual work and return the result - S3CopyObjectResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); - + engineRequest.setMetaEntries( extractMetaData( request )); + engineRequest.setCannedAccess( request.getHeader( "x-amz-acl" )); + engineRequest.setConditions( conditionalRequest( request, true )); + + + // [C] Do the actual work and return the result + S3CopyObjectResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); + versionId = engineResponse.getCopyVersion(); if (null != versionId) response.addHeader( "x-amz-copy-source-version-id", versionId ); versionId = engineResponse.getPutVersion(); if (null != versionId) response.addHeader( "x-amz-version-id", versionId ); - - // To allow the copy object result to be serialized via Axiom classes - CopyObjectResponse allBuckets = S3SerializableServiceImplementation.toCopyObjectResponse( engineResponse ); - - OutputStream outputStream = response.getOutputStream(); - response.setStatus(200); - response.setContentType("application/xml"); - // The content-type 
literally should be "application/xml; charset=UTF-8" - // but any compliant JVM supplies utf-8 by default; - - MTOMAwareResultStreamWriter resultWriter = new MTOMAwareResultStreamWriter ("CopyObjectResult", outputStream ); - resultWriter.startWrite(); - resultWriter.writeout(allBuckets); - resultWriter.stopWrite(); - } + // To allow the copy object result to be serialized via Axiom classes + CopyObjectResponse allBuckets = S3SerializableServiceImplementation.toCopyObjectResponse( engineResponse ); - private void executeGetObjectAcl(HttpServletRequest request, HttpServletResponse response) throws IOException, XMLStreamException - { - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String)request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + OutputStream outputStream = response.getOutputStream(); + response.setStatus(200); + response.setContentType("application/xml"); + // The content-type literally should be "application/xml; charset=UTF-8" + // but any compliant JVM supplies utf-8 by default; - S3GetObjectAccessControlPolicyRequest engineRequest = new S3GetObjectAccessControlPolicyRequest(); - engineRequest.setBucketName( bucketName ); - engineRequest.setKey( key ); - - // -> is this a request for a specific version of the object? look for "versionId=" in the query string - String queryString = request.getQueryString(); - if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); + MTOMAwareResultStreamWriter resultWriter = new MTOMAwareResultStreamWriter ("CopyObjectResult", outputStream ); + resultWriter.startWrite(); + resultWriter.writeout(allBuckets); + resultWriter.stopWrite(); - S3AccessControlPolicy engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - int resultCode = engineResponse.getResultCode(); - if (200 != resultCode) { - response.setStatus( resultCode ); - return; - } - String version = engineResponse.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - - - // To allow the get object acl policy result to be serialized via Axiom classes - GetObjectAccessControlPolicyResponse onePolicy = S3SerializableServiceImplementation.toGetObjectAccessControlPolicyResponse( engineResponse ); - - OutputStream outputStream = response.getOutputStream(); - response.setStatus(200); - response.setContentType("application/xml"); - // The content-type literally should be "application/xml; charset=UTF-8" - // but any compliant JVM supplies utf-8 by default; - - MTOMAwareResultStreamWriter resultWriter = new MTOMAwareResultStreamWriter ("GetObjectAccessControlPolicyResult", outputStream ); - resultWriter.startWrite(); - resultWriter.writeout(onePolicy); - resultWriter.stopWrite(); - } - - private void executePutObjectAcl(HttpServletRequest request, HttpServletResponse response) throws IOException - { - // [A] Determine that there is an applicable bucket which might have an ACL set - - String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String)request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - - SBucketVO bucket = bucketDao.getByName( bucketName ); - String owner = null; - if ( null != bucket ) - owner = bucket.getOwnerCanonicalId(); - if (null == owner) - { - logger.error( "ACL update failed since " + bucketName + " does not exist" ); - throw new IOException("ACL update failed"); - } - if (null == key) - { - logger.error( "ACL update failed since " + bucketName + " does not contain the expected 
key" ); - throw new IOException("ACL update failed"); - } - - // [B] Obtain the grant request which applies to the acl request string. This latter is supplied as the value of the x-amz-acl header. - - S3SetObjectAccessControlPolicyRequest engineRequest = new S3SetObjectAccessControlPolicyRequest(); - S3Grant grantRequest = new S3Grant(); - S3AccessControlList aclRequest = new S3AccessControlList(); - - String aclRequestString = request.getHeader("x-amz-acl"); - OrderedPair accessControlsForObjectOwner = SAclVO.getCannedAccessControls(aclRequestString,"SObject"); - grantRequest.setPermission(accessControlsForObjectOwner.getFirst()); - grantRequest.setGrantee(accessControlsForObjectOwner.getSecond()); - grantRequest.setCanonicalUserID(owner); - aclRequest.addGrant(grantRequest); - engineRequest.setAcl(aclRequest); - engineRequest.setBucketName(bucketName); - engineRequest.setKey(key); - - - // [C] Allow an S3Engine to handle the S3SetObjectAccessControlPolicyRequest - S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - response.setStatus( engineResponse.getResultCode()); - - } + } - private void executeGetObject(HttpServletRequest request, HttpServletResponse response) throws IOException - { - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - - - S3GetObjectRequest engineRequest = new S3GetObjectRequest(); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key); - engineRequest.setInlineData(true); - engineRequest.setReturnData(true); - //engineRequest.setReturnMetadata(true); - engineRequest = setRequestByteRange( request, engineRequest ); - - // -> is this a request for a specific version of the object? look for "versionId=" in the query string - String queryString = request.getQueryString(); - if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); - - S3GetObjectResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); - response.setStatus( engineResponse.getResultCode()); - - if (engineResponse.getResultCode() >=400 ) { - return; - } - String deleteMarker = engineResponse.getDeleteMarker(); - if ( null != deleteMarker ) { - response.addHeader( "x-amz-delete-marker", "true" ); - response.addHeader( "x-amz-version-id", deleteMarker ); - } - else { - String version = engineResponse.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - } - - // -> was the get conditional? 
- if (!conditionPassed( request, response, engineResponse.getLastModified().getTime(), engineResponse.getETag())) - return; - - - // -> is there data to return - // -> from the Amazon REST documentation it appears that Meta data is only returned as part of a HEAD request - //returnMetaData( engineResponse, response ); - - DataHandler dataHandler = engineResponse.getData(); - if (dataHandler != null) { - response.addHeader("ETag", "\"" + engineResponse.getETag() + "\""); - response.addHeader("Last-Modified", DateHelper.getDateDisplayString( - DateHelper.GMT_TIMEZONE, engineResponse.getLastModified().getTime(), "E, d MMM yyyy HH:mm:ss z")); - - response.setContentLength((int)engineResponse.getContentLength()); - S3RestServlet.writeResponse(response, dataHandler.getInputStream()); - } - } - - private void executePutObject(HttpServletRequest request, HttpServletResponse response) throws IOException - { - String continueHeader = request.getHeader( "Expect" ); - if (continueHeader != null && continueHeader.equalsIgnoreCase("100-continue")) { - S3RestServlet.writeResponse(response, "HTTP/1.1 100 Continue\r\n"); - } - - long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0); - - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key); - engineRequest.setContentLength(contentLength); - engineRequest.setMetaEntries( extractMetaData( request )); - engineRequest.setCannedAccess( request.getHeader( "x-amz-acl" )); - - DataHandler dataHandler = new DataHandler(new ServletRequestDataSource(request)); - engineRequest.setData(dataHandler); - - S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - response.setHeader("ETag", "\"" + engineResponse.getETag() + "\""); - String version = engineResponse.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - } - - /** - * Once versioining is turned on then to delete an object requires specifying a version - * parameter. A deletion marker is set once versioning is turned on in a bucket. - */ - private void executeDeleteObject(HttpServletRequest request, HttpServletResponse response) throws IOException - { - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - - S3DeleteObjectRequest engineRequest = new S3DeleteObjectRequest(); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key); - - // -> is this a request for a specific version of the object? look for "versionId=" in the query string - String queryString = request.getQueryString(); - if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); - - S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); - - response.setStatus( engineResponse.getResultCode()); - String version = engineRequest.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - } - - /* - * The purpose of a plain POST operation is to add an object to a specified bucket using HTML forms. - * The capability is for developer and tester convenience providing a simple browser-based upload - * feature as an alternative to using PUTs. 
- * In the case of PUTs the upload information is passed through HTTP headers. However in the case of a - * POST this information must be supplied as form fields. Many of these are mandatory or otherwise - * the POST request will be rejected. - * The requester using the HTML page must submit valid credentials sufficient for checking that - * the bucket to which the object is to be added has WRITE permission for that user. The AWS access - * key field on the form is taken to be synonymous with the user canonical ID for this purpose. - */ - private void executePlainPostObject(HttpServletRequest request, HttpServletResponse response) throws IOException - { - String continueHeader = request.getHeader( "Expect" ); - if (continueHeader != null && continueHeader.equalsIgnoreCase("100-continue")) { - S3RestServlet.writeResponse(response, "HTTP/1.1 100 Continue\r\n"); - } - - long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0); - - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - String accessKey = (String) request.getAttribute(S3Constants.PLAIN_POST_ACCESS_KEY); - String signature = (String) request.getAttribute(S3Constants.PLAIN_POST_SIGNATURE); - S3Grant grant = new S3Grant(); - grant.setCanonicalUserID(accessKey); - grant.setGrantee(SAcl.GRANTEE_USER); - grant.setPermission(SAcl.PERMISSION_FULL); - S3AccessControlList acl = new S3AccessControlList(); - acl.addGrant(grant); - S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key); - engineRequest.setAcl(acl); - engineRequest.setContentLength(contentLength); - engineRequest.setMetaEntries( extractMetaData( request )); - engineRequest.setCannedAccess( request.getHeader( "x-amz-acl" )); - - DataHandler dataHandler = new DataHandler(new ServletRequestDataSource(request)); - engineRequest.setData(dataHandler); - - S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); - response.setHeader("ETag", "\"" + engineResponse.getETag() + "\""); - String version = engineResponse.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - } - - - private void executeHeadObject(HttpServletRequest request, HttpServletResponse response) throws IOException - { - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - - S3GetObjectRequest engineRequest = new S3GetObjectRequest(); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key); - engineRequest.setInlineData(true); // -> need to set so we get ETag etc returned - engineRequest.setReturnData(true); - engineRequest.setReturnMetadata(true); - engineRequest = setRequestByteRange( request, engineRequest ); - - // -> is this a request for a specific version of the object? 
look for "versionId=" in the query string - String queryString = request.getQueryString(); - if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); - - S3GetObjectResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); - response.setStatus( engineResponse.getResultCode()); - - //bucket lookup for non-existance key - - if ( engineResponse.getResultCode() == 404 ) - return; - - String deleteMarker = engineResponse.getDeleteMarker(); - if ( null != deleteMarker ) { - response.addHeader( "x-amz-delete-marker", "true" ); - response.addHeader( "x-amz-version-id", deleteMarker ); - } - else { - String version = engineResponse.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - } - - // -> was the head request conditional? - if (!conditionPassed( request, response, engineResponse.getLastModified().getTime(), engineResponse.getETag())) - return; - - - // -> for a head request we return everything except the data - returnMetaData( engineResponse, response ); - - DataHandler dataHandler = engineResponse.getData(); - if (dataHandler != null) { - response.addHeader("ETag", "\"" + engineResponse.getETag() + "\""); - response.addHeader("Last-Modified", DateHelper.getDateDisplayString( - DateHelper.GMT_TIMEZONE, engineResponse.getLastModified().getTime(), "E, d MMM yyyy HH:mm:ss z")); - - response.setContentLength((int)engineResponse.getContentLength()); - } - } - - // There is a problem with POST since the 'Signature' and 'AccessKey' parameters are not - // determined until we hit this function (i.e., they are encoded in the body of the message - // they are not HTTP request headers). All the values we used to get in the request headers - // are not encoded in the request body. 
- // - // add ETag header computed as Base64 MD5 whenever object is uploaded or updated - // - private void executePostObject( HttpServletRequest request, HttpServletResponse response ) throws IOException - { - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String contentType = request.getHeader( "Content-Type" ); - int boundaryIndex = contentType.indexOf( "boundary=" ); - String boundary = "--" + (contentType.substring( boundaryIndex + 9 )); - String lastBoundary = boundary + "--"; - - InputStreamReader isr = new InputStreamReader( request.getInputStream()); - BufferedReader br = new BufferedReader( isr ); - - StringBuffer temp = new StringBuffer(); - String oneLine = null; - String name = null; - String value = null; - String metaName = null; // -> after stripped off the x-amz-meta- - boolean isMetaTag = false; - int countMeta = 0; - int state = 0; - - // [A] First parse all the parts out of the POST request and message body - // -> bucket name is still encoded in a Host header - S3AuthParams params = new S3AuthParams(); - List metaSet = new ArrayList(); - S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); - engineRequest.setBucketName( bucket ); - - // -> the last body part contains the content that is used to write the S3 object, all - // other body parts are header values - while( null != (oneLine = br.readLine())) - { - if ( oneLine.startsWith( lastBoundary )) - { - // -> this is the data of the object to put - if (0 < temp.length()) - { - value = temp.toString(); - temp.setLength( 0 ); - - engineRequest.setContentLength( value.length()); - engineRequest.setDataAsString( value ); - } - break; - } - else if ( oneLine.startsWith( boundary )) - { - // -> this is the header data - if (0 < temp.length()) - { - value = temp.toString().trim(); - temp.setLength( 0 ); - //System.out.println( "param: " + name + " = " + value ); - - if (name.equalsIgnoreCase( "key" )) { - engineRequest.setKey( value ); - } - else if (name.equalsIgnoreCase( "x-amz-acl" )) { - engineRequest.setCannedAccess( value ); - } - else if (isMetaTag) { - S3MetaDataEntry oneMeta = new S3MetaDataEntry(); - oneMeta.setName( metaName ); - oneMeta.setValue( value ); - metaSet.add( oneMeta ); - countMeta++; - metaName = null; - } - - // -> build up the headers so we can do authentication on this POST - HeaderParam oneHeader = new HeaderParam(); - oneHeader.setName( name ); - oneHeader.setValue( value ); - params.addHeader( oneHeader ); - } - state = 1; - } - else if (1 == state && 0 == oneLine.length()) - { - // -> data of a body part starts here - state = 2; - } - else if (1 == state) - { - // -> the name of the 'name-value' pair is encoded in the Content-Disposition header - if (oneLine.startsWith( "Content-Disposition: form-data;")) - { - isMetaTag = false; - int nameOffset = oneLine.indexOf( "name=" ); - if (-1 != nameOffset) - { - name = oneLine.substring( nameOffset+5 ); - if (name.startsWith( "\"" )) name = name.substring( 1 ); - if (name.endsWith( "\"" )) name = name.substring( 0, name.length()-1 ); - name = name.trim(); - - if (name.startsWith( "x-amz-meta-" )) { - metaName = name.substring( 11 ); - isMetaTag = true; - } - } - } - } - else if (2 == state) - { - // -> the body parts data may take up multiple lines - //System.out.println( oneLine.length() + " body data: " + oneLine ); - temp.append( oneLine ); - } -// else System.out.println( oneLine.length() + " preamble: " + oneLine ); - } - - - // [B] Authenticate the POST request after we have all the headers - try 
{ - S3RestServlet.authenticateRequest( request, params ); - } - catch( Exception e ) { - throw new IOException( e.toString()); - } - - // [C] Perform the request - if (0 < countMeta) engineRequest.setMetaEntries( metaSet.toArray(new S3MetaDataEntry[0])); - S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); - response.setHeader("ETag", "\"" + engineResponse.getETag() + "\""); - String version = engineResponse.getVersion(); - if (null != version) response.addHeader( "x-amz-version-id", version ); - } - - /** - * Save all the information about the multipart upload request in the database so once it is finished - * (in the future) we can create the real S3 object. - * - * @throws IOException - */ - private void executeInitiateMultipartUpload( HttpServletRequest request, HttpServletResponse response ) throws IOException + private void executeGetObjectAcl(HttpServletRequest request, HttpServletResponse response) throws IOException, XMLStreamException { - // This request is via a POST which typically has its auth parameters inside the message - try { - S3RestServlet.authenticateRequest( request, S3RestServlet.extractRequestHeaders( request )); - } - catch( Exception e ) { - throw new IOException( e.toString()); - } + String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String)request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + + S3GetObjectAccessControlPolicyRequest engineRequest = new S3GetObjectAccessControlPolicyRequest(); + engineRequest.setBucketName( bucketName ); + engineRequest.setKey( key ); + + // -> is this a request for a specific version of the object? look for "versionId=" in the query string + String queryString = request.getQueryString(); + if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); + + S3AccessControlPolicy engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); + int resultCode = engineResponse.getResultCode(); + if (200 != resultCode) { + response.setStatus( resultCode ); + return; + } + String version = engineResponse.getVersion(); + if (null != version) response.addHeader( "x-amz-version-id", version ); + + + // To allow the get object acl policy result to be serialized via Axiom classes + GetObjectAccessControlPolicyResponse onePolicy = S3SerializableServiceImplementation.toGetObjectAccessControlPolicyResponse( engineResponse ); + + OutputStream outputStream = response.getOutputStream(); + response.setStatus(200); + response.setContentType("application/xml"); + // The content-type literally should be "application/xml; charset=UTF-8" + // but any compliant JVM supplies utf-8 by default; + + MTOMAwareResultStreamWriter resultWriter = new MTOMAwareResultStreamWriter ("GetObjectAccessControlPolicyResult", outputStream ); + resultWriter.startWrite(); + resultWriter.writeout(onePolicy); + resultWriter.stopWrite(); + } + + private void executePutObjectAcl(HttpServletRequest request, HttpServletResponse response) throws IOException + { + // [A] Determine that there is an applicable bucket which might have an ACL set + + String bucketName = (String)request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String)request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + + SBucketVO bucket = bucketDao.getByName( bucketName ); + String owner = null; + if ( null != bucket ) + owner = bucket.getOwnerCanonicalId(); + if (null == owner) + { + logger.error( "ACL update failed since " + 
bucketName + " does not exist" ); + throw new IOException("ACL update failed"); + } + if (null == key) + { + logger.error( "ACL update failed since " + bucketName + " does not contain the expected key" ); + throw new IOException("ACL update failed"); + } + + // [B] Obtain the grant request which applies to the acl request string. This latter is supplied as the value of the x-amz-acl header. + + S3SetObjectAccessControlPolicyRequest engineRequest = new S3SetObjectAccessControlPolicyRequest(); + S3Grant grantRequest = new S3Grant(); + S3AccessControlList aclRequest = new S3AccessControlList(); + + String aclRequestString = request.getHeader("x-amz-acl"); + OrderedPair accessControlsForObjectOwner = SAclVO.getCannedAccessControls(aclRequestString,"SObject"); + grantRequest.setPermission(accessControlsForObjectOwner.getFirst()); + grantRequest.setGrantee(accessControlsForObjectOwner.getSecond()); + grantRequest.setCanonicalUserID(owner); + aclRequest.addGrant(grantRequest); + engineRequest.setAcl(aclRequest); + engineRequest.setBucketName(bucketName); + engineRequest.setKey(key); + + + // [C] Allow an S3Engine to handle the S3SetObjectAccessControlPolicyRequest + S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); + response.setStatus( engineResponse.getResultCode()); + + } + + private void executeGetObject(HttpServletRequest request, HttpServletResponse response) throws IOException + { + String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + + + S3GetObjectRequest engineRequest = new S3GetObjectRequest(); + engineRequest.setBucketName(bucket); + engineRequest.setKey(key); + engineRequest.setInlineData(true); + engineRequest.setReturnData(true); + //engineRequest.setReturnMetadata(true); + engineRequest = setRequestByteRange( request, engineRequest ); + + // -> is this a request for a specific version of the object? look for "versionId=" in the query string + String queryString = request.getQueryString(); + if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); + + S3GetObjectResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); + response.setStatus( engineResponse.getResultCode()); + + if (engineResponse.getResultCode() >=400 ) { + return; + } + String deleteMarker = engineResponse.getDeleteMarker(); + if ( null != deleteMarker ) { + response.addHeader( "x-amz-delete-marker", "true" ); + response.addHeader( "x-amz-version-id", deleteMarker ); + } + else { + String version = engineResponse.getVersion(); + if (null != version) response.addHeader( "x-amz-version-id", version ); + } + + // -> was the get conditional? 
+ if (!conditionPassed( request, response, engineResponse.getLastModified().getTime(), engineResponse.getETag()))
+ return;
+
+
+ // -> is there data to return
+ // -> from the Amazon REST documentation it appears that Meta data is only returned as part of a HEAD request
+ //returnMetaData( engineResponse, response );
+
+ DataHandler dataHandler = engineResponse.getData();
+ if (dataHandler != null) {
+ response.addHeader("ETag", "\"" + engineResponse.getETag() + "\"");
+ response.addHeader("Last-Modified", DateHelper.getDateDisplayString(
+ DateHelper.GMT_TIMEZONE, engineResponse.getLastModified().getTime(), "E, d MMM yyyy HH:mm:ss z"));
+
+ response.setContentLength((int)engineResponse.getContentLength());
+ S3RestServlet.writeResponse(response, dataHandler.getInputStream());
+ }
+ }
+
+ private void executePutObject(HttpServletRequest request, HttpServletResponse response) throws IOException
+ {
+ String continueHeader = request.getHeader( "Expect" );
+ if (continueHeader != null && continueHeader.equalsIgnoreCase("100-continue")) {
+ S3RestServlet.writeResponse(response, "HTTP/1.1 100 Continue\r\n");
+ }
+
+ long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0);
+
+ String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
+ String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY);
+ S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest();
+ engineRequest.setBucketName(bucket);
+ engineRequest.setKey(key);
+ engineRequest.setContentLength(contentLength);
+ engineRequest.setMetaEntries( extractMetaData( request ));
+ engineRequest.setCannedAccess( request.getHeader( "x-amz-acl" ));
+
+ DataHandler dataHandler = new DataHandler(new ServletRequestDataSource(request));
+ engineRequest.setData(dataHandler);
+
+ S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest);
+ response.setHeader("ETag", "\"" + engineResponse.getETag() + "\"");
+ String version = engineResponse.getVersion();
+ if (null != version) response.addHeader( "x-amz-version-id", version );
+ }
+
+ /**
+ * Once versioning is turned on, deleting an object requires specifying a version
+ * parameter. A deletion marker is set once versioning is turned on in a bucket.
+ */
+ private void executeDeleteObject(HttpServletRequest request, HttpServletResponse response) throws IOException
+ {
+ String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
+ String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY);
+
+ S3DeleteObjectRequest engineRequest = new S3DeleteObjectRequest();
+ engineRequest.setBucketName(bucket);
+ engineRequest.setKey(key);
+
+ // -> is this a request for a specific version of the object? look for "versionId=" in the query string
+ String queryString = request.getQueryString();
+ if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" ));
+
+ S3Response engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest );
+
+ response.setStatus( engineResponse.getResultCode());
+ String version = engineRequest.getVersion();
+ if (null != version) response.addHeader( "x-amz-version-id", version );
+ }
+
+ /*
+ * The purpose of a plain POST operation is to add an object to a specified bucket using HTML forms.
+ * The capability exists for developer and tester convenience, providing a simple browser-based upload
+ * feature as an alternative to using PUTs.
+ * In the case of PUTs the upload information is passed through HTTP headers. However in the case of a + * POST this information must be supplied as form fields. Many of these are mandatory or otherwise + * the POST request will be rejected. + * The requester using the HTML page must submit valid credentials sufficient for checking that + * the bucket to which the object is to be added has WRITE permission for that user. The AWS access + * key field on the form is taken to be synonymous with the user canonical ID for this purpose. + */ + private void executePlainPostObject(HttpServletRequest request, HttpServletResponse response) throws IOException + { + String continueHeader = request.getHeader( "Expect" ); + if (continueHeader != null && continueHeader.equalsIgnoreCase("100-continue")) { + S3RestServlet.writeResponse(response, "HTTP/1.1 100 Continue\r\n"); + } + + long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0); + + String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + String accessKey = (String) request.getAttribute(S3Constants.PLAIN_POST_ACCESS_KEY); + String signature = (String) request.getAttribute(S3Constants.PLAIN_POST_SIGNATURE); + S3Grant grant = new S3Grant(); + grant.setCanonicalUserID(accessKey); + grant.setGrantee(SAcl.GRANTEE_USER); + grant.setPermission(SAcl.PERMISSION_FULL); + S3AccessControlList acl = new S3AccessControlList(); + acl.addGrant(grant); + S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); + engineRequest.setBucketName(bucket); + engineRequest.setKey(key); + engineRequest.setAcl(acl); + engineRequest.setContentLength(contentLength); + engineRequest.setMetaEntries( extractMetaData( request )); + engineRequest.setCannedAccess( request.getHeader( "x-amz-acl" )); + + DataHandler dataHandler = new DataHandler(new ServletRequestDataSource(request)); + engineRequest.setData(dataHandler); + + S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest); + response.setHeader("ETag", "\"" + engineResponse.getETag() + "\""); + String version = engineResponse.getVersion(); + if (null != version) response.addHeader( "x-amz-version-id", version ); + } + + + private void executeHeadObject(HttpServletRequest request, HttpServletResponse response) throws IOException + { + String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + + S3GetObjectRequest engineRequest = new S3GetObjectRequest(); + engineRequest.setBucketName(bucket); + engineRequest.setKey(key); + engineRequest.setInlineData(true); // -> need to set so we get ETag etc returned + engineRequest.setReturnData(true); + engineRequest.setReturnMetadata(true); + engineRequest = setRequestByteRange( request, engineRequest ); + + // -> is this a request for a specific version of the object? 
look for "versionId=" in the query string + String queryString = request.getQueryString(); + if (null != queryString) engineRequest.setVersion( returnParameter( queryString, "versionId=" )); + + S3GetObjectResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest ); + response.setStatus( engineResponse.getResultCode()); + + //bucket lookup for non-existance key + + if ( engineResponse.getResultCode() == 404 ) + return; + + String deleteMarker = engineResponse.getDeleteMarker(); + if ( null != deleteMarker ) { + response.addHeader( "x-amz-delete-marker", "true" ); + response.addHeader( "x-amz-version-id", deleteMarker ); + } + else { + String version = engineResponse.getVersion(); + if (null != version) response.addHeader( "x-amz-version-id", version ); + } + + // -> was the head request conditional? + if (!conditionPassed( request, response, engineResponse.getLastModified().getTime(), engineResponse.getETag())) + return; + + + // -> for a head request we return everything except the data + returnMetaData( engineResponse, response ); + + DataHandler dataHandler = engineResponse.getData(); + if (dataHandler != null) { + response.addHeader("ETag", "\"" + engineResponse.getETag() + "\""); + response.addHeader("Last-Modified", DateHelper.getDateDisplayString( + DateHelper.GMT_TIMEZONE, engineResponse.getLastModified().getTime(), "E, d MMM yyyy HH:mm:ss z")); + + response.setContentLength((int)engineResponse.getContentLength()); + } + } + + // There is a problem with POST since the 'Signature' and 'AccessKey' parameters are not + // determined until we hit this function (i.e., they are encoded in the body of the message + // they are not HTTP request headers). All the values we used to get in the request headers + // are not encoded in the request body. 
+ // + // add ETag header computed as Base64 MD5 whenever object is uploaded or updated + // + private void executePostObject( HttpServletRequest request, HttpServletResponse response ) throws IOException + { + String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String contentType = request.getHeader( "Content-Type" ); + int boundaryIndex = contentType.indexOf( "boundary=" ); + String boundary = "--" + (contentType.substring( boundaryIndex + 9 )); + String lastBoundary = boundary + "--"; + + InputStreamReader isr = new InputStreamReader( request.getInputStream()); + BufferedReader br = new BufferedReader( isr ); + + StringBuffer temp = new StringBuffer(); + String oneLine = null; + String name = null; + String value = null; + String metaName = null; // -> after stripped off the x-amz-meta- + boolean isMetaTag = false; + int countMeta = 0; + int state = 0; + + // [A] First parse all the parts out of the POST request and message body + // -> bucket name is still encoded in a Host header + S3AuthParams params = new S3AuthParams(); + List metaSet = new ArrayList(); + S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); + engineRequest.setBucketName( bucket ); + + // -> the last body part contains the content that is used to write the S3 object, all + // other body parts are header values + while( null != (oneLine = br.readLine())) + { + if ( oneLine.startsWith( lastBoundary )) + { + // -> this is the data of the object to put + if (0 < temp.length()) + { + value = temp.toString(); + temp.setLength( 0 ); + + engineRequest.setContentLength( value.length()); + engineRequest.setDataAsString( value ); + } + break; + } + else if ( oneLine.startsWith( boundary )) + { + // -> this is the header data + if (0 < temp.length()) + { + value = temp.toString().trim(); + temp.setLength( 0 ); + //System.out.println( "param: " + name + " = " + value ); + + if (name.equalsIgnoreCase( "key" )) { + engineRequest.setKey( value ); + } + else if (name.equalsIgnoreCase( "x-amz-acl" )) { + engineRequest.setCannedAccess( value ); + } + else if (isMetaTag) { + S3MetaDataEntry oneMeta = new S3MetaDataEntry(); + oneMeta.setName( metaName ); + oneMeta.setValue( value ); + metaSet.add( oneMeta ); + countMeta++; + metaName = null; + } + + // -> build up the headers so we can do authentication on this POST + HeaderParam oneHeader = new HeaderParam(); + oneHeader.setName( name ); + oneHeader.setValue( value ); + params.addHeader( oneHeader ); + } + state = 1; + } + else if (1 == state && 0 == oneLine.length()) + { + // -> data of a body part starts here + state = 2; + } + else if (1 == state) + { + // -> the name of the 'name-value' pair is encoded in the Content-Disposition header + if (oneLine.startsWith( "Content-Disposition: form-data;")) + { + isMetaTag = false; + int nameOffset = oneLine.indexOf( "name=" ); + if (-1 != nameOffset) + { + name = oneLine.substring( nameOffset+5 ); + if (name.startsWith( "\"" )) name = name.substring( 1 ); + if (name.endsWith( "\"" )) name = name.substring( 0, name.length()-1 ); + name = name.trim(); + + if (name.startsWith( "x-amz-meta-" )) { + metaName = name.substring( 11 ); + isMetaTag = true; + } + } + } + } + else if (2 == state) + { + // -> the body parts data may take up multiple lines + //System.out.println( oneLine.length() + " body data: " + oneLine ); + temp.append( oneLine ); + } +// else System.out.println( oneLine.length() + " preamble: " + oneLine ); + } + + + // [B] Authenticate the POST request after we have all the headers + try 
{
+ S3RestServlet.authenticateRequest( request, params );
+ }
+ catch( Exception e ) {
+ throw new IOException( e.toString());
+ }
+
+ // [C] Perform the request
+ if (0 < countMeta) engineRequest.setMetaEntries( metaSet.toArray(new S3MetaDataEntry[0]));
+ S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest( engineRequest );
+ response.setHeader("ETag", "\"" + engineResponse.getETag() + "\"");
+ String version = engineResponse.getVersion();
+ if (null != version) response.addHeader( "x-amz-version-id", version );
+ }
+
+ /**
+ * Save all the information about the multipart upload request in the database so once it is finished
+ * (in the future) we can create the real S3 object.
+ *
+ * @throws IOException
+ */
+ private void executeInitiateMultipartUpload( HttpServletRequest request, HttpServletResponse response ) throws IOException
+ {
+ // This request is via a POST which typically has its auth parameters inside the message
+ try {
+ S3RestServlet.authenticateRequest( request, S3RestServlet.extractRequestHeaders( request ));
+ }
+ catch( Exception e ) {
+ throw new IOException( e.toString());
+ }
+
+ String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
+ String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY);
+ String cannedAccess = request.getHeader( "x-amz-acl" );
+ S3MetaDataEntry[] meta = extractMetaData( request );
- String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
- String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY);
- String cannedAccess = request.getHeader( "x-amz-acl" );
- S3MetaDataEntry[] meta = extractMetaData( request );
- // -> the S3 engine has easy access to all the privileged checking code
- S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest();
- engineRequest.setBucketName(bucket);
- engineRequest.setKey(key);
- engineRequest.setCannedAccess( cannedAccess );
- engineRequest.setMetaEntries( meta );
- S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().initiateMultipartUpload( engineRequest );
- int result = engineResponse.getResultCode();
- response.setStatus( result );
+ S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest();
+ engineRequest.setBucketName(bucket);
+ engineRequest.setKey(key);
+ engineRequest.setCannedAccess( cannedAccess );
+ engineRequest.setMetaEntries( meta );
+ S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().initiateMultipartUpload( engineRequest );
+ int result = engineResponse.getResultCode();
+ response.setStatus( result );
 if (200 != result) return;
-
+
 // -> there is no SOAP version of this function
- StringBuffer xml = new StringBuffer();
+ StringBuffer xml = new StringBuffer();
 xml.append( "<?xml version=\"1.0\" encoding=\"utf-8\"?>" );
 xml.append( "<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">" );
 xml.append( "<Bucket>" ).append( bucket ).append( "</Bucket>" );
 xml.append( "<Key>" ).append( key ).append( "</Key>" );
 xml.append( "<UploadId>" ).append( engineResponse.getUploadId()).append( "</UploadId>" );
 xml.append( "</InitiateMultipartUploadResult>" );
-
- response.setContentType("text/xml; charset=UTF-8");
- S3RestServlet.endResponse(response, xml.toString());
- }
-
- private void executeUploadPart( HttpServletRequest request, HttpServletResponse response ) throws IOException
- {
- String continueHeader = request.getHeader( "Expect" );
- if (continueHeader != null && continueHeader.equalsIgnoreCase("100-continue")) {
- S3RestServlet.writeResponse(response, "HTTP/1.1 100 Continue\r\n");
- }
-
- String bucket = (String) 
request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); - int partNumber = -1; - int uploadId = -1; - long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0); + response.setContentType("text/xml; charset=UTF-8"); + S3RestServlet.endResponse(response, xml.toString()); + } - String temp = request.getParameter("uploadId"); - if (null != temp) uploadId = Integer.parseInt( temp ); + private void executeUploadPart( HttpServletRequest request, HttpServletResponse response ) throws IOException + { + String continueHeader = request.getHeader( "Expect" ); + if (continueHeader != null && continueHeader.equalsIgnoreCase("100-continue")) { + S3RestServlet.writeResponse(response, "HTTP/1.1 100 Continue\r\n"); + } - temp = request.getParameter("partNumber"); - if (null != temp) partNumber = Integer.parseInt( temp ); - if (partNumber < 1 || partNumber > 10000) { - logger.error("uploadPart invalid part number " + partNumber ); - response.setStatus(416); + String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); + String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY); + int partNumber = -1; + int uploadId = -1; + + long contentLength = Converter.toLong(request.getHeader("Content-Length"), 0); + + String temp = request.getParameter("uploadId"); + if (null != temp) uploadId = Integer.parseInt( temp ); + + temp = request.getParameter("partNumber"); + if (null != temp) partNumber = Integer.parseInt( temp ); + if (partNumber < 1 || partNumber > 10000) { + logger.error("uploadPart invalid part number " + partNumber ); + response.setStatus(416); return; - } - - // -> verification - try { - MultipartLoadDao uploadDao = new MultipartLoadDao(); - if (null == uploadDao.multipartExits( uploadId )) { - response.setStatus(404); - return; - } - - // -> another requirement is that only the upload initiator can upload parts - String initiator = uploadDao.getInitiator( uploadId ); - if (null == initiator || !initiator.equals( UserContext.current().getAccessKey())) { - response.setStatus(403); - return; - } - } - catch( Exception e ) { - logger.error("executeUploadPart failed due to " + e.getMessage(), e); - response.setStatus(500); - return; - } + } - S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key); - engineRequest.setContentLength(contentLength); - DataHandler dataHandler = new DataHandler(new ServletRequestDataSource(request)); - engineRequest.setData(dataHandler); + // -> verification + try { + MultipartLoadDao uploadDao = new MultipartLoadDao(); + if (null == uploadDao.multipartExits( uploadId )) { + response.setStatus(404); + return; + } - S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().saveUploadPart( engineRequest, uploadId, partNumber ); - if (null != engineResponse.getETag()) response.setHeader("ETag", "\"" + engineResponse.getETag() + "\""); - response.setStatus(engineResponse.getResultCode()); - } - - /** - * This function is required to both parsing XML on the request and return XML as part of its result. 
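Between them, executeInitiateMultipartUpload, executeUploadPart and executeCompleteMultipartUpload realize the three REST calls of a multipart upload. A hedged client-side sketch of the middle step (the endpoint, uploadId and part data are invented; request routing to these handlers happens elsewhere in the servlet):

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class UploadPartSketch {
        public static void main(String[] args) throws Exception {
            // Placeholder endpoint; uploadId comes from the initiate call's XML,
            // and partNumber must be in 1..10000 as enforced by executeUploadPart.
            URL url = new URL("http://localhost:8080/bucket/key?uploadId=42&partNumber=1");
            HttpURLConnection put = (HttpURLConnection) url.openConnection();
            put.setRequestMethod("PUT");
            put.setDoOutput(true);
            OutputStream out = put.getOutputStream();
            out.write("part one's bytes".getBytes("UTF-8"));
            out.close();
            System.out.println("upload part -> HTTP " + put.getResponseCode());
        }
    }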
- *
- * @param request
- * @param response
- * @throws IOException
- */
- private void executeCompleteMultipartUpload( HttpServletRequest request, HttpServletResponse response ) throws IOException
- {
- // [A] This request is via a POST which typically has its auth parameters inside the message
- try {
- S3RestServlet.authenticateRequest( request, S3RestServlet.extractRequestHeaders( request ));
- }
- catch( Exception e ) {
- throw new IOException( e.toString());
- }
+ // -> another requirement is that only the upload initiator can upload parts
+ String initiator = uploadDao.getInitiator( uploadId );
+ if (null == initiator || !initiator.equals( UserContext.current().getAccessKey())) {
+ response.setStatus(403);
+ return;
+ }
+ }
+ catch( Exception e ) {
+ logger.error("executeUploadPart failed due to " + e.getMessage(), e);
+ response.setStatus(500);
+ return;
+ }
+
+ S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest();
+ engineRequest.setBucketName(bucket);
+ engineRequest.setKey(key);
+ engineRequest.setContentLength(contentLength);
+ DataHandler dataHandler = new DataHandler(new ServletRequestDataSource(request));
+ engineRequest.setData(dataHandler);
+
+ S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().saveUploadPart( engineRequest, uploadId, partNumber );
+ if (null != engineResponse.getETag()) response.setHeader("ETag", "\"" + engineResponse.getETag() + "\"");
+ response.setStatus(engineResponse.getResultCode());
+ }
+
+ /**
+ * This function is required to both parse XML on the request and return XML as part of its result.
+ *
+ * @param request
+ * @param response
+ * @throws IOException
+ */
+ private void executeCompleteMultipartUpload( HttpServletRequest request, HttpServletResponse response ) throws IOException
+ {
+ // [A] This request is via a POST which typically has its auth parameters inside the message
+ try {
+ S3RestServlet.authenticateRequest( request, S3RestServlet.extractRequestHeaders( request ));
+ }
+ catch( Exception e ) {
+ throw new IOException( e.toString());
+ }
+
+ String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
+ String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY);
+ S3MultipartPart[] parts = null;
+ S3MetaDataEntry[] meta = null;
+ String cannedAccess = null;
+ int uploadId = -1;
- String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
- String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY);
- S3MultipartPart[] parts = null;
- S3MetaDataEntry[] meta = null;
- String cannedAccess = null;
- int uploadId = -1;
- // AWS S3 specifies that the keep alive connection is by sending whitespace characters until done
 // Therefore the XML version prolog is prepended to the stream in advance
 OutputStream outputStream = response.getOutputStream();
 outputStream.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>".getBytes());
- String temp = request.getParameter("uploadId");
- if (null != temp) uploadId = Integer.parseInt( temp );
-
-
- // [B] Look up all the uploaded body parts and related info
- try {
- MultipartLoadDao uploadDao = new MultipartLoadDao();
- if (null == uploadDao.multipartExits( uploadId )) {
- response.setStatus(404);
- returnErrorXML( 404, "NotFound", outputStream );
- return;
- }
-
- // -> another requirement is that only the upload initiator can upload parts
- String initiator = uploadDao.getInitiator( uploadId );
- if (null == initiator || !initiator.equals( 
UserContext.current().getAccessKey())) { - response.setStatus(403); - returnErrorXML( 403, "Forbidden", outputStream ); - return; - } - - parts = uploadDao.getParts( uploadId, 10000, 0 ); - meta = uploadDao.getMeta( uploadId ); - cannedAccess = uploadDao.getCannedAccess( uploadId ); - } - catch( Exception e ) { - logger.error("executeCompleteMultipartUpload failed due to " + e.getMessage(), e); - response.setStatus(500); - returnErrorXML( 500, "InternalError", outputStream ); - return; - } - - - // [C] Parse the given XML body part and perform error checking - OrderedPair match = verifyParts( request.getInputStream(), parts ); - if (200 != match.getFirst().intValue()) { - response.setStatus(match.getFirst().intValue()); - returnErrorXML( match.getFirst().intValue(), match.getSecond(), outputStream ); - return; - } + String temp = request.getParameter("uploadId"); + if (null != temp) uploadId = Integer.parseInt( temp ); - - // [D] Ask the engine to create a newly re-constituted object - S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest(); - engineRequest.setBucketName(bucket); - engineRequest.setKey(key); - engineRequest.setMetaEntries(meta); - engineRequest.setCannedAccess(cannedAccess); - S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().concatentateMultipartUploads( response, engineRequest, parts, outputStream ); - int result = engineResponse.getResultCode(); - // -> free all multipart state since we now have one concatentated object - if (200 == result) ServiceProvider.getInstance().getS3Engine().freeUploadParts( bucket, uploadId, false ); - - // If all successful then clean up all left over parts - // Notice that "" has already been written into the servlet output stream at the beginning of section [A] - if ( 200 == result ) - { - StringBuffer xml = new StringBuffer(); - xml.append( "" ); - xml.append( "" ).append( "http://" + bucket + ".s3.amazonaws.com/" + key ).append( "" ); - xml.append( "" ).append( bucket ).append( "" ); - xml.append( "" ).append( key ).append( "" ); - xml.append( "\"" ).append( engineResponse.getETag()).append( "\"" ); - xml.append( "" ); - String xmlString = xml.toString().replaceAll("^\\s+", ""); // Remove leading whitespace characters - outputStream.write( xmlString.getBytes()); - outputStream.close(); - } - else returnErrorXML( result, null, outputStream ); - } - - private void executeAbortMultipartUpload( HttpServletRequest request, HttpServletResponse response ) throws IOException - { - String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY); - int uploadId = -1; + // [B] Look up all the uploaded body parts and related info + try { + MultipartLoadDao uploadDao = new MultipartLoadDao(); + if (null == uploadDao.multipartExits( uploadId )) { + response.setStatus(404); + returnErrorXML( 404, "NotFound", outputStream ); + return; + } - String temp = request.getParameter("uploadId"); - if (null != temp) uploadId = Integer.parseInt( temp ); - - int result = ServiceProvider.getInstance().getS3Engine().freeUploadParts( bucket, uploadId, true ); + // -> another requirement is that only the upload initiator can upload parts + String initiator = uploadDao.getInitiator( uploadId ); + if (null == initiator || !initiator.equals( UserContext.current().getAccessKey())) { + response.setStatus(403); + returnErrorXML( 403, "Forbidden", outputStream ); + return; + } + + parts = uploadDao.getParts( uploadId, 10000, 0 ); + meta = uploadDao.getMeta( uploadId ); + cannedAccess = uploadDao.getCannedAccess( 
uploadId );
+ }
+ catch( Exception e ) {
+ logger.error("executeCompleteMultipartUpload failed due to " + e.getMessage(), e);
+ response.setStatus(500);
+ returnErrorXML( 500, "InternalError", outputStream );
+ return;
+ }
+
+
+ // [C] Parse the given XML body part and perform error checking
+ OrderedPair<Integer,String> match = verifyParts( request.getInputStream(), parts );
+ if (200 != match.getFirst().intValue()) {
+ response.setStatus(match.getFirst().intValue());
+ returnErrorXML( match.getFirst().intValue(), match.getSecond(), outputStream );
+ return;
+ }
+
+
+ // [D] Ask the engine to create a newly re-constituted object
+ S3PutObjectInlineRequest engineRequest = new S3PutObjectInlineRequest();
+ engineRequest.setBucketName(bucket);
+ engineRequest.setKey(key);
+ engineRequest.setMetaEntries(meta);
+ engineRequest.setCannedAccess(cannedAccess);
+
+ S3PutObjectInlineResponse engineResponse = ServiceProvider.getInstance().getS3Engine().concatentateMultipartUploads( response, engineRequest, parts, outputStream );
+ int result = engineResponse.getResultCode();
+ // -> free all multipart state since we now have one concatenated object
+ if (200 == result) ServiceProvider.getInstance().getS3Engine().freeUploadParts( bucket, uploadId, false );
+
+ // If all successful then clean up all left over parts
+ // Notice that "<?xml version="1.0" encoding="utf-8"?>" has already been written into the servlet output stream at the beginning of section [A]
+ if ( 200 == result )
+ {
+ StringBuffer xml = new StringBuffer();
+ xml.append( "<CompleteMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">" );
+ xml.append( "<Location>" ).append( "http://" + bucket + ".s3.amazonaws.com/" + key ).append( "</Location>" );
+ xml.append( "<Bucket>" ).append( bucket ).append( "</Bucket>" );
+ xml.append( "<Key>" ).append( key ).append( "</Key>" );
+ xml.append( "<ETag>\"" ).append( engineResponse.getETag()).append( "\"</ETag>" );
+ xml.append( "</CompleteMultipartUploadResult>" );
+ String xmlString = xml.toString().replaceAll("^\\s+", ""); // Remove leading whitespace characters
+ outputStream.write( xmlString.getBytes());
+ outputStream.close();
+ }
+ else returnErrorXML( result, null, outputStream );
+ }
+
+ private void executeAbortMultipartUpload( HttpServletRequest request, HttpServletResponse response ) throws IOException
+ {
+ String bucket = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
+ int uploadId = -1;
+
+ String temp = request.getParameter("uploadId");
+ if (null != temp) uploadId = Integer.parseInt( temp );
+
+ int result = ServiceProvider.getInstance().getS3Engine().freeUploadParts( bucket, uploadId, true );
 response.setStatus( result );
- }
-
- private void executeListUploadParts( HttpServletRequest request, HttpServletResponse response ) throws IOException
- {
- String bucketName = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
- String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY);
- String owner = null;
- String initiator = null;
- S3MultipartPart[] parts = null;
- int remaining = 0;
- int uploadId = -1;
- int maxParts = 1000;
- int partMarker = 0;
- int nextMarker = 0;
+ }
- String temp = request.getParameter("uploadId");
- if (null != temp) uploadId = Integer.parseInt( temp );
+ private void executeListUploadParts( HttpServletRequest request, HttpServletResponse response ) throws IOException
+ {
+ String bucketName = (String) request.getAttribute(S3Constants.BUCKET_ATTR_KEY);
+ String key = (String) request.getAttribute(S3Constants.OBJECT_ATTR_KEY);
+ String owner = null;
+ String initiator = null;
+ S3MultipartPart[] parts = null;
+ int remaining = 0;
+ int uploadId = -1;
+ int maxParts = 1000;
+ int partMarker = 0;
+ int nextMarker = 0;
- temp = 
request.getParameter("max-parts"); - if (null != temp) { - maxParts = Integer.parseInt( temp ); - if (maxParts > 1000 || maxParts < 0) maxParts = 1000; - } + String temp = request.getParameter("uploadId"); + if (null != temp) uploadId = Integer.parseInt( temp ); - temp = request.getParameter("part-number-marker"); - if (null != temp) partMarker = Integer.parseInt( temp ); + temp = request.getParameter("max-parts"); + if (null != temp) { + maxParts = Integer.parseInt( temp ); + if (maxParts > 1000 || maxParts < 0) maxParts = 1000; + } - - // -> does the bucket exist, we may need it to verify access permissions - SBucketVO bucket = bucketDao.getByName(bucketName); - if (bucket == null) { - logger.error( "listUploadParts failed since " + bucketName + " does not exist" ); - response.setStatus(404); - return; - } - - try { - MultipartLoadDao uploadDao = new MultipartLoadDao(); - OrderedPair exists = uploadDao.multipartExits( uploadId ); - if (null == exists) { - response.setStatus(404); - return; - } - owner = exists.getFirst(); - - // -> the multipart initiator or bucket owner can do this action - initiator = uploadDao.getInitiator( uploadId ); - if (null == initiator || !initiator.equals( UserContext.current().getAccessKey())) - { - try { - // -> write permission on a bucket allows a PutObject / DeleteObject action on any object in the bucket - S3PolicyContext context = new S3PolicyContext( PolicyActions.ListMultipartUploadParts, bucketName ); - context.setKeyName( exists.getSecond()); - S3Engine.verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); - } - catch (PermissionDeniedException e) { - response.setStatus(403); - return; - } - } - - parts = uploadDao.getParts( uploadId, maxParts, partMarker ); - remaining = uploadDao.numParts( uploadId, partMarker+maxParts ); - } - catch( Exception e ) { - logger.error("List Uploads failed due to " + e.getMessage(), e); - response.setStatus(500); - } + temp = request.getParameter("part-number-marker"); + if (null != temp) partMarker = Integer.parseInt( temp ); - - StringBuffer xml = new StringBuffer(); - xml.append( "" ); - xml.append( "" ); - xml.append( "" ).append( bucket ).append( "" ); - xml.append( "" ).append( key ).append( "" ); - xml.append( "" ).append( uploadId ).append( "" ); - - // -> currently we just have the access key and have no notion of a display name - xml.append( "" ); - xml.append( "" ).append( initiator ).append( "" ); - xml.append( "" ); - xml.append( "" ); - xml.append( "" ); - xml.append( "" ).append( owner ).append( "" ); - xml.append( "" ); - xml.append( "" ); - - StringBuffer partsList = new StringBuffer(); - for( int i=0; i < parts.length; i++ ) - { - S3MultipartPart onePart = parts[i]; - if (null == onePart) break; - - nextMarker = onePart.getPartNumber(); - partsList.append( "" ); - partsList.append( "" ).append( nextMarker ).append( "" ); - partsList.append( "" ).append( DatatypeConverter.printDateTime( onePart.getLastModified())).append( "" ); - partsList.append( "\"" ).append( onePart.getETag()).append( "\"" ); - partsList.append( "" ).append( onePart.getSize()).append( "" ); - partsList.append( "" ); - } - - xml.append( "STANDARD" ); - xml.append( "" ).append( partMarker ).append( "" ); - xml.append( "" ).append( nextMarker ).append( "" ); - xml.append( "" ).append( maxParts ).append( "" ); - xml.append( "" ).append((0 < remaining ? 
"true" : "false" )).append( "" ); - xml.append( partsList.toString()); - xml.append( "" ); - - response.setStatus(200); - response.setContentType("text/xml; charset=UTF-8"); - S3RestServlet.endResponse(response, xml.toString()); - } - - /** - * Support the "Range: bytes=0-399" header with just one byte range. - * @param request - * @param engineRequest - * @return - */ - private S3GetObjectRequest setRequestByteRange( HttpServletRequest request, S3GetObjectRequest engineRequest ) - { - String temp = request.getHeader( "Range" ); - if (null == temp) return engineRequest; - - int offset = temp.indexOf( "=" ); - if (-1 != offset) - { - String range = temp.substring( offset+1 ); - - String[] parts = range.split( "-" ); - if (2 >= parts.length) { - // -> the end byte is inclusive - engineRequest.setByteRangeStart( Long.parseLong(parts[0])); - engineRequest.setByteRangeEnd( Long.parseLong(parts[1])+1); - } - } - return engineRequest; - } - - private S3ConditionalHeaders conditionalRequest( HttpServletRequest request, boolean isCopy ) - { - S3ConditionalHeaders headers = new S3ConditionalHeaders(); - - if (isCopy) { - headers.setModifiedSince( request.getHeader( "x-amz-copy-source-if-modified-since" )); - headers.setUnModifiedSince( request.getHeader( "x-amz-copy-source-if-unmodified-since" )); - headers.setMatch( request.getHeader( "x-amz-copy-source-if-match" )); - headers.setNoneMatch( request.getHeader( "x-amz-copy-source-if-none-match" )); - } - else { - headers.setModifiedSince( request.getHeader( "If-Modified-Since" )); - headers.setUnModifiedSince( request.getHeader( "If-Unmodified-Since" )); - headers.setMatch( request.getHeader( "If-Match" )); - headers.setNoneMatch( request.getHeader( "If-None-Match" )); - } + // -> does the bucket exist, we may need it to verify access permissions + SBucketVO bucket = bucketDao.getByName(bucketName); + if (bucket == null) { + logger.error( "listUploadParts failed since " + bucketName + " does not exist" ); + response.setStatus(404); + return; + } + + try { + MultipartLoadDao uploadDao = new MultipartLoadDao(); + OrderedPair exists = uploadDao.multipartExits( uploadId ); + if (null == exists) { + response.setStatus(404); + return; + } + owner = exists.getFirst(); + + // -> the multipart initiator or bucket owner can do this action + initiator = uploadDao.getInitiator( uploadId ); + if (null == initiator || !initiator.equals( UserContext.current().getAccessKey())) + { + try { + // -> write permission on a bucket allows a PutObject / DeleteObject action on any object in the bucket + S3PolicyContext context = new S3PolicyContext( PolicyActions.ListMultipartUploadParts, bucketName ); + context.setKeyName( exists.getSecond()); + S3Engine.verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); + } + catch (PermissionDeniedException e) { + response.setStatus(403); + return; + } + } + + parts = uploadDao.getParts( uploadId, maxParts, partMarker ); + remaining = uploadDao.numParts( uploadId, partMarker+maxParts ); + } + catch( Exception e ) { + logger.error("List Uploads failed due to " + e.getMessage(), e); + response.setStatus(500); + } + + + StringBuffer xml = new StringBuffer(); + xml.append( "" ); + xml.append( "" ); + xml.append( "" ).append( bucket ).append( "" ); + xml.append( "" ).append( key ).append( "" ); + xml.append( "" ).append( uploadId ).append( "" ); + + // -> currently we just have the access key and have no notion of a display name + xml.append( "" ); + xml.append( "" ).append( initiator ).append( "" ); + xml.append( "" ); 
+ xml.append( "" ); + xml.append( "" ); + xml.append( "" ).append( owner ).append( "" ); + xml.append( "" ); + xml.append( "" ); + + StringBuffer partsList = new StringBuffer(); + for( int i=0; i < parts.length; i++ ) + { + S3MultipartPart onePart = parts[i]; + if (null == onePart) break; + + nextMarker = onePart.getPartNumber(); + partsList.append( "" ); + partsList.append( "" ).append( nextMarker ).append( "" ); + partsList.append( "" ).append( DatatypeConverter.printDateTime( onePart.getLastModified())).append( "" ); + partsList.append( "\"" ).append( onePart.getETag()).append( "\"" ); + partsList.append( "" ).append( onePart.getSize()).append( "" ); + partsList.append( "" ); + } + + xml.append( "STANDARD" ); + xml.append( "" ).append( partMarker ).append( "" ); + xml.append( "" ).append( nextMarker ).append( "" ); + xml.append( "" ).append( maxParts ).append( "" ); + xml.append( "" ).append((0 < remaining ? "true" : "false" )).append( "" ); + + xml.append( partsList.toString()); + xml.append( "" ); + + response.setStatus(200); + response.setContentType("text/xml; charset=UTF-8"); + S3RestServlet.endResponse(response, xml.toString()); + } + + /** + * Support the "Range: bytes=0-399" header with just one byte range. + * @param request + * @param engineRequest + * @return + */ + private S3GetObjectRequest setRequestByteRange( HttpServletRequest request, S3GetObjectRequest engineRequest ) + { + String temp = request.getHeader( "Range" ); + if (null == temp) return engineRequest; + + int offset = temp.indexOf( "=" ); + if (-1 != offset) + { + String range = temp.substring( offset+1 ); + + String[] parts = range.split( "-" ); + if (2 >= parts.length) { + // -> the end byte is inclusive + engineRequest.setByteRangeStart( Long.parseLong(parts[0])); + engineRequest.setByteRangeEnd( Long.parseLong(parts[1])+1); + } + } + return engineRequest; + } + + private S3ConditionalHeaders conditionalRequest( HttpServletRequest request, boolean isCopy ) + { + S3ConditionalHeaders headers = new S3ConditionalHeaders(); + + if (isCopy) { + headers.setModifiedSince( request.getHeader( "x-amz-copy-source-if-modified-since" )); + headers.setUnModifiedSince( request.getHeader( "x-amz-copy-source-if-unmodified-since" )); + headers.setMatch( request.getHeader( "x-amz-copy-source-if-match" )); + headers.setNoneMatch( request.getHeader( "x-amz-copy-source-if-none-match" )); + } + else { + headers.setModifiedSince( request.getHeader( "If-Modified-Since" )); + headers.setUnModifiedSince( request.getHeader( "If-Unmodified-Since" )); + headers.setMatch( request.getHeader( "If-Match" )); + headers.setNoneMatch( request.getHeader( "If-None-Match" )); + } return headers; - } - - private boolean conditionPassed( HttpServletRequest request, HttpServletResponse response, Date lastModified, String ETag ) - { - S3ConditionalHeaders ifCond = conditionalRequest( request, false ); - - if (0 > ifCond.ifModifiedSince( lastModified )) { - response.setStatus( 304 ); - return false; - } - if (0 > ifCond.ifUnmodifiedSince( lastModified )) { - response.setStatus( 412 ); - return false; - } - if (0 > ifCond.ifMatchEtag( ETag )) { - response.setStatus( 412 ); - return false; - } - if (0 > ifCond.ifNoneMatchEtag( ETag )) { - response.setStatus( 412 ); - return false; - } - return true; - } - - /** - * Return the saved object's meta data back to the client as HTTP "x-amz-meta-" headers. - * This function is constructing an HTTP header and these headers have a defined syntax - * as defined in rfc2616. 
Any characters that could cause an invalid HTTP header will - * prevent that meta data from being returned via the REST call (as is defined in the Amazon - * spec). These characters can be defined if using the SOAP API as well as the REST API. - * - * @param engineResponse - * @param response - */ - private void returnMetaData( S3GetObjectResponse engineResponse, HttpServletResponse response ) - { - boolean ignoreMeta = false; - int ignoredCount = 0; - - S3MetaDataEntry[] metaSet = engineResponse.getMetaEntries(); - for( int i=0; null != metaSet && i < metaSet.length; i++ ) - { - String name = metaSet[i].getName(); - String value = metaSet[i].getValue(); - byte[] nameBytes = name.getBytes(); - ignoreMeta = false; - - // -> cannot have control characters (octets 0 - 31) and DEL (127), in an HTTP header - for( int j=0; j < name.length(); j++ ) { - if ((0 <= nameBytes[j] && 31 >= nameBytes[j]) || 127 == nameBytes[j]) { - ignoreMeta = true; - break; - } - } - - // -> cannot have HTTP separators in an HTTP header - if (-1 != name.indexOf('(') || -1 != name.indexOf(')') || -1 != name.indexOf('@') || - -1 != name.indexOf('<') || -1 != name.indexOf('>') || -1 != name.indexOf('\"') || - -1 != name.indexOf('[') || -1 != name.indexOf(']') || -1 != name.indexOf('=') || - -1 != name.indexOf(',') || -1 != name.indexOf(';') || -1 != name.indexOf(':') || - -1 != name.indexOf('\\') || -1 != name.indexOf('/') || -1 != name.indexOf(' ') || - -1 != name.indexOf('{') || -1 != name.indexOf('}') || -1 != name.indexOf('?') || - -1 != name.indexOf('\t') - ) ignoreMeta = true; - - - if ( ignoreMeta ) - ignoredCount++; - else response.addHeader( "x-amz-meta-" + name, value ); - } - - if (0 < ignoredCount) response.addHeader( "x-amz-missing-meta", new String( "" + ignoredCount )); - } + } - /** - * Extract the name and value of all meta data so it can be written with the - * object that is being 'PUT'. - * - * @param request - * @return - */ - private S3MetaDataEntry[] extractMetaData( HttpServletRequest request ) - { - List metaSet = new ArrayList(); - int count = 0; - - Enumeration headers = request.getHeaderNames(); + private boolean conditionPassed( HttpServletRequest request, HttpServletResponse response, Date lastModified, String ETag ) + { + S3ConditionalHeaders ifCond = conditionalRequest( request, false ); + + if (0 > ifCond.ifModifiedSince( lastModified )) { + response.setStatus( 304 ); + return false; + } + if (0 > ifCond.ifUnmodifiedSince( lastModified )) { + response.setStatus( 412 ); + return false; + } + if (0 > ifCond.ifMatchEtag( ETag )) { + response.setStatus( 412 ); + return false; + } + if (0 > ifCond.ifNoneMatchEtag( ETag )) { + response.setStatus( 412 ); + return false; + } + return true; + } + + /** + * Return the saved object's meta data back to the client as HTTP "x-amz-meta-" headers. + * This function is constructing an HTTP header and these headers have a defined syntax + * as defined in rfc2616. Any characters that could cause an invalid HTTP header will + * prevent that meta data from being returned via the REST call (as is defined in the Amazon + * spec). These characters can be defined if using the SOAP API as well as the REST API. 
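The method body below rejects meta-data names containing rfc2616 control or separator characters through a long chain of indexOf tests; an equivalent, more declarative check over the same character set, for reference:

    public class HeaderNameSketch {
        private static final String SEPARATORS = "()@<>\"[]=,;:\\/ {}?\t";

        // True when a name may be emitted as an x-amz-meta- header:
        // no CTLs (0-31), no DEL (127), no rfc2616 separators.
        static boolean validHeaderName(String name) {
            for (int i = 0; i < name.length(); i++) {
                char c = name.charAt(i);
                if (c <= 31 || c == 127 || SEPARATORS.indexOf(c) != -1) return false;
            }
            return true;
        }

        public static void main(String[] args) {
            System.out.println(validHeaderName("color"));    // true
            System.out.println(validHeaderName("bad name")); // false -- space is a separator
        }
    }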
+ * + * @param engineResponse + * @param response + */ + private void returnMetaData( S3GetObjectResponse engineResponse, HttpServletResponse response ) + { + boolean ignoreMeta = false; + int ignoredCount = 0; + + S3MetaDataEntry[] metaSet = engineResponse.getMetaEntries(); + for( int i=0; null != metaSet && i < metaSet.length; i++ ) + { + String name = metaSet[i].getName(); + String value = metaSet[i].getValue(); + byte[] nameBytes = name.getBytes(); + ignoreMeta = false; + + // -> cannot have control characters (octets 0 - 31) and DEL (127), in an HTTP header + for( int j=0; j < name.length(); j++ ) { + if ((0 <= nameBytes[j] && 31 >= nameBytes[j]) || 127 == nameBytes[j]) { + ignoreMeta = true; + break; + } + } + + // -> cannot have HTTP separators in an HTTP header + if (-1 != name.indexOf('(') || -1 != name.indexOf(')') || -1 != name.indexOf('@') || + -1 != name.indexOf('<') || -1 != name.indexOf('>') || -1 != name.indexOf('\"') || + -1 != name.indexOf('[') || -1 != name.indexOf(']') || -1 != name.indexOf('=') || + -1 != name.indexOf(',') || -1 != name.indexOf(';') || -1 != name.indexOf(':') || + -1 != name.indexOf('\\') || -1 != name.indexOf('/') || -1 != name.indexOf(' ') || + -1 != name.indexOf('{') || -1 != name.indexOf('}') || -1 != name.indexOf('?') || + -1 != name.indexOf('\t') + ) ignoreMeta = true; + + + if ( ignoreMeta ) + ignoredCount++; + else response.addHeader( "x-amz-meta-" + name, value ); + } + + if (0 < ignoredCount) response.addHeader( "x-amz-missing-meta", new String( "" + ignoredCount )); + } + + /** + * Extract the name and value of all meta data so it can be written with the + * object that is being 'PUT'. + * + * @param request + * @return + */ + private S3MetaDataEntry[] extractMetaData( HttpServletRequest request ) + { + List metaSet = new ArrayList(); + int count = 0; + + Enumeration headers = request.getHeaderNames(); while( headers.hasMoreElements()) { - String key = (String)headers.nextElement(); - if (key.startsWith( "x-amz-meta-" )) - { - String name = key.substring( 11 ); - String value = request.getHeader( key ); - if (null != value) { - S3MetaDataEntry oneMeta = new S3MetaDataEntry(); - oneMeta.setName( name ); - oneMeta.setValue( value ); - metaSet.add( oneMeta ); - count++; - } - } + String key = (String)headers.nextElement(); + if (key.startsWith( "x-amz-meta-" )) + { + String name = key.substring( 11 ); + String value = request.getHeader( key ); + if (null != value) { + S3MetaDataEntry oneMeta = new S3MetaDataEntry(); + oneMeta.setName( name ); + oneMeta.setValue( value ); + metaSet.add( oneMeta ); + count++; + } + } } if ( 0 < count ) - return metaSet.toArray(new S3MetaDataEntry[0]); + return metaSet.toArray(new S3MetaDataEntry[0]); else return null; - } - - /** - * Parameters on the query string may or may not be name-value pairs. - * For example: "?acl&versionId=2", notice that "acl" has no value other - * than it is present. - * - * @param queryString - from a URL to locate the 'find' parameter - * @param find - name string to return first found - * @return the value matching the found name - */ - private String returnParameter( String queryString, String find ) - { - int offset = queryString.indexOf( find ); - if (-1 != offset) - { - String temp = queryString.substring( offset ); - String[] paramList = temp.split( "[&=]" ); + } + + /** + * Parameters on the query string may or may not be name-value pairs. + * For example: "?acl&versionId=2", notice that "acl" has no value other + * than it is present. 
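A worked example of the lookup the javadoc describes, using the same jump-to-name-then-split-on-[&=] approach as the returnParameter method below (query strings invented):

    public class QueryParamSketch {
        static String returnParameter(String queryString, String find) {
            int offset = queryString.indexOf(find);
            if (offset != -1) {
                // After the split, element [0] is the name and element [1] its value.
                String[] paramList = queryString.substring(offset).split("[&=]");
                if (paramList.length >= 2) return paramList[1];
            }
            return null;
        }

        public static void main(String[] args) {
            System.out.println(returnParameter("acl&versionId=2", "versionId=")); // 2
            System.out.println(returnParameter("acl", "versionId="));             // null
        }
    }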
+ *
+ * @param queryString - from a URL to locate the 'find' parameter
+ * @param find - name string to return first found
+ * @return the value matching the found name
+ */
+ private String returnParameter( String queryString, String find )
+ {
+ int offset = queryString.indexOf( find );
+ if (-1 != offset)
+ {
+ String temp = queryString.substring( offset );
+ String[] paramList = temp.split( "[&=]" );
 if (null != paramList && 2 <= paramList.length) return paramList[1];
- }
- return null;
- }
-
- private void returnErrorXML( int errorCode, String errorDescription, OutputStream os ) throws IOException
- {
- StringBuffer xml = new StringBuffer();
-
- xml.append( "<?xml version=\"1.0\" encoding=\"utf-8\"?>" );
+ }
+ return null;
+ }
+
+ private void returnErrorXML( int errorCode, String errorDescription, OutputStream os ) throws IOException
+ {
+ StringBuffer xml = new StringBuffer();
+
+ xml.append( "<?xml version=\"1.0\" encoding=\"utf-8\"?>" );
 xml.append( "<Error>" );
-
+
 if ( null != errorDescription )
- xml.append( "<Code>" ).append( errorDescription ).append( "</Code>" );
+ xml.append( "<Code>" ).append( errorDescription ).append( "</Code>" );
 else xml.append( "<Code>" ).append( errorCode ).append( "</Code>" );
-
+
 xml.append( "<Message>" ).append( "" ).append( "</Message>" );
 xml.append( "<RequestId>" ).append( "" ).append( "</RequestId>" );
 xml.append( "<HostId>" ).append( "" ).append( "</HostId>" );
 xml.append( "</Error>" );
-
+
 os.write( xml.toString().getBytes());
 os.close();
- }
-
- /**
- * The Complete Multipart Upload function pass in the request body a list of
- * all uploaded body parts. It is required that we verify that list matches
- * what was uploaded.
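For context, the body that verifyParts validates is the part manifest the client sends on completion. A self-contained sketch that parses a sample manifest the same way the method does (DocumentBuilder, then collecting Part elements); the part numbers and ETags are invented:

    import java.io.ByteArrayInputStream;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.NodeList;

    public class ManifestSketch {
        public static void main(String[] args) throws Exception {
            String body =
                "<CompleteMultipartUpload>" +
                "<Part><PartNumber>1</PartNumber><ETag>\"aaa111\"</ETag></Part>" +
                "<Part><PartNumber>2</PartNumber><ETag>\"bbb222\"</ETag></Part>" +
                "</CompleteMultipartUpload>";

            DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
            dbf.setNamespaceAware(true);
            Document doc = dbf.newDocumentBuilder().parse(new ByteArrayInputStream(body.getBytes("UTF-8")));
            // Parts must appear in ascending PartNumber order and each ETag must match
            // the stored part, or verifyParts answers 400 InvalidPart / InvalidPartOrder.
            NodeList parts = doc.getElementsByTagName("Part");
            System.out.println("parts listed: " + parts.getLength()); // 2
        }
    }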
+ * + * @param is + * @param parts + * @return error code, and error string + * @throws ParserConfigurationException, IOException, SAXException + */ private OrderedPair verifyParts( InputStream is, S3MultipartPart[] parts ) { - try { - DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); - dbf.setNamespaceAware( true ); - - DocumentBuilder db = dbf.newDocumentBuilder(); - Document doc = db.parse( is ); - Node parent = null; - Node contents = null; - NodeList children = null; - String temp = null; - String element = null; - String eTag = null; - int lastNumber = -1; - int partNumber = -1; - int count = 0; + try { + DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); + dbf.setNamespaceAware( true ); - // -> handle with and without a namespace - NodeList nodeSet = doc.getElementsByTagNameNS( "http://s3.amazonaws.com/doc/2006-03-01/", "Part" ); - count = nodeSet.getLength(); - if (0 == count) { - nodeSet = doc.getElementsByTagName( "Part" ); - count = nodeSet.getLength(); - } - if (count != parts.length) return new OrderedPair(400, "InvalidPart"); + DocumentBuilder db = dbf.newDocumentBuilder(); + Document doc = db.parse( is ); + Node parent = null; + Node contents = null; + NodeList children = null; + String temp = null; + String element = null; + String eTag = null; + int lastNumber = -1; + int partNumber = -1; + int count = 0; - // -> get a list of all the children elements of the 'Part' parent element - for( int i=0; i < count; i++ ) - { - partNumber = -1; - eTag = null; - parent = nodeSet.item(i); - - if (null != (children = parent.getChildNodes())) - { - int numChildren = children.getLength(); - for( int j=0; j < numChildren; j++ ) - { - contents = children.item( j ); - element = contents.getNodeName().trim(); - if ( element.endsWith( "PartNumber" )) - { - temp = contents.getFirstChild().getNodeValue(); - if (null != temp) partNumber = Integer.parseInt( temp ); - //System.out.println( "part: " + partNumber ); - } - else if (element.endsWith( "ETag" )) - { - eTag = contents.getFirstChild().getNodeValue(); - //System.out.println( "etag: " + eTag ); - } - } - } - - // -> do the parts given in the call XML match what was previously uploaded? 
- if (lastNumber >= partNumber) { - return new OrderedPair(400, "InvalidPartOrder"); - } - if (partNumber != parts[i].getPartNumber() || - eTag == null || - !eTag.equalsIgnoreCase( "\"" + parts[i].getETag() + "\"" )) { - return new OrderedPair(400, "InvalidPart"); - } - - lastNumber = partNumber; - } - return new OrderedPair(200, "Success"); - } - catch( Exception e ) { - return new OrderedPair(500, e.toString()); - } + // -> handle with and without a namespace + NodeList nodeSet = doc.getElementsByTagNameNS( "http://s3.amazonaws.com/doc/2006-03-01/", "Part" ); + count = nodeSet.getLength(); + if (0 == count) { + nodeSet = doc.getElementsByTagName( "Part" ); + count = nodeSet.getLength(); + } + if (count != parts.length) return new OrderedPair(400, "InvalidPart"); + + // -> get a list of all the children elements of the 'Part' parent element + for( int i=0; i < count; i++ ) + { + partNumber = -1; + eTag = null; + parent = nodeSet.item(i); + + if (null != (children = parent.getChildNodes())) + { + int numChildren = children.getLength(); + for( int j=0; j < numChildren; j++ ) + { + contents = children.item( j ); + element = contents.getNodeName().trim(); + if ( element.endsWith( "PartNumber" )) + { + temp = contents.getFirstChild().getNodeValue(); + if (null != temp) partNumber = Integer.parseInt( temp ); + //System.out.println( "part: " + partNumber ); + } + else if (element.endsWith( "ETag" )) + { + eTag = contents.getFirstChild().getNodeValue(); + //System.out.println( "etag: " + eTag ); + } + } + } + + // -> do the parts given in the call XML match what was previously uploaded? + if (lastNumber >= partNumber) { + return new OrderedPair(400, "InvalidPartOrder"); + } + if (partNumber != parts[i].getPartNumber() || + eTag == null || + !eTag.equalsIgnoreCase( "\"" + parts[i].getETag() + "\"" )) { + return new OrderedPair(400, "InvalidPart"); + } + + lastNumber = partNumber; + } + return new OrderedPair(200, "Success"); + } + catch( Exception e ) { + return new OrderedPair(500, e.toString()); + } } } diff --git a/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java b/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java index 2ddbbf2a57d..4eb4c3b99a1 100644 --- a/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java +++ b/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java @@ -25,29 +25,27 @@ import java.lang.reflect.InvocationHandler; import java.lang.reflect.Method; import java.lang.reflect.Proxy; import java.net.InetAddress; -import java.sql.SQLException; import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.Timer; import java.util.TimerTask; +import javax.inject.Inject; + import org.apache.axis2.AxisFault; import org.apache.log4j.Logger; import org.apache.log4j.xml.DOMConfigurator; -import com.amazon.s3.AmazonS3SkeletonInterface; import com.amazon.ec2.AmazonEC2SkeletonInterface; +import com.amazon.s3.AmazonS3SkeletonInterface; import com.cloud.bridge.model.MHostVO; import com.cloud.bridge.model.SHost; import com.cloud.bridge.model.SHostVO; import com.cloud.bridge.model.UserCredentialsVO; import com.cloud.bridge.persist.dao.MHostDao; -import com.cloud.bridge.persist.dao.MHostDaoImpl; import com.cloud.bridge.persist.dao.SHostDao; -import com.cloud.bridge.persist.dao.SHostDaoImpl; import com.cloud.bridge.persist.dao.UserCredentialsDao; -import com.cloud.bridge.persist.dao.UserCredentialsDaoImpl; import com.cloud.bridge.service.EC2SoapServiceImpl; import 
com.cloud.bridge.service.UserInfo;
import com.cloud.bridge.service.core.ec2.EC2Engine;
@@ -58,191 +56,190 @@ import com.cloud.bridge.util.ConfigurationHelper;
import com.cloud.bridge.util.DateHelper;
import com.cloud.bridge.util.NetHelper;
import com.cloud.bridge.util.OrderedPair;
-import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Transaction;

public class ServiceProvider {
-  protected final static Logger logger = Logger.getLogger(ServiceProvider.class);
-  protected final MHostDao mhostDao = ComponentLocator.inject(MHostDaoImpl.class);
-  protected final SHostDao shostDao = ComponentLocator.inject(SHostDaoImpl.class);
-  protected final UserCredentialsDao ucDao = ComponentLocator.inject(UserCredentialsDaoImpl.class);
-
-  public final static long HEARTBEAT_INTERVAL = 10000;
+    protected final static Logger logger = Logger.getLogger(ServiceProvider.class);
+    @Inject MHostDao mhostDao;
+    @Inject SHostDao shostDao;
+    @Inject UserCredentialsDao ucDao;

-  private static ServiceProvider instance;
+    public final static long HEARTBEAT_INTERVAL = 10000;

-  private Map<Class<?>, Object> serviceMap = new HashMap<Class<?>, Object>();
-  private Timer timer = new Timer();
-  private MHostVO mhost;
-  private Properties properties;
-  private boolean useSubDomain = false; // use DNS sub domain for bucket name
-  private String serviceEndpoint = null;
-  private String multipartDir = null; // illegal bucket name used as a folder for storing multiparts
-  private String masterDomain = ".s3.amazonaws.com";
-  private S3Engine engine;
-  private EC2Engine EC2_engine = null;
+    private static ServiceProvider instance;

-  // -> cache Bucket Policies here so we don't have to load from db on every access
-  private Map<String, S3BucketPolicy> policyMap = new HashMap<String, S3BucketPolicy>();
+    private final Map<Class<?>, Object> serviceMap = new HashMap<Class<?>, Object>();
+    private final Timer timer = new Timer();
+    private MHostVO mhost;
+    private Properties properties;
+    private boolean useSubDomain = false; // use DNS sub domain for bucket name
+    private String serviceEndpoint = null;
+    private String multipartDir = null; // illegal bucket name used as a folder for storing multiparts
+    private String masterDomain = ".s3.amazonaws.com";
+    private final S3Engine engine;
+    private EC2Engine EC2_engine = null;

-  protected ServiceProvider() throws IOException {
-      // register service implementation object
-      Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
-      txn.close();
-      engine = new S3Engine();
-      EC2_engine = new EC2Engine();
-      serviceMap.put(AmazonS3SkeletonInterface.class, new S3SerializableServiceImplementation(engine));
-      serviceMap.put(AmazonEC2SkeletonInterface.class, new EC2SoapServiceImpl(EC2_engine));
-  }
+    // -> cache Bucket Policies here so we don't have to load from db on every access
+    private final Map<String, S3BucketPolicy> policyMap = new HashMap<String, S3BucketPolicy>();

+    protected ServiceProvider() throws IOException {
+        // register service implementation object
+        Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
+        txn.close();
+        engine = new S3Engine();
+        EC2_engine = new EC2Engine();
+        serviceMap.put(AmazonS3SkeletonInterface.class, new S3SerializableServiceImplementation(engine));
+        serviceMap.put(AmazonEC2SkeletonInterface.class, new EC2SoapServiceImpl(EC2_engine));
+    }
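        // Illustration, not part of the patch: how the registry filled in the constructor
        // above is meant to be consumed. getServiceImpl(), defined near the end of this
        // class, looks the skeleton up in serviceMap and wraps it in a dynamic proxy that
        // unwraps enveloped AxisFaults.
        AmazonS3SkeletonInterface s3 = ServiceProvider.getInstance()
                .getServiceImpl(AmazonS3SkeletonInterface.class);
        // every call on 's3' is now routed through the InvocationHandler built by getProxy()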
-  public long getManagementHostId() {
-      // we want to limit mhost within its own session, id of the value will be returned
-      long mhostId = 0;
-      if(mhost != null)
-          mhostId = mhost.getId() != null ? mhost.getId().longValue() : 0L;
-      return mhostId;
-  }
+    public synchronized static ServiceProvider getInstance() {
+        if(instance == null)
+        {
+            try {
+                instance = new ServiceProvider();
+                instance.initialize();
+            } catch(Throwable e) {
+                logger.error("Unexpected exception " + e.getMessage(), e);
+            } finally {
+            }
+        }
+        return instance;
+    }

-  /**
-   * We return a 2-tuple to distinguish between two cases:
-   * (1) there is no entry in the map for bucketName, and (2) there is a null entry
-   * in the map for bucketName. In case 2, the database was inspected for the
-   * bucket policy but it had none so we cache it here to reduce database lookups.
-   * @param bucketName
-   * @return Integer in the tuple means: -1 if no policy defined for the bucket, 0 if one defined
-   *         even if it is set at null.
-   */
-  public OrderedPair<S3BucketPolicy, Integer> getBucketPolicy(String bucketName) {
+    public long getManagementHostId() {
+        // mhost is scoped to this session; return its id, or 0 if the host row is not yet set up
+        long mhostId = 0;
+        if(mhost != null)
+            mhostId = mhost.getId() != null ? mhost.getId().longValue() : 0L;
+        return mhostId;
+    }

-      if (policyMap.containsKey( bucketName )) {
-          S3BucketPolicy policy = policyMap.get( bucketName );
-          return new OrderedPair<S3BucketPolicy, Integer>( policy, 0 );
-      }
-      else return new OrderedPair<S3BucketPolicy, Integer>( null, -1 ); // For case (1) where the map has no entry for bucketName
-  }
+    /**
+     * We return a 2-tuple to distinguish between two cases:
+     * (1) there is no entry in the map for bucketName, and (2) there is a null entry
+     * in the map for bucketName. In case 2, the database was inspected for the
+     * bucket policy but it had none, so we cache the null here to reduce database lookups.
+     * @param bucketName
+     * @return the Integer of the tuple is -1 if no policy is defined for the bucket, and 0
+     *         if one is defined, even if the cached value is null
+     */
+    public OrderedPair<S3BucketPolicy, Integer> getBucketPolicy(String bucketName) {

-  /**
-   * The policy parameter can be set to null, which means that there is no policy
-   * for the bucket so a database lookup is not necessary.
-   *
-   * @param bucketName
-   * @param policy
-   */
-  public void setBucketPolicy(String bucketName, S3BucketPolicy policy) {
-      policyMap.put(bucketName, policy);
-  }
+        if (policyMap.containsKey( bucketName )) {
+            S3BucketPolicy policy = policyMap.get( bucketName );
+            return new OrderedPair<S3BucketPolicy, Integer>( policy, 0 );
+        }
+        else return new OrderedPair<S3BucketPolicy, Integer>( null, -1 ); // For case (1) where the map has no entry for bucketName
+    }
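        // Illustration, not part of the patch: consuming the 2-tuple returned above. The
        // bucket name is hypothetical, and OrderedPair is assumed to expose the accessors
        // getFirst()/getSecond() matching its constructor arguments.
        OrderedPair<S3BucketPolicy, Integer> cached =
                ServiceProvider.getInstance().getBucketPolicy("example-bucket");
        if (cached.getSecond().intValue() == -1) {
            // case (1): never looked up -- load from the database, then cache the result
            // (even a null policy) via setBucketPolicy() so later requests skip the lookup
        } else {
            S3BucketPolicy policy = cached.getFirst();   // case (2): may legitimately be null
        }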
-  public void deleteBucketPolicy(String bucketName) {
-      policyMap.remove(bucketName);
-  }
+    /**
+     * The policy parameter can be set to null, which means that there is no policy
+     * for the bucket so a database lookup is not necessary.
+     *
+     * @param bucketName
+     * @param policy
+     */
+    public void setBucketPolicy(String bucketName, S3BucketPolicy policy) {
+        policyMap.put(bucketName, policy);
+    }

-  public S3Engine getS3Engine() {
-      return engine;
-  }
+    public void deleteBucketPolicy(String bucketName) {
+        policyMap.remove(bucketName);
+    }

-  public EC2Engine getEC2Engine() {
-      return EC2_engine;
-  }
+    public S3Engine getS3Engine() {
+        return engine;
+    }

-  public String getMasterDomain() {
-      return masterDomain;
-  }
+    public EC2Engine getEC2Engine() {
+        return EC2_engine;
+    }

-  public boolean getUseSubDomain() {
-      return useSubDomain;
-  }
+    public String getMasterDomain() {
+        return masterDomain;
+    }

-  public String getServiceEndpoint() {
-      return serviceEndpoint;
-  }
+    public boolean getUseSubDomain() {
+        return useSubDomain;
+    }

-  public String getMultipartDir() {
-      return multipartDir;
-  }
+    public String getServiceEndpoint() {
+        return serviceEndpoint;
+    }

-  public Properties getStartupProperties() {
-      return properties;
-  }
+    public String getMultipartDir() {
+        return multipartDir;
+    }

-  public UserInfo getUserInfo(String accessKey) {
-      UserInfo info = new UserInfo();
-      Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
-      try {
-          txn.start();
-          UserCredentialsVO cloudKeys = ucDao.getByAccessKey( accessKey );
-          if ( null == cloudKeys ) {
-              logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" );
-              return null;
-          } else {
-              info.setAccessKey( accessKey );
-              info.setSecretKey( cloudKeys.getSecretKey());
-              info.setCanonicalUserId(accessKey);
-              info.setDescription( "S3 REST request" );
-              return info;
-          }
-      }finally {
-          txn.commit();
-      }
-  }
-
-  @DB
-  protected void initialize() {
-      if(logger.isInfoEnabled())
-          logger.info("Initializing ServiceProvider...");
-
-      Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
-      //txn.close();
+    public Properties getStartupProperties() {
+        return properties;
+    }

-      File file = ConfigurationHelper.findConfigurationFile("log4j-cloud.xml");
-      if(file != null) {
-          System.out.println("Log4j configuration from : " + file.getAbsolutePath());
-          DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000);
-      } else {
-          System.out.println("Configure log4j with default properties");
-      }
+    public UserInfo getUserInfo(String accessKey) {
+        UserInfo info = new UserInfo();
+        Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
+        try {
+            txn.start();
+            UserCredentialsVO cloudKeys = ucDao.getByAccessKey( accessKey );
+            if ( null == cloudKeys ) {
+                logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" );
+                return null;
+            } else {
+                info.setAccessKey( accessKey );
+                info.setSecretKey( cloudKeys.getSecretKey());
+                info.setCanonicalUserId(accessKey);
+                info.setDescription( "S3 REST request" );
+                return info;
+            }
+        } finally {
+            txn.commit();
+        }
+    }
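        // Illustration, not part of the patch: the REST authentication path resolves an
        // access key through getUserInfo() above; a null result means no keys were ever
        // registered via SetUserKeys. The access key is made up, and getSecretKey() is the
        // assumed getter matching the setter used above.
        UserInfo user = ServiceProvider.getInstance().getUserInfo("AKIAEXAMPLEKEY");
        if (user == null) {
            // reject the request: unknown access key
        } else {
            String secret = user.getSecretKey();         // used to verify the request signature
        }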
-      loadStartupProperties();
-      String hostKey = properties.getProperty("host.key");
-      if(hostKey == null) {
-          InetAddress inetAddr = NetHelper.getFirstNonLoopbackLocalInetAddress();
-          if(inetAddr != null)
-              hostKey = NetHelper.getMacAddress(inetAddr);
-      }
-      if(hostKey == null)
-          throw new ConfigurationException("Please configure host.key property in cloud-bridge.properites");
-      String host = properties.getProperty("host");
-      if(host == null)
-          host = NetHelper.getHostName();
+    @DB
+    protected void initialize() {
+        if(logger.isInfoEnabled())
+            logger.info("Initializing ServiceProvider...");

-      if(properties.get("bucket.dns") != null &&
-          ((String)properties.get("bucket.dns")).equalsIgnoreCase("true")) {
-          useSubDomain = true;
-      }
+        Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
+        //txn.close();

-      serviceEndpoint = (String)properties.get("serviceEndpoint");
-      masterDomain = new String( "." + serviceEndpoint );
+        File file = ConfigurationHelper.findConfigurationFile("log4j-cloud.xml");
+        if(file != null) {
+            System.out.println("Log4j configuration from : " + file.getAbsolutePath());
+            DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000);
+        } else {
+            System.out.println("Configure log4j with default properties");
+        }

-      setupHost(hostKey, host);
+        loadStartupProperties();
+        String hostKey = properties.getProperty("host.key");
+        if(hostKey == null) {
+            InetAddress inetAddr = NetHelper.getFirstNonLoopbackLocalInetAddress();
+            if(inetAddr != null)
+                hostKey = NetHelper.getMacAddress(inetAddr);
+        }
+        if(hostKey == null)
+            throw new ConfigurationException("Please configure host.key property in cloud-bridge.properties");
+        String host = properties.getProperty("host");
+        if(host == null)
+            host = NetHelper.getHostName();

-      // we will commit and start a new transaction to allow host info be flushed to DB
-      //PersistContext.flush();
+        if(properties.get("bucket.dns") != null &&
+            ((String)properties.get("bucket.dns")).equalsIgnoreCase("true")) {
+            useSubDomain = true;
+        }

-      String localStorageRoot = properties.getProperty("storage.root");
+        serviceEndpoint = (String)properties.get("serviceEndpoint");
+        masterDomain = "." + serviceEndpoint;
+
+        setupHost(hostKey, host);
+
+        // we will commit and start a new transaction to allow host info to be flushed to the DB
+        //PersistContext.flush();
+
+        String localStorageRoot = properties.getProperty("storage.root");
        if (localStorageRoot != null) {
            if (localStorageRoot.toLowerCase().startsWith("castor")) {
                setupCAStorStorage(localStorageRoot);
@@ -251,138 +248,139 @@
            }
        }

-      multipartDir = properties.getProperty("storage.multipartDir");
-
-      Transaction txn1 = Transaction.open(Transaction.AWSAPI_DB);
-      timer.schedule(getHeartbeatTask(), HEARTBEAT_INTERVAL, HEARTBEAT_INTERVAL);
-      txn1.close();
+        multipartDir = properties.getProperty("storage.multipartDir");

-      if(logger.isInfoEnabled())
-          logger.info("ServiceProvider initialized");
-  }
+        Transaction txn1 = Transaction.open(Transaction.AWSAPI_DB);
+        timer.schedule(getHeartbeatTask(), HEARTBEAT_INTERVAL, HEARTBEAT_INTERVAL);
+        txn1.close();

-  private void loadStartupProperties() {
-      File propertiesFile = ConfigurationHelper.findConfigurationFile("cloud-bridge.properties");
-      properties = new Properties();
-      if(propertiesFile != null) {
-          try {
-              properties.load(new FileInputStream(propertiesFile));
-          } catch (FileNotFoundException e) {
-              logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e);
-          } catch (IOException e) {
-              logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e);
-          }
-
-          logger.info("Use startup properties file: " + propertiesFile.getAbsolutePath());
-      } else {
-          if(logger.isInfoEnabled())
-              logger.info("Startup properties is not found.");
-      }
-  }
-
-  private TimerTask getHeartbeatTask() {
-      return new TimerTask() {
-
-          @Override
-          public void run() {
-              try {
-                  mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
-                  mhostDao.updateHeartBeat(mhost);
-              } catch(Throwable e){
-                  logger.error("Unexpected exception " + e.getMessage(), e);
-              } finally {
-              }
-          }
-      };
-  }
-
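        // Illustration, not part of the patch: the heartbeat wiring this hunk reformats.
        // initialize() schedules getHeartbeatTask() on the Timer field at a fixed
        // HEARTBEAT_INTERVAL (10 seconds), and each run stamps this management host's row
        // so peer hosts can see it is still alive.
        Timer heartbeat = new Timer();
        heartbeat.schedule(new TimerTask() {
            @Override
            public void run() {
                // equivalent of: mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
                //                mhostDao.updateHeartBeat(mhost);
            }
        }, ServiceProvider.HEARTBEAT_INTERVAL, ServiceProvider.HEARTBEAT_INTERVAL);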
-  private void setupHost(String hostKey, String host) {
-
-      mhost = mhostDao.getByHostKey(hostKey);
-      if(mhost == null) {
-          mhost = new MHostVO();
-          mhost.setHostKey(hostKey);
-          mhost.setHost(host);
-          mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
-          mhost = mhostDao.persist(mhost);
-      } else {
-          mhost.setHost(host);
-          mhostDao.update(mhost.getId(), mhost);
-      }
-  }
-
-  private void setupLocalStorage(String storageRoot) {
-      SHostVO shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot);
-      if(shost == null) {
-          shost = new SHostVO();
-          shost.setMhost(mhost);
-          shost.setMhostid(mhost.getId());
-          shost.setHostType(SHost.STORAGE_HOST_TYPE_LOCAL);
-          shost.setHost(NetHelper.getHostName());
-          shost.setExportRoot(storageRoot);
-          shostDao.persist(shost);
-      }
-  }
-
-  private void setupCAStorStorage(String storageRoot) {
-      SHostVO shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot);
-      if(shost == null) {
-          shost = new SHostVO();
-          shost.setMhost(mhost);
-          shost.setMhostid(mhost.getId());
-          shost.setHostType(SHost.STORAGE_HOST_TYPE_CASTOR);
-          shost.setHost(NetHelper.getHostName());
-          shost.setExportRoot(storageRoot);
-          shostDao.persist(shost);
-      }
+        if(logger.isInfoEnabled())
+            logger.info("ServiceProvider initialized");
    }

-  public void shutdown() {
-      timer.cancel();
+    private void loadStartupProperties() {
+        File propertiesFile = ConfigurationHelper.findConfigurationFile("cloud-bridge.properties");
+        properties = new Properties();
+        if(propertiesFile != null) {
+            try {
+                properties.load(new FileInputStream(propertiesFile));
+            } catch (FileNotFoundException e) {
+                logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e);
+            } catch (IOException e) {
+                logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e);
+            }

-      if(logger.isInfoEnabled())
-          logger.info("ServiceProvider stopped");
-  }
+            logger.info("Use startup properties file: " + propertiesFile.getAbsolutePath());
+        } else {
+            if(logger.isInfoEnabled())
+                logger.info("Startup properties file not found.");
+        }
+    }

-  @SuppressWarnings("unchecked")
-  private static T getProxy(Class serviceInterface, final T serviceObject) {
-      return (T) Proxy.newProxyInstance(serviceObject.getClass().getClassLoader(),
-              new Class[] { serviceInterface },
-              new InvocationHandler() {
-                  public Object invoke(Object proxy, Method method,
-                          Object[] args) throws Throwable {
-                      Object result = null;
-                      try {
-                          result = method.invoke(serviceObject, args);
-                      } catch (Throwable e) {
-                          // Rethrow the exception to Axis:
-                          // Check if the exception is an AxisFault or a
-                          // RuntimeException
-                          // enveloped AxisFault and if so, pass it on as
-                          // such. Otherwise
-                          // log to help debugging and throw as is.
- if (e.getCause() != null - && e.getCause() instanceof AxisFault) - throw e.getCause(); - else if (e.getCause() != null - && e.getCause().getCause() != null - && e.getCause().getCause() instanceof AxisFault) - throw e.getCause().getCause(); - else { - logger.warn( - "Unhandled exception " + e.getMessage(), - e); - throw e; - } - } finally { - } - return result; + private TimerTask getHeartbeatTask() { + return new TimerTask() { + + @Override + public void run() { + try { + mhost.setLastHeartbeatTime(DateHelper.currentGMTTime()); + mhostDao.updateHeartBeat(mhost); + } catch(Throwable e){ + logger.error("Unexpected exception " + e.getMessage(), e); + } finally { + } + } + }; + } + + private void setupHost(String hostKey, String host) { + + mhost = mhostDao.getByHostKey(hostKey); + if(mhost == null) { + mhost = new MHostVO(); + mhost.setHostKey(hostKey); + mhost.setHost(host); + mhost.setLastHeartbeatTime(DateHelper.currentGMTTime()); + mhost = mhostDao.persist(mhost); + } else { + mhost.setHost(host); + mhostDao.update(mhost.getId(), mhost); + } + } + + private void setupLocalStorage(String storageRoot) { + SHostVO shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot); + if(shost == null) { + shost = new SHostVO(); + shost.setMhost(mhost); + shost.setMhostid(mhost.getId()); + shost.setHostType(SHost.STORAGE_HOST_TYPE_LOCAL); + shost.setHost(NetHelper.getHostName()); + shost.setExportRoot(storageRoot); + shostDao.persist(shost); + } + } + + private void setupCAStorStorage(String storageRoot) { + SHostVO shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot); + if(shost == null) { + shost = new SHostVO(); + shost.setMhost(mhost); + shost.setMhostid(mhost.getId()); + shost.setHostType(SHost.STORAGE_HOST_TYPE_CASTOR); + shost.setHost(NetHelper.getHostName()); + shost.setExportRoot(storageRoot); + shostDao.persist(shost); + } + } + + public void shutdown() { + timer.cancel(); + + if(logger.isInfoEnabled()) + logger.info("ServiceProvider stopped"); + } + + @SuppressWarnings("unchecked") + private static T getProxy(Class serviceInterface, final T serviceObject) { + return (T) Proxy.newProxyInstance(serviceObject.getClass().getClassLoader(), + new Class[] { serviceInterface }, + new InvocationHandler() { + @Override + public Object invoke(Object proxy, Method method, + Object[] args) throws Throwable { + Object result = null; + try { + result = method.invoke(serviceObject, args); + } catch (Throwable e) { + // Rethrow the exception to Axis: + // Check if the exception is an AxisFault or a + // RuntimeException + // enveloped AxisFault and if so, pass it on as + // such. Otherwise + // log to help debugging and throw as is. 
+ if (e.getCause() != null + && e.getCause() instanceof AxisFault) + throw e.getCause(); + else if (e.getCause() != null + && e.getCause().getCause() != null + && e.getCause().getCause() instanceof AxisFault) + throw e.getCause().getCause(); + else { + logger.warn( + "Unhandled exception " + e.getMessage(), + e); + throw e; } - }); - } + } finally { + } + return result; + } + }); + } - @SuppressWarnings("unchecked") - public T getServiceImpl(Class serviceInterface) { - return getProxy(serviceInterface, (T)serviceMap.get(serviceInterface)); - } + @SuppressWarnings("unchecked") + public T getServiceImpl(Class serviceInterface) { + return getProxy(serviceInterface, (T)serviceMap.get(serviceInterface)); + } } diff --git a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java index eb25249bd92..0f8eded815f 100644 --- a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java +++ b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java @@ -22,9 +22,6 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.UnsupportedEncodingException; import java.security.SignatureException; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; import java.sql.SQLException; import java.text.ParseException; import java.util.ArrayList; @@ -32,6 +29,7 @@ import java.util.List; import java.util.Properties; import java.util.UUID; +import javax.inject.Inject; import javax.xml.parsers.ParserConfigurationException; import org.apache.log4j.Logger; @@ -39,13 +37,9 @@ import org.xml.sax.SAXException; import com.cloud.bridge.model.CloudStackServiceOfferingVO; import com.cloud.bridge.persist.dao.CloudStackAccountDao; -import com.cloud.bridge.persist.dao.CloudStackAccountDaoImpl; import com.cloud.bridge.persist.dao.CloudStackSvcOfferingDao; -import com.cloud.bridge.persist.dao.CloudStackSvcOfferingDaoImpl; -import com.cloud.bridge.persist.dao.OfferingDaoImpl; -import com.cloud.bridge.persist.dao.SObjectItemDaoImpl; +import com.cloud.bridge.persist.dao.OfferingDao; import com.cloud.bridge.service.UserContext; - import com.cloud.bridge.service.core.ec2.EC2ImageAttributes.ImageAttribute; import com.cloud.bridge.service.exception.EC2ServiceException; import com.cloud.bridge.service.exception.EC2ServiceException.ClientError; @@ -68,7 +62,6 @@ import com.cloud.stack.models.CloudStackResourceLimit; import com.cloud.stack.models.CloudStackResourceTag; import com.cloud.stack.models.CloudStackSecurityGroup; import com.cloud.stack.models.CloudStackSecurityGroupIngress; -import com.cloud.stack.models.CloudStackServiceOffering; import com.cloud.stack.models.CloudStackSnapshot; import com.cloud.stack.models.CloudStackTemplate; import com.cloud.stack.models.CloudStackTemplatePermission; @@ -76,453 +69,451 @@ import com.cloud.stack.models.CloudStackUser; import com.cloud.stack.models.CloudStackUserVm; import com.cloud.stack.models.CloudStackVolume; import com.cloud.stack.models.CloudStackZone; -import com.cloud.utils.component.ComponentLocator; -import com.cloud.utils.db.Transaction; /** * EC2Engine processes the ec2 commands and calls their cloudstack analogs * */ public class EC2Engine { - protected final static Logger logger = Logger.getLogger(EC2Engine.class); - String managementServer = null; - String cloudAPIPort = null; + protected final static Logger logger = Logger.getLogger(EC2Engine.class); + String managementServer = null; + String cloudAPIPort = null; - protected final 
CloudStackSvcOfferingDao scvoDao = ComponentLocator.inject(CloudStackSvcOfferingDaoImpl.class);
-  protected final OfferingDaoImpl ofDao = ComponentLocator.inject(OfferingDaoImpl.class);
-  CloudStackAccountDao accDao = ComponentLocator.inject(CloudStackAccountDaoImpl.class);
-  private CloudStackApi _eng = null;
-
-  private CloudStackAccount currentAccount = null;
+    @Inject CloudStackSvcOfferingDao scvoDao;
+    @Inject OfferingDao ofDao;
+    @Inject CloudStackAccountDao accDao;
+    private CloudStackApi _eng = null;

-  public EC2Engine() throws IOException {
-      loadConfigValues();
-  }
+    private CloudStackAccount currentAccount = null;

-  /**
-   * Which management server to we talk to?
-   * Load a mapping form Amazon values for 'instanceType' to cloud defined
-   * diskOfferingId and serviceOfferingId.
-   *
-   * @throws IOException
-   */
-  private void loadConfigValues() throws IOException {
-      File propertiesFile = ConfigurationHelper.findConfigurationFile("ec2-service.properties");
-      if (null != propertiesFile) {
-          logger.info("Use EC2 properties file: " + propertiesFile.getAbsolutePath());
-          Properties EC2Prop = new Properties();
-          try {
-              EC2Prop.load( new FileInputStream( propertiesFile ));
-          } catch (FileNotFoundException e) {
-              logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e);
-          } catch (IOException e) {
-              logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e);
-          }
-          managementServer = EC2Prop.getProperty( "managementServer" );
-          cloudAPIPort = EC2Prop.getProperty( "cloudAPIPort", null );
-
-          try {
-              if(ofDao.getOfferingCount() == 0) {
-                  String strValue = EC2Prop.getProperty("m1.small.serviceId");
-                  if(strValue != null) ofDao.setOfferMapping("m1.small", strValue);
+    public EC2Engine() throws IOException {
+        loadConfigValues();
+    }

-                  strValue = EC2Prop.getProperty("m1.large.serviceId");
-                  if(strValue != null) ofDao.setOfferMapping("m1.large", strValue);
+    /**
+     * Which management server do we talk to?
+     * Load a mapping from Amazon values for 'instanceType' to cloud defined
+     * diskOfferingId and serviceOfferingId.
+ * + * @throws IOException + */ + private void loadConfigValues() throws IOException { + File propertiesFile = ConfigurationHelper.findConfigurationFile("ec2-service.properties"); + if (null != propertiesFile) { + logger.info("Use EC2 properties file: " + propertiesFile.getAbsolutePath()); + Properties EC2Prop = new Properties(); + try { + EC2Prop.load( new FileInputStream( propertiesFile )); + } catch (FileNotFoundException e) { + logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e); + } catch (IOException e) { + logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e); + } + managementServer = EC2Prop.getProperty( "managementServer" ); + cloudAPIPort = EC2Prop.getProperty( "cloudAPIPort", null ); - strValue = EC2Prop.getProperty("m1.xlarge.serviceId"); - if(strValue != null) ofDao.setOfferMapping("m1.xlarge", strValue); + try { + if(ofDao.getOfferingCount() == 0) { + String strValue = EC2Prop.getProperty("m1.small.serviceId"); + if(strValue != null) ofDao.setOfferMapping("m1.small", strValue); - strValue = EC2Prop.getProperty("c1.medium.serviceId"); - if(strValue != null) ofDao.setOfferMapping("c1.medium", strValue); + strValue = EC2Prop.getProperty("m1.large.serviceId"); + if(strValue != null) ofDao.setOfferMapping("m1.large", strValue); - strValue = EC2Prop.getProperty("c1.xlarge.serviceId"); - if(strValue != null) ofDao.setOfferMapping("c1.xlarge", strValue); + strValue = EC2Prop.getProperty("m1.xlarge.serviceId"); + if(strValue != null) ofDao.setOfferMapping("m1.xlarge", strValue); - strValue = EC2Prop.getProperty("m2.xlarge.serviceId"); - if(strValue != null) ofDao.setOfferMapping("m2.xlarge", strValue); + strValue = EC2Prop.getProperty("c1.medium.serviceId"); + if(strValue != null) ofDao.setOfferMapping("c1.medium", strValue); - strValue = EC2Prop.getProperty("m2.2xlarge.serviceId"); - if(strValue != null) ofDao.setOfferMapping("m2.2xlarge", strValue); + strValue = EC2Prop.getProperty("c1.xlarge.serviceId"); + if(strValue != null) ofDao.setOfferMapping("c1.xlarge", strValue); - strValue = EC2Prop.getProperty("m2.4xlarge.serviceId"); - if(strValue != null) ofDao.setOfferMapping("m2.4xlarge", strValue); + strValue = EC2Prop.getProperty("m2.xlarge.serviceId"); + if(strValue != null) ofDao.setOfferMapping("m2.xlarge", strValue); - strValue = EC2Prop.getProperty("cc1.4xlarge.serviceId"); - if(strValue != null) ofDao.setOfferMapping("cc1.4xlarge", strValue); - } - } catch(Exception e) { - logger.error("Unexpected exception ", e); - } - } else logger.error( "ec2-service.properties not found" ); - } - - /** - * Helper function to manage the api connection - * - * @return - */ - private CloudStackApi getApi() { - if (_eng == null) { - _eng = new CloudStackApi(managementServer, cloudAPIPort, false); - } - // regardless of whether _eng is initialized, we must make sure - // access/secret keys are current with what's in the UserCredentials + strValue = EC2Prop.getProperty("m2.2xlarge.serviceId"); + if(strValue != null) ofDao.setOfferMapping("m2.2xlarge", strValue); + + strValue = EC2Prop.getProperty("m2.4xlarge.serviceId"); + if(strValue != null) ofDao.setOfferMapping("m2.4xlarge", strValue); + + strValue = EC2Prop.getProperty("cc1.4xlarge.serviceId"); + if(strValue != null) ofDao.setOfferMapping("cc1.4xlarge", strValue); + } + } catch(Exception e) { + logger.error("Unexpected exception ", e); + } + } else logger.error( "ec2-service.properties not found" ); + } + + /** + * Helper function to manage the api connection + * + * 
@return + */ + private CloudStackApi getApi() { + if (_eng == null) { + _eng = new CloudStackApi(managementServer, cloudAPIPort, false); + } + // regardless of whether _eng is initialized, we must make sure + // access/secret keys are current with what's in the UserCredentials _eng.setApiKey(UserContext.current().getAccessKey()); _eng.setSecretKey(UserContext.current().getSecretKey()); - return _eng; - } + return _eng; + } - /** - * Verifies account can access CloudStack - * - * @param accessKey - * @param secretKey - * @return - * @throws EC2ServiceException - */ - public boolean validateAccount( String accessKey, String secretKey ) throws EC2ServiceException { - String oldApiKey = null; - String oldSecretKey = null; + /** + * Verifies account can access CloudStack + * + * @param accessKey + * @param secretKey + * @return + * @throws EC2ServiceException + */ + public boolean validateAccount( String accessKey, String secretKey ) throws EC2ServiceException { + String oldApiKey = null; + String oldSecretKey = null; - if (accessKey == null || secretKey == null) { + if (accessKey == null || secretKey == null) { return false; } - - // okay, instead of using the getApi() nonsense for validate, we are going to manage _eng - if (_eng == null) { + + // okay, instead of using the getApi() nonsense for validate, we are going to manage _eng + if (_eng == null) { _eng = new CloudStackApi(managementServer, cloudAPIPort, false); - } - - try { - oldApiKey = _eng.getApiKey(); - oldSecretKey = _eng.getSecretKey(); - } catch(Exception e) { - // we really don't care, and expect this - } + } + try { - _eng.setApiKey(accessKey); - _eng.setSecretKey(secretKey); - List accts = _eng.listAccounts(null, null, null, null, null, null, null, null); - if (oldApiKey != null && oldSecretKey != null) { - _eng.setApiKey(oldApiKey); - _eng.setSecretKey(oldSecretKey); - } - if (accts == null) { - return false; - } - return true; - } catch(Exception e) { - logger.error("Validate account failed!"); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } + oldApiKey = _eng.getApiKey(); + oldSecretKey = _eng.getSecretKey(); + } catch(Exception e) { + // we really don't care, and expect this + } + try { + _eng.setApiKey(accessKey); + _eng.setSecretKey(secretKey); + List accts = _eng.listAccounts(null, null, null, null, null, null, null, null); + if (oldApiKey != null && oldSecretKey != null) { + _eng.setApiKey(oldApiKey); + _eng.setSecretKey(oldSecretKey); + } + if (accts == null) { + return false; + } + return true; + } catch(Exception e) { + logger.error("Validate account failed!"); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } - /** - * Creates a security group - * - * @param groupName - * @param groupDesc - * @return - */ - public Boolean createSecurityGroup(String groupName, String groupDesc) { - try { - CloudStackSecurityGroup grp = getApi().createSecurityGroup(groupName, null, groupDesc, null); - if (grp != null && grp.getId() != null) { - return true; - } - return false; - } catch( Exception e ) { - logger.error( "EC2 CreateSecurityGroup - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } + /** + * Creates a security group + * + * @param groupName + * @param groupDesc + * @return + */ + public Boolean createSecurityGroup(String groupName, String groupDesc) { + try { + CloudStackSecurityGroup grp = getApi().createSecurityGroup(groupName, null, groupDesc, null); + if (grp != null && grp.getId() != null) { + return true; 
+ } + return false; + } catch( Exception e ) { + logger.error( "EC2 CreateSecurityGroup - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } - /** - * Deletes a security group - * - * @param groupName - * @return - */ - public boolean deleteSecurityGroup(String groupName) { - try { - CloudStackInfoResponse resp = getApi().deleteSecurityGroup(null, null, null, groupName); - if (resp != null) { - return resp.getSuccess(); - } - return false; - } catch( Exception e ) { - logger.error( "EC2 DeleteSecurityGroup - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } + /** + * Deletes a security group + * + * @param groupName + * @return + */ + public boolean deleteSecurityGroup(String groupName) { + try { + CloudStackInfoResponse resp = getApi().deleteSecurityGroup(null, null, null, groupName); + if (resp != null) { + return resp.getSuccess(); + } + return false; + } catch( Exception e ) { + logger.error( "EC2 DeleteSecurityGroup - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } - /** - * returns a list of security groups - * - * @param request - * @return - */ - public EC2DescribeSecurityGroupsResponse describeSecurityGroups(EC2DescribeSecurityGroups request) - { - try { - EC2DescribeSecurityGroupsResponse response = listSecurityGroups( request.getGroupSet()); - EC2GroupFilterSet gfs = request.getFilterSet(); + /** + * returns a list of security groups + * + * @param request + * @return + */ + public EC2DescribeSecurityGroupsResponse describeSecurityGroups(EC2DescribeSecurityGroups request) + { + try { + EC2DescribeSecurityGroupsResponse response = listSecurityGroups( request.getGroupSet()); + EC2GroupFilterSet gfs = request.getFilterSet(); - if ( null == gfs ) - return response; - else return gfs.evaluate( response ); - } catch( Exception e ) { - logger.error( "EC2 DescribeSecurityGroups - ", e); - throw new EC2ServiceException(ServerError.InternalError, "An unexpected error occurred."); - } - } + if ( null == gfs ) + return response; + else return gfs.evaluate( response ); + } catch( Exception e ) { + logger.error( "EC2 DescribeSecurityGroups - ", e); + throw new EC2ServiceException(ServerError.InternalError, "An unexpected error occurred."); + } + } - /** - * CloudStack supports revoke only by using the ruleid of the ingress rule. - * We list all security groups and find the matching group and use the first ruleId we find. - * - * @param request - * @return - */ - public boolean revokeSecurityGroup( EC2AuthorizeRevokeSecurityGroup request ) - { - if (null == request.getName()) throw new EC2ServiceException(ServerError.InternalError, "Name is a required parameter"); - try { - String[] groupSet = new String[1]; - groupSet[0] = request.getName(); - String ruleId = null; - - EC2IpPermission[] items = request.getIpPermissionSet(); + /** + * CloudStack supports revoke only by using the ruleid of the ingress rule. + * We list all security groups and find the matching group and use the first ruleId we find. 
+ * + * @param request + * @return + */ + public boolean revokeSecurityGroup( EC2AuthorizeRevokeSecurityGroup request ) + { + if (null == request.getName()) throw new EC2ServiceException(ServerError.InternalError, "Name is a required parameter"); + try { + String[] groupSet = new String[1]; + groupSet[0] = request.getName(); + String ruleId = null; - EC2DescribeSecurityGroupsResponse response = listSecurityGroups( groupSet ); - EC2SecurityGroup[] groups = response.getGroupSet(); + EC2IpPermission[] items = request.getIpPermissionSet(); - for (EC2SecurityGroup group : groups) { - EC2IpPermission[] perms = group.getIpPermissionSet(); - for (EC2IpPermission perm : perms) { - ruleId = doesRuleMatch( items[0], perm ); - if (ruleId != null) break; - } - } + EC2DescribeSecurityGroupsResponse response = listSecurityGroups( groupSet ); + EC2SecurityGroup[] groups = response.getGroupSet(); - if (null == ruleId) - throw new EC2ServiceException(ClientError.InvalidGroup_NotFound, "Cannot find matching ruleid."); + for (EC2SecurityGroup group : groups) { + EC2IpPermission[] perms = group.getIpPermissionSet(); + for (EC2IpPermission perm : perms) { + ruleId = doesRuleMatch( items[0], perm ); + if (ruleId != null) break; + } + } - CloudStackInfoResponse resp = getApi().revokeSecurityGroupIngress(ruleId); - if (resp != null && resp.getId() != null) { - return resp.getSuccess(); - } - return false; - } catch( Exception e ) { - logger.error( "EC2 revokeSecurityGroupIngress" + " - " + e.getMessage()); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } + if (null == ruleId) + throw new EC2ServiceException(ClientError.InvalidGroup_NotFound, "Cannot find matching ruleid."); - /** - * authorizeSecurityGroup - * - * @param request - ip permission parameters - */ - public boolean authorizeSecurityGroup(EC2AuthorizeRevokeSecurityGroup request ) - { - if (null == request.getName()) throw new EC2ServiceException(ServerError.InternalError, "Name is a required parameter"); + CloudStackInfoResponse resp = getApi().revokeSecurityGroupIngress(ruleId); + if (resp != null && resp.getId() != null) { + return resp.getSuccess(); + } + return false; + } catch( Exception e ) { + logger.error( "EC2 revokeSecurityGroupIngress" + " - " + e.getMessage()); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } - EC2IpPermission[] items = request.getIpPermissionSet(); + /** + * authorizeSecurityGroup + * + * @param request - ip permission parameters + */ + public boolean authorizeSecurityGroup(EC2AuthorizeRevokeSecurityGroup request ) + { + if (null == request.getName()) throw new EC2ServiceException(ServerError.InternalError, "Name is a required parameter"); - try { - for (EC2IpPermission ipPerm : items) { - EC2SecurityGroup[] groups = ipPerm.getUserSet(); - - List secGroupList = new ArrayList(); - for (EC2SecurityGroup group : groups) { - CloudStackKeyValue pair = new CloudStackKeyValue(); - pair.setKeyValue(group.getAccount(), group.getName()); - secGroupList.add(pair); - } - CloudStackSecurityGroupIngress resp = null; - if (ipPerm.getProtocol().equalsIgnoreCase("icmp")) { - resp = getApi().authorizeSecurityGroupIngress(null, constructList(ipPerm.getIpRangeSet()), null, null, - ipPerm.getIcmpCode(), ipPerm.getIcmpType(), ipPerm.getProtocol(), null, - request.getName(), null, secGroupList); - } else { - resp = getApi().authorizeSecurityGroupIngress(null, constructList(ipPerm.getIpRangeSet()), null, - ipPerm.getToPort().longValue(), null, null, ipPerm.getProtocol(), null, 
request.getName(), - ipPerm.getFromPort().longValue(), secGroupList); - } - if (resp != null && resp.getRuleId() != null) { - return true; - } - return false; - } - } catch(Exception e) { - logger.error( "EC2 AuthorizeSecurityGroupIngress - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - return true; - } + EC2IpPermission[] items = request.getIpPermissionSet(); - /** - * Does the permission from the request (left) match the permission from the cloudStack query (right). - * If the cloudStack rule matches then we return its ruleId. - * - * @param permLeft - * @param permRight - * @return ruleId of the cloudstack rule - */ - private String doesRuleMatch(EC2IpPermission permLeft, EC2IpPermission permRight) - { - int matches = 0; + try { + for (EC2IpPermission ipPerm : items) { + EC2SecurityGroup[] groups = ipPerm.getUserSet(); - if (null != permLeft.getIcmpType() && null != permLeft.getIcmpCode()) { - if (null == permRight.getIcmpType() || null == permRight.getIcmpCode()) return null; + List secGroupList = new ArrayList(); + for (EC2SecurityGroup group : groups) { + CloudStackKeyValue pair = new CloudStackKeyValue(); + pair.setKeyValue(group.getAccount(), group.getName()); + secGroupList.add(pair); + } + CloudStackSecurityGroupIngress resp = null; + if (ipPerm.getProtocol().equalsIgnoreCase("icmp")) { + resp = getApi().authorizeSecurityGroupIngress(null, constructList(ipPerm.getIpRangeSet()), null, null, + ipPerm.getIcmpCode(), ipPerm.getIcmpType(), ipPerm.getProtocol(), null, + request.getName(), null, secGroupList); + } else { + resp = getApi().authorizeSecurityGroupIngress(null, constructList(ipPerm.getIpRangeSet()), null, + ipPerm.getToPort().longValue(), null, null, ipPerm.getProtocol(), null, request.getName(), + ipPerm.getFromPort().longValue(), secGroupList); + } + if (resp != null && resp.getRuleId() != null) { + return true; + } + return false; + } + } catch(Exception e) { + logger.error( "EC2 AuthorizeSecurityGroupIngress - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + return true; + } - if (!permLeft.getIcmpType().equalsIgnoreCase( permRight.getIcmpType())) return null; - if (!permLeft.getIcmpCode().equalsIgnoreCase( permRight.getIcmpCode())) return null; - matches++; - } + /** + * Does the permission from the request (left) match the permission from the cloudStack query (right). + * If the cloudStack rule matches then we return its ruleId. + * + * @param permLeft + * @param permRight + * @return ruleId of the cloudstack rule + */ + private String doesRuleMatch(EC2IpPermission permLeft, EC2IpPermission permRight) + { + int matches = 0; - // -> "Valid Values for EC2 security groups: tcp | udp | icmp or the corresponding protocol number (6 | 17 | 1)." 
- if (null != permLeft.getProtocol()) { - if (null == permRight.getProtocol()) return null; + if (null != permLeft.getIcmpType() && null != permLeft.getIcmpCode()) { + if (null == permRight.getIcmpType() || null == permRight.getIcmpCode()) return null; - String protocol = permLeft.getProtocol(); - if (protocol.equals( "6" )) protocol = "tcp"; - else if (protocol.equals( "17" )) protocol = "udp"; - else if (protocol.equals( "1" )) protocol = "icmp"; + if (!permLeft.getIcmpType().equalsIgnoreCase( permRight.getIcmpType())) return null; + if (!permLeft.getIcmpCode().equalsIgnoreCase( permRight.getIcmpCode())) return null; + matches++; + } - if (!protocol.equalsIgnoreCase( permRight.getProtocol())) return null; - matches++; - } + // -> "Valid Values for EC2 security groups: tcp | udp | icmp or the corresponding protocol number (6 | 17 | 1)." + if (null != permLeft.getProtocol()) { + if (null == permRight.getProtocol()) return null; + + String protocol = permLeft.getProtocol(); + if (protocol.equals( "6" )) protocol = "tcp"; + else if (protocol.equals( "17" )) protocol = "udp"; + else if (protocol.equals( "1" )) protocol = "icmp"; + + if (!protocol.equalsIgnoreCase( permRight.getProtocol())) return null; + matches++; + } - if (null != permLeft.getCIDR()) { - if (null == permRight.getCIDR()) return null; + if (null != permLeft.getCIDR()) { + if (null == permRight.getCIDR()) return null; - if (!permLeft.getCIDR().equalsIgnoreCase( permRight.getCIDR())) return null; - matches++; - } + if (!permLeft.getCIDR().equalsIgnoreCase( permRight.getCIDR())) return null; + matches++; + } - // -> is the port(s) from the request (left) a match of the rule's port(s) - if (0 != permLeft.getFromPort()) { - // -> -1 means all ports match - if (-1 != permLeft.getFromPort()) { - if (permLeft.getFromPort().compareTo(permRight.getFromPort()) != 0 || - permLeft.getToPort().compareTo(permRight.getToPort()) != 0) - return null; - } - matches++; - } + // -> is the port(s) from the request (left) a match of the rule's port(s) + if (0 != permLeft.getFromPort()) { + // -> -1 means all ports match + if (-1 != permLeft.getFromPort()) { + if (permLeft.getFromPort().compareTo(permRight.getFromPort()) != 0 || + permLeft.getToPort().compareTo(permRight.getToPort()) != 0) + return null; + } + matches++; + } - // -> was permLeft set up properly with at least one property to match? - if ( 0 == matches ) - return null; - else return permRight.getRuleId(); - } - - /** - * Returns a list of all snapshots - * - * @param request - * @return - */ + // -> was permLeft set up properly with at least one property to match? 
+ if ( 0 == matches ) + return null; + else return permRight.getRuleId(); + } + + /** + * Returns a list of all snapshots + * + * @param request + * @return + */ public EC2DescribeSnapshotsResponse handleRequest( EC2DescribeSnapshots request ) - { - EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse(); - EC2SnapshotFilterSet sfs = request.getFilterSet(); + { + EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse(); + EC2SnapshotFilterSet sfs = request.getFilterSet(); EC2TagKeyValue[] tagKeyValueSet = request.getResourceTagSet(); - try { - // -> query to get the volume size for each snapshot + try { + // -> query to get the volume size for each snapshot EC2DescribeSnapshotsResponse response = listSnapshots( request.getSnapshotSet(), getResourceTags(tagKeyValueSet)); - if (response == null) { - return new EC2DescribeSnapshotsResponse(); - } - EC2Snapshot[] snapshots = response.getSnapshotSet(); - for (EC2Snapshot snap : snapshots) { - volumes = listVolumes(snap.getVolumeId(), null, volumes, null); - EC2Volume[] volSet = volumes.getVolumeSet(); - if (0 < volSet.length) snap.setVolumeSize(volSet[0].getSize()); - volumes.reset(); - } + if (response == null) { + return new EC2DescribeSnapshotsResponse(); + } + EC2Snapshot[] snapshots = response.getSnapshotSet(); + for (EC2Snapshot snap : snapshots) { + volumes = listVolumes(snap.getVolumeId(), null, volumes, null); + EC2Volume[] volSet = volumes.getVolumeSet(); + if (0 < volSet.length) snap.setVolumeSize(volSet[0].getSize()); + volumes.reset(); + } - if ( null == sfs ) - return response; - else return sfs.evaluate( response ); - } catch( EC2ServiceException error ) { - logger.error( "EC2 DescribeSnapshots - ", error); - throw error; + if ( null == sfs ) + return response; + else return sfs.evaluate( response ); + } catch( EC2ServiceException error ) { + logger.error( "EC2 DescribeSnapshots - ", error); + throw error; - } catch( Exception e ) { - logger.error( "EC2 DescribeSnapshots - ", e); - throw new EC2ServiceException(ServerError.InternalError, "An unexpected error occurred."); - } - } + } catch( Exception e ) { + logger.error( "EC2 DescribeSnapshots - ", e); + throw new EC2ServiceException(ServerError.InternalError, "An unexpected error occurred."); + } + } - /** - * Creates a snapshot - * - * @param volumeId - * @return - */ - public EC2Snapshot createSnapshot( String volumeId ) { - try { - - CloudStackSnapshot snap = getApi().createSnapshot(volumeId, null, null, null); - if (snap == null) { - throw new EC2ServiceException(ServerError.InternalError, "Unable to create snapshot!"); - } - EC2Snapshot ec2Snapshot = new EC2Snapshot(); + /** + * Creates a snapshot + * + * @param volumeId + * @return + */ + public EC2Snapshot createSnapshot( String volumeId ) { + try { - ec2Snapshot.setId(snap.getId()); - ec2Snapshot.setName(snap.getName()); - ec2Snapshot.setType(snap.getSnapshotType()); - ec2Snapshot.setAccountName(snap.getAccountName()); - ec2Snapshot.setDomainId(snap.getDomainId()); - ec2Snapshot.setCreated(snap.getCreated()); - ec2Snapshot.setVolumeId(snap.getVolumeId()); - - List vols = getApi().listVolumes(null, null, null, snap.getVolumeId(), null, null, null, null, null, null, null, null); + CloudStackSnapshot snap = getApi().createSnapshot(volumeId, null, null, null); + if (snap == null) { + throw new EC2ServiceException(ServerError.InternalError, "Unable to create snapshot!"); + } + EC2Snapshot ec2Snapshot = new EC2Snapshot(); - if(vols.size() > 0) { - assert(vols.get(0).getSize() != null); - Long 
sizeInGB = vols.get(0).getSize().longValue()/1073741824; - ec2Snapshot.setVolumeSize(sizeInGB); - } + ec2Snapshot.setId(snap.getId()); + ec2Snapshot.setName(snap.getName()); + ec2Snapshot.setType(snap.getSnapshotType()); + ec2Snapshot.setAccountName(snap.getAccountName()); + ec2Snapshot.setDomainId(snap.getDomainId()); + ec2Snapshot.setCreated(snap.getCreated()); + ec2Snapshot.setVolumeId(snap.getVolumeId()); - return ec2Snapshot; - } catch( Exception e ) { - logger.error( "EC2 CreateSnapshot - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } + List vols = getApi().listVolumes(null, null, null, snap.getVolumeId(), null, null, null, null, null, null, null, null); - /** - * Deletes a snapshot - * - * @param snapshotId - * @return - */ - public boolean deleteSnapshot(String snapshotId) { - try { - - CloudStackInfoResponse resp = getApi().deleteSnapshot(snapshotId); - if(resp != null) { - return resp.getSuccess(); - } + if(vols.size() > 0) { + assert(vols.get(0).getSize() != null); + Long sizeInGB = vols.get(0).getSize().longValue()/1073741824; + ec2Snapshot.setVolumeSize(sizeInGB); + } - return false; - } catch(Exception e) { - logger.error( "EC2 DeleteSnapshot - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } - - - /** REST API calls this method. + return ec2Snapshot; + } catch( Exception e ) { + logger.error( "EC2 CreateSnapshot - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * Deletes a snapshot + * + * @param snapshotId + * @return + */ + public boolean deleteSnapshot(String snapshotId) { + try { + + CloudStackInfoResponse resp = getApi().deleteSnapshot(snapshotId); + if(resp != null) { + return resp.getSuccess(); + } + + return false; + } catch(Exception e) { + logger.error( "EC2 DeleteSnapshot - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); + } + } + + + /** REST API calls this method. 
* Modify an existing template * * @param request @@ -536,7 +527,7 @@ public class EC2Engine { try { images = listTemplates( request.getId(), images ); EC2Image[] imageSet = images.getImageSet(); - + CloudStackTemplate resp = getApi().updateTemplate(request.getId(), null, request.getDescription(), null, imageSet[0].getName(), null, null); if (resp != null) { return true; @@ -549,21 +540,21 @@ public class EC2Engine { } - /** - * Modify an existing template - * - * @param request - * @return - */ - public boolean modifyImageAttribute( EC2ModifyImageAttribute request ) - { + /** + * Modify an existing template + * + * @param request + * @return + */ + public boolean modifyImageAttribute( EC2ModifyImageAttribute request ) + { try { if(request.getAttribute().equals(ImageAttribute.launchPermission)){ - + String accounts = ""; Boolean isPublic = null; EC2ModifyImageAttribute.Operation operation = request.getLaunchPermOperation(); - + List accountOrGroupList = request.getLaunchPermissionAccountsList(); if(accountOrGroupList != null && !accountOrGroupList.isEmpty()){ boolean first = true; @@ -597,25 +588,25 @@ public class EC2Engine { logger.error( "EC2 modifyImageAttribute - ", e); throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); } - + return false; - - - } - + + + } + public EC2ImageAttributes describeImageAttribute(EC2DescribeImageAttribute request) { EC2ImageAttributes imageAtts = new EC2ImageAttributes(); - + try { imageAtts.setImageId(request.getImageId()); if(request.getAttribute().equals(ImageAttribute.launchPermission)){ CloudStackTemplatePermission tempPerm = getApi().listTemplatePermissions(request.getImageId(), null, null); if(tempPerm != null){ imageAtts.setDomainId(tempPerm.getDomainId()); - + List accntList = tempPerm.getAccounts(); imageAtts.setAccountNamesWithLaunchPermission(accntList); - + imageAtts.setIsPublic(tempPerm.getIsPublic()); } }else if(request.getAttribute().equals(ImageAttribute.description)){ @@ -631,47 +622,47 @@ public class EC2Engine { logger.error( "EC2 describeImageAttribute - ", e); throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); } - + return imageAtts; } - - /** - * If given a specific list of snapshots of interest, then only values from those snapshots are returned. - * - * @param interestedShots - can be null, should be a subset of all snapshots - */ + + /** + * If given a specific list of snapshots of interest, then only values from those snapshots are returned. 
+ * + * @param interestedShots - can be null, should be a subset of all snapshots + */ private EC2DescribeSnapshotsResponse listSnapshots( String[] interestedShots, List resourceTagSet ) throws Exception { - EC2DescribeSnapshotsResponse snapshots = new EC2DescribeSnapshotsResponse(); + EC2DescribeSnapshotsResponse snapshots = new EC2DescribeSnapshotsResponse(); - List cloudSnaps; - if (interestedShots == null || interestedShots.length == 0) { + List cloudSnaps; + if (interestedShots == null || interestedShots.length == 0) { cloudSnaps = getApi().listSnapshots(null, null, null, null, null, null, null, null, null, resourceTagSet); - } else { - cloudSnaps = new ArrayList(); + } else { + cloudSnaps = new ArrayList(); - for(String id : interestedShots) { + for(String id : interestedShots) { List tmpList = getApi().listSnapshots(null, null, id, null, null, null, null, - null, null, resourceTagSet); - cloudSnaps.addAll(tmpList); - } - } + null, null, resourceTagSet); + cloudSnaps.addAll(tmpList); + } + } - if (cloudSnaps == null) { - return null; - } + if (cloudSnaps == null) { + return null; + } - for(CloudStackSnapshot cloudSnapshot : cloudSnaps) { - EC2Snapshot shot = new EC2Snapshot(); - shot.setId(cloudSnapshot.getId()); - shot.setName(cloudSnapshot.getName()); - shot.setVolumeId(cloudSnapshot.getVolumeId()); - shot.setType(cloudSnapshot.getSnapshotType()); - shot.setState(cloudSnapshot.getState()); - shot.setCreated(cloudSnapshot.getCreated()); - shot.setAccountName(cloudSnapshot.getAccountName()); - shot.setDomainId(cloudSnapshot.getDomainId()); + for(CloudStackSnapshot cloudSnapshot : cloudSnaps) { + EC2Snapshot shot = new EC2Snapshot(); + shot.setId(cloudSnapshot.getId()); + shot.setName(cloudSnapshot.getName()); + shot.setVolumeId(cloudSnapshot.getVolumeId()); + shot.setType(cloudSnapshot.getSnapshotType()); + shot.setState(cloudSnapshot.getState()); + shot.setCreated(cloudSnapshot.getCreated()); + shot.setAccountName(cloudSnapshot.getAccountName()); + shot.setDomainId(cloudSnapshot.getDomainId()); List resourceTags = cloudSnapshot.getTags(); for(CloudStackKeyValue resourceTag : resourceTags) { @@ -682,470 +673,470 @@ public class EC2Engine { shot.addResourceTag(param); } - snapshots.addSnapshot(shot); - } - return snapshots; - } + snapshots.addSnapshot(shot); + } + return snapshots; + } - // handlers - /** - * return password data from the instance - * - * @param instanceId - * @return - */ - public EC2PasswordData getPasswordData(String instanceId) { - try { - CloudStackPasswordData resp = getApi().getVMPassword(instanceId); - EC2PasswordData passwdData = new EC2PasswordData(); - if (resp != null) { - passwdData.setInstanceId(instanceId); - passwdData.setEncryptedPassword(resp.getEncryptedpassword()); - } - return passwdData; - } catch(Exception e) { - logger.error("EC2 GetPasswordData - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } - /** - * Lists SSH KeyPairs on the systme - * - * @param request - * @return - */ - public EC2DescribeKeyPairsResponse describeKeyPairs( EC2DescribeKeyPairs request ) { - try { - EC2KeyPairFilterSet filterSet = request.getKeyFilterSet(); - String[] keyNames = request.getKeyNames(); - List keyPairs = getApi().listSSHKeyPairs(null, null, null); - List keyPairsList = new ArrayList(); - - if (keyPairs != null) { - // Let's trim the list of keypairs to only the ones listed in keyNames - List matchedKeyPairs = new ArrayList(); - if (keyNames != null && keyNames.length > 0) { - for (CloudStackKeyPair keyPair : 
keyPairs) {
-                        boolean matched = false;
-                        for (String keyName : keyNames) {
-                            if (keyPair.getName().equalsIgnoreCase(keyName)) {
-                                matched = true;
-                                break;
-                            }
-                        }
-                        if (matched) {
-                            matchedKeyPairs.add(keyPair);
-                        }
-                    }
-                    if (matchedKeyPairs.isEmpty()) {
-                        throw new EC2ServiceException(ServerError.InternalError, "No matching keypairs found");
-                    }
-                }else{
-                    matchedKeyPairs = keyPairs;
-                }
-
-
-            // this should be reworked... converting from CloudStackKeyPairResponse to EC2SSHKeyPair is dumb
-            for (CloudStackKeyPair respKeyPair: matchedKeyPairs) {
-                EC2SSHKeyPair ec2KeyPair = new EC2SSHKeyPair();
-                ec2KeyPair.setFingerprint(respKeyPair.getFingerprint());
-                ec2KeyPair.setKeyName(respKeyPair.getName());
-                ec2KeyPair.setPrivateKey(respKeyPair.getPrivatekey());
-                keyPairsList.add(ec2KeyPair);
-            }
-        }
-        return filterSet.evaluate(keyPairsList);
-        } catch(Exception e) {
-            logger.error("EC2 DescribeKeyPairs - ", e);
-            throw new EC2ServiceException(ServerError.InternalError, e.getMessage());
-        }
-    }
+    // handlers
+    /**
+     * return password data from the instance
+     *
+     * @param instanceId
+     * @return
+     */
+    public EC2PasswordData getPasswordData(String instanceId) {
+        try {
+            CloudStackPasswordData resp = getApi().getVMPassword(instanceId);
+            EC2PasswordData passwdData = new EC2PasswordData();
+            if (resp != null) {
+                passwdData.setInstanceId(instanceId);
+                passwdData.setEncryptedPassword(resp.getEncryptedpassword());
+            }
+            return passwdData;
+        } catch(Exception e) {
+            logger.error("EC2 GetPasswordData - ", e);
+            throw new EC2ServiceException(ServerError.InternalError, e.getMessage());
+        }
+    }
+    /**
+     * Lists SSH KeyPairs on the system
+     *
+     * @param request
+     * @return
+     */
+    public EC2DescribeKeyPairsResponse describeKeyPairs( EC2DescribeKeyPairs request ) {
+        try {
+            EC2KeyPairFilterSet filterSet = request.getKeyFilterSet();
+            String[] keyNames = request.getKeyNames();
+            List keyPairs = getApi().listSSHKeyPairs(null, null, null);
+            List keyPairsList = new ArrayList();
-    /**
-     * Delete SSHKeyPair
-     *
-     * @param request
-     * @return
-     */
-    public boolean deleteKeyPair( EC2DeleteKeyPair request ) {
-        try {
-            CloudStackInfoResponse resp = getApi().deleteSSHKeyPair(request.getKeyName(), null, null);
-            if (resp == null) {
-                throw new Exception("Ivalid CloudStack API response");
-            }
+            if (keyPairs != null) {
+                // Let's trim the list of keypairs to only the ones listed in keyNames
+                List matchedKeyPairs = new ArrayList();
+                if (keyNames != null && keyNames.length > 0) {
+                    for (CloudStackKeyPair keyPair : keyPairs) {
+                        boolean matched = false;
+                        for (String keyName : keyNames) {
+                            if (keyPair.getName().equalsIgnoreCase(keyName)) {
+                                matched = true;
+                                break;
+                            }
+                        }
+                        if (matched) {
+                            matchedKeyPairs.add(keyPair);
+                        }
+                    }
+                    if (matchedKeyPairs.isEmpty()) {
+                        throw new EC2ServiceException(ServerError.InternalError, "No matching keypairs found");
+                    }
+                }else{
+                    matchedKeyPairs = keyPairs;
+                }
-            return resp.getSuccess();
-        } catch(Exception e) {
-            logger.error("EC2 DeleteKeyPair - ", e);
-            throw new EC2ServiceException(ServerError.InternalError, e.getMessage());
-        }
-    }
-    /**
-     * Create SSHKeyPair
-     *
-     * @param request
-     * @return
-     */
-    public EC2SSHKeyPair createKeyPair(EC2CreateKeyPair request) {
-        try {
-            CloudStackKeyPair resp = getApi().createSSHKeyPair(request.getKeyName(), null, null);
-            if (resp == null) {
-                throw new Exception("Ivalid CloudStack API response");
-            }
+
+            // this should be reworked... 
converting from CloudStackKeyPairResponse to EC2SSHKeyPair is dumb + for (CloudStackKeyPair respKeyPair: matchedKeyPairs) { + EC2SSHKeyPair ec2KeyPair = new EC2SSHKeyPair(); + ec2KeyPair.setFingerprint(respKeyPair.getFingerprint()); + ec2KeyPair.setKeyName(respKeyPair.getName()); + ec2KeyPair.setPrivateKey(respKeyPair.getPrivatekey()); + keyPairsList.add(ec2KeyPair); + } + } + return filterSet.evaluate(keyPairsList); + } catch(Exception e) { + logger.error("EC2 DescribeKeyPairs - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } - EC2SSHKeyPair response = new EC2SSHKeyPair(); - response.setFingerprint(resp.getFingerprint()); - response.setKeyName(resp.getName()); - response.setPrivateKey(resp.getPrivatekey()); + /** + * Delete SSHKeyPair + * + * @param request + * @return + */ + public boolean deleteKeyPair( EC2DeleteKeyPair request ) { + try { + CloudStackInfoResponse resp = getApi().deleteSSHKeyPair(request.getKeyName(), null, null); + if (resp == null) { + throw new Exception("Ivalid CloudStack API response"); + } - return response; - } catch (Exception e) { - logger.error("EC2 CreateKeyPair - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } + return resp.getSuccess(); + } catch(Exception e) { + logger.error("EC2 DeleteKeyPair - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } - /** - * Import an existing SSH KeyPair - * - * @param request - * @return - */ - public EC2SSHKeyPair importKeyPair( EC2ImportKeyPair request ) { - try { - CloudStackKeyPair resp = getApi().registerSSHKeyPair(request.getKeyName(), request.getPublicKeyMaterial()); - if (resp == null) { - throw new Exception("Ivalid CloudStack API response"); - } + /** + * Create SSHKeyPair + * + * @param request + * @return + */ + public EC2SSHKeyPair createKeyPair(EC2CreateKeyPair request) { + try { + CloudStackKeyPair resp = getApi().createSSHKeyPair(request.getKeyName(), null, null); + if (resp == null) { + throw new Exception("Ivalid CloudStack API response"); + } - EC2SSHKeyPair response = new EC2SSHKeyPair(); - response.setFingerprint(resp.getFingerprint()); - response.setKeyName(resp.getName()); - response.setPrivateKey(resp.getPrivatekey()); + EC2SSHKeyPair response = new EC2SSHKeyPair(); + response.setFingerprint(resp.getFingerprint()); + response.setKeyName(resp.getName()); + response.setPrivateKey(resp.getPrivatekey()); - return response; - } catch (Exception e) { - logger.error("EC2 ImportKeyPair - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } + return response; + } catch (Exception e) { + logger.error("EC2 CreateKeyPair - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } - /** - * list ip addresses that have been allocated - * - * @param request - * @return - */ - public EC2DescribeAddressesResponse describeAddresses( EC2DescribeAddresses request ) { - try { - List addrList = getApi().listPublicIpAddresses(null, null, null, null, null, null, null, null, null); + /** + * Import an existing SSH KeyPair + * + * @param request + * @return + */ + public EC2SSHKeyPair importKeyPair( EC2ImportKeyPair request ) { + try { + CloudStackKeyPair resp = getApi().registerSSHKeyPair(request.getKeyName(), request.getPublicKeyMaterial()); + if (resp == null) { + throw new Exception("Ivalid CloudStack API response"); + } - EC2AddressFilterSet filterSet = request.getFilterSet(); - List addressList = new ArrayList(); - if 
(addrList != null && addrList.size() > 0) { - for (CloudStackIpAddress addr: addrList) { - // remember, if no filters are set, request.inPublicIpSet always returns true - if (request.inPublicIpSet(addr.getIpAddress())) { - EC2Address ec2Address = new EC2Address(); - ec2Address.setIpAddress(addr.getIpAddress()); - if (addr.getVirtualMachineId() != null) - ec2Address.setAssociatedInstanceId(addr.getVirtualMachineId().toString()); - addressList.add(ec2Address); - } - } - } + EC2SSHKeyPair response = new EC2SSHKeyPair(); + response.setFingerprint(resp.getFingerprint()); + response.setKeyName(resp.getName()); + response.setPrivateKey(resp.getPrivatekey()); - return filterSet.evaluate(addressList); - } catch(Exception e) { - logger.error("EC2 DescribeAddresses - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } + return response; + } catch (Exception e) { + logger.error("EC2 ImportKeyPair - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } - /** - * release an IP Address - * - * @param request - * @return - */ - public boolean releaseAddress(EC2ReleaseAddress request) { - try { - CloudStackIpAddress cloudIp = getApi().listPublicIpAddresses(null, null, null, null, null, request.getPublicIp(), null, null, null).get(0); - CloudStackInfoResponse resp = getApi().disassociateIpAddress(cloudIp.getId()); - if (resp != null) { - return resp.getSuccess(); - } - } catch(Exception e) { - logger.error("EC2 ReleaseAddress - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - return false; - } + /** + * list ip addresses that have been allocated + * + * @param request + * @return + */ + public EC2DescribeAddressesResponse describeAddresses( EC2DescribeAddresses request ) { + try { + List addrList = getApi().listPublicIpAddresses(null, null, null, null, null, null, null, null, null); - /** - * Associate an address with an instance - * - * @param request - * @return - */ - public boolean associateAddress( EC2AssociateAddress request ) { - try { - CloudStackIpAddress cloudIp = getApi().listPublicIpAddresses(null, null, null, null, null, request.getPublicIp(), null, null, null).get(0); - CloudStackUserVm cloudVm = getApi().listVirtualMachines(null, null, true, null, null, null, null, request.getInstanceId(), null, null, null, null, null, null, null, null, null).get(0); + EC2AddressFilterSet filterSet = request.getFilterSet(); + List addressList = new ArrayList(); + if (addrList != null && addrList.size() > 0) { + for (CloudStackIpAddress addr: addrList) { + // remember, if no filters are set, request.inPublicIpSet always returns true + if (request.inPublicIpSet(addr.getIpAddress())) { + EC2Address ec2Address = new EC2Address(); + ec2Address.setIpAddress(addr.getIpAddress()); + if (addr.getVirtualMachineId() != null) + ec2Address.setAssociatedInstanceId(addr.getVirtualMachineId().toString()); + addressList.add(ec2Address); + } + } + } - CloudStackInfoResponse resp = getApi().enableStaticNat(cloudIp.getId(), cloudVm.getId()); - if (resp != null) { - return resp.getSuccess(); - } - } catch(Exception e) { - logger.error( "EC2 AssociateAddress - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); - } - return false; - } + return filterSet.evaluate(addressList); + } catch(Exception e) { + logger.error("EC2 DescribeAddresses - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } - /** - * Disassociate an address from an instance - * - * @param request - * @return - */ - public boolean disassociateAddress( EC2DisassociateAddress request ) { - try { - CloudStackIpAddress cloudIp = getApi().listPublicIpAddresses(null, null, null, null, null, request.getPublicIp(), null, null, null).get(0); - CloudStackInfoResponse resp = getApi().disableStaticNat(cloudIp.getId()); - if (resp != null) { - return resp.getSuccess(); - } - } catch(Exception e) { - logger.error( "EC2 DisassociateAddress - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - return false; - } + /** + * release an IP Address + * + * @param request + * @return + */ + public boolean releaseAddress(EC2ReleaseAddress request) { + try { + CloudStackIpAddress cloudIp = getApi().listPublicIpAddresses(null, null, null, null, null, request.getPublicIp(), null, null, null).get(0); + CloudStackInfoResponse resp = getApi().disassociateIpAddress(cloudIp.getId()); + if (resp != null) { + return resp.getSuccess(); + } + } catch(Exception e) { + logger.error("EC2 ReleaseAddress - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + return false; + } - /** - * Allocate an address - * - * @param request - * @return - */ - public EC2Address allocateAddress() - { - try { + /** + * Associate an address with an instance + * + * @param request + * @return + */ + public boolean associateAddress( EC2AssociateAddress request ) { + try { + CloudStackIpAddress cloudIp = getApi().listPublicIpAddresses(null, null, null, null, null, request.getPublicIp(), null, null, null).get(0); + CloudStackUserVm cloudVm = getApi().listVirtualMachines(null, null, true, null, null, null, null, request.getInstanceId(), null, null, null, null, null, null, null, null, null).get(0); + + CloudStackInfoResponse resp = getApi().enableStaticNat(cloudIp.getId(), cloudVm.getId()); + if (resp != null) { + return resp.getSuccess(); + } + } catch(Exception e) { + logger.error( "EC2 AssociateAddress - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); + } + return false; + } + + /** + * Disassociate an address from an instance + * + * @param request + * @return + */ + public boolean disassociateAddress( EC2DisassociateAddress request ) { + try { + CloudStackIpAddress cloudIp = getApi().listPublicIpAddresses(null, null, null, null, null, request.getPublicIp(), null, null, null).get(0); + CloudStackInfoResponse resp = getApi().disableStaticNat(cloudIp.getId()); + if (resp != null) { + return resp.getSuccess(); + } + } catch(Exception e) { + logger.error( "EC2 DisassociateAddress - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); + } + return false; + } + + /** + * Allocate an address + * + * @param request + * @return + */ + public EC2Address allocateAddress() + { + try { EC2Address ec2Address = new EC2Address(); // this gets our networkId CloudStackAccount caller = getCurrentAccount(); - + CloudStackZone zone = findZone(); CloudStackNetwork net = findNetwork(zone); // CloudStackIpAddress resp = getApi().associateIpAddress(null, null, null, "0036952d-48df-4422-9fd0-94b0885e18cb"); CloudStackIpAddress resp = getApi().associateIpAddress(zone.getId(), caller.getName(), caller.getDomainId(), net != null ? net.getId():null); - ec2Address.setAssociatedInstanceId(resp.getId()); - - if (resp.getIpAddress() == null) { - List addrList = getApi().listPublicIpAddresses(null, null, null, null, null, null, null, null, null); - if (addrList != null && addrList.size() > 0) { - for (CloudStackIpAddress addr: addrList) { - if (addr.getId().equalsIgnoreCase(resp.getId())) { - ec2Address.setIpAddress(addr.getIpAddress()); - } - } - } - } else { - ec2Address.setIpAddress(resp.getIpAddress()); - } + ec2Address.setAssociatedInstanceId(resp.getId()); - return ec2Address; - } catch(Exception e) { - logger.error( "EC2 AllocateAddress - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } + if (resp.getIpAddress() == null) { + List addrList = getApi().listPublicIpAddresses(null, null, null, null, null, null, null, null, null); + if (addrList != null && addrList.size() > 0) { + for (CloudStackIpAddress addr: addrList) { + if (addr.getId().equalsIgnoreCase(resp.getId())) { + ec2Address.setIpAddress(addr.getIpAddress()); + } + } + } + } else { + ec2Address.setIpAddress(resp.getIpAddress()); + } - /** - * List of templates available. We only support the imageSet version of this call or when no search parameters are passed - * which results in asking for all templates. - * - * @param request - * @return - */ - public EC2DescribeImagesResponse describeImages(EC2DescribeImages request) - { - EC2DescribeImagesResponse images = new EC2DescribeImagesResponse(); + return ec2Address; + } catch(Exception e) { + logger.error( "EC2 AllocateAddress - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); + } + } - try { - String[] templateIds = request.getImageSet(); + /** + * List of templates available. We only support the imageSet version of this call or when no search parameters are passed + * which results in asking for all templates. + * + * @param request + * @return + */ + public EC2DescribeImagesResponse describeImages(EC2DescribeImages request) + { + EC2DescribeImagesResponse images = new EC2DescribeImagesResponse(); - if ( 0 == templateIds.length ) { - return listTemplates(null, images); - } - for (String s : templateIds) { - images = listTemplates(s, images); - } - return images; + try { + String[] templateIds = request.getImageSet(); - } catch( Exception e ) { - logger.error( "EC2 DescribeImages - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } + if ( 0 == templateIds.length ) { + return listTemplates(null, images); + } + for (String s : templateIds) { + images = listTemplates(s, images); + } + return images; - /** - * Create a template - * Amazon API just gives us the instanceId to create the template from. 
-     * But our createTemplate function requires the volumeId and osTypeId.
-     * So to get that we must make the following sequence of cloud API calls:
-     * 1) listVolumes&virtualMachineId= -- gets the volumeId
-     * 2) listVirtualMachinees&id= -- gets the templateId
-     * 3) listTemplates&id= -- gets the osTypeId
-     *
-     * If we have to start and stop the VM in question then this function is
-     * going to take a long time to complete.
-     *
-     * @param request
-     * @return
-     */
-    public EC2CreateImageResponse createImage(EC2CreateImage request)
-    {
-        EC2CreateImageResponse response = null;
-        boolean needsRestart = false;
-        String volumeId = null;
+        } catch( Exception e ) {
+            logger.error( "EC2 DescribeImages - ", e);
+            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred.");
+        }
+    }
-        try {
-            // [A] Creating a template from a VM volume should be from the ROOT volume
-            // Also for this to work the VM must be in a Stopped state so we 'reboot' it if its not
-            EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse();
-            volumes = listVolumes( null, request.getInstanceId(), volumes, null );
-            EC2Volume[] volSet = volumes.getVolumeSet();
-            for (EC2Volume vol : volSet) {
-                if (vol.getType().equalsIgnoreCase( "ROOT" )) {
-                    String vmState = vol.getVMState();
-                    if (vmState.equalsIgnoreCase( "running" ) || vmState.equalsIgnoreCase( "starting" )) {
-                        needsRestart = true;
-                        if (!stopVirtualMachine( request.getInstanceId() ))
-                            throw new EC2ServiceException(ClientError.IncorrectState, "CreateImage - instance must be in a stopped state");
-                    }
-                    volumeId = vol.getId();
-                    break;
-                }
-            }
+    /**
+     * Create a template
+     * Amazon API just gives us the instanceId to create the template from.
+     * But our createTemplate function requires the volumeId and osTypeId.
+     * So to get that we must make the following sequence of cloud API calls:
+     * 1) listVolumes&virtualMachineId= -- gets the volumeId
+     * 2) listVirtualMachines&id= -- gets the templateId
+     * 3) listTemplates&id= -- gets the osTypeId
+     *
+     * If we have to start and stop the VM in question then this function is
+     * going to take a long time to complete.
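+     *
+     * A rough caller sketch; the EC2CreateImage setter names below are assumed
+     * from the getters this method uses and are not part of this change:
+     * <pre>
+     *   EC2CreateImage req = new EC2CreateImage();
+     *   req.setInstanceId("i-12345678");   // instance whose ROOT volume is imaged
+     *   req.setName("web-gold-image");     // template name required by CloudStack
+     *   EC2CreateImageResponse image = engine.createImage(req);  // 'engine' is hypothetical
+     * </pre>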
+     *
+     * @param request
+     * @return
+     */
+    public EC2CreateImageResponse createImage(EC2CreateImage request)
+    {
+        EC2CreateImageResponse response = null;
+        boolean needsRestart = false;
+        String volumeId = null;
-            // [B] The parameters must be in sorted order for proper signature generation
-            EC2DescribeInstancesResponse instances = new EC2DescribeInstancesResponse();
-            instances = lookupInstances( request.getInstanceId(), instances, null );
-            EC2Instance[] instanceSet = instances.getInstanceSet();
-            String templateId = instanceSet[0].getTemplateId();
+        try {
+            // [A] Creating a template from a VM volume should be from the ROOT volume
+            // Also for this to work the VM must be in a Stopped state so we 'reboot' it if it's not
+            EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse();
+            volumes = listVolumes( null, request.getInstanceId(), volumes, null );
+            EC2Volume[] volSet = volumes.getVolumeSet();
+            for (EC2Volume vol : volSet) {
+                if (vol.getType().equalsIgnoreCase( "ROOT" )) {
+                    String vmState = vol.getVMState();
+                    if (vmState.equalsIgnoreCase( "running" ) || vmState.equalsIgnoreCase( "starting" )) {
+                        needsRestart = true;
+                        if (!stopVirtualMachine( request.getInstanceId() ))
+                            throw new EC2ServiceException(ClientError.IncorrectState, "CreateImage - instance must be in a stopped state");
+                    }
+                    volumeId = vol.getId();
+                    break;
+                }
+            }
-            EC2DescribeImagesResponse images = new EC2DescribeImagesResponse();
-            images = listTemplates( templateId, images );
-            EC2Image[] imageSet = images.getImageSet();
-            String osTypeId = imageSet[0].getOsTypeId();
-            CloudStackTemplate resp = getApi().createTemplate((request.getDescription() == null ? "" : request.getDescription()), request.getName(),
-                    osTypeId, null, null, null, null, null, null, volumeId);
-            if (resp == null || resp.getId() == null) {
-                throw new EC2ServiceException(ServerError.InternalError, "An upexpected error occurred.");
-            }
-            //if template was created succesfully, create the new image response
-            response = new EC2CreateImageResponse();
-            response.setId(resp.getId());
+            // [B] The parameters must be in sorted order for proper signature generation
+            EC2DescribeInstancesResponse instances = new EC2DescribeInstancesResponse();
+            instances = lookupInstances( request.getInstanceId(), instances, null );
+            EC2Instance[] instanceSet = instances.getInstanceSet();
+            String templateId = instanceSet[0].getTemplateId();
-            // [C] If we stopped the virtual machine now we need to restart it
-            if (needsRestart) {
-                if (!startVirtualMachine( request.getInstanceId() ))
-                    throw new EC2ServiceException(ServerError.InternalError,
-                            "CreateImage - restarting instance " + request.getInstanceId() + " failed");
-            }
-            return response;
+            EC2DescribeImagesResponse images = new EC2DescribeImagesResponse();
+            images = listTemplates( templateId, images );
+            EC2Image[] imageSet = images.getImageSet();
+            String osTypeId = imageSet[0].getOsTypeId();
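+            // The template is cut from the instance's ROOT volume located in step [A];
+            // apart from description (defaulted to "" when absent), name, osTypeId and
+            // volumeId, every optional createTemplate argument is passed as null below.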
"" : request.getDescription()), request.getName(), + osTypeId, null, null, null, null, null, null, volumeId); + if (resp == null || resp.getId() == null) { + throw new EC2ServiceException(ServerError.InternalError, "An upexpected error occurred."); + } - /** - * Register a template - * - * @param request - * @return - */ - public EC2CreateImageResponse registerImage(EC2RegisterImage request) - { - try { - CloudStackAccount caller = getCurrentAccount(); + //if template was created succesfully, create the new image response + response = new EC2CreateImageResponse(); + response.setId(resp.getId()); + + // [C] If we stopped the virtual machine now we need to restart it + if (needsRestart) { + if (!startVirtualMachine( request.getInstanceId() )) + throw new EC2ServiceException(ServerError.InternalError, + "CreateImage - restarting instance " + request.getInstanceId() + " failed"); + } + return response; + + } catch( Exception e ) { + logger.error( "EC2 CreateImage - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * Register a template + * + * @param request + * @return + */ + public EC2CreateImageResponse registerImage(EC2RegisterImage request) + { + try { + CloudStackAccount caller = getCurrentAccount(); if (null == request.getName()) throw new EC2ServiceException(ClientError.Unsupported, "Missing parameter - name"); - List templates = getApi().registerTemplate((request.getDescription() == null ? request.getName() : request.getDescription()), - request.getFormat(), request.getHypervisor(), request.getName(), toOSTypeId(request.getOsTypeName()), request.getLocation(), - toZoneId(request.getZoneName(), null), null, null, null, null, null, null, null, null, null); - if (templates != null) { - // technically we will only ever register a single template... - for (CloudStackTemplate template : templates) { - if (template != null && template.getId() != null) { - EC2CreateImageResponse image = new EC2CreateImageResponse(); - image.setId(template.getId().toString()); - return image; - } - } - } - return null; - } catch( Exception e ) { - logger.error( "EC2 RegisterImage - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } + List templates = getApi().registerTemplate((request.getDescription() == null ? request.getName() : request.getDescription()), + request.getFormat(), request.getHypervisor(), request.getName(), toOSTypeId(request.getOsTypeName()), request.getLocation(), + toZoneId(request.getZoneName(), null), null, null, null, null, null, null, null, null, null); + if (templates != null) { + // technically we will only ever register a single template... + for (CloudStackTemplate template : templates) { + if (template != null && template.getId() != null) { + EC2CreateImageResponse image = new EC2CreateImageResponse(); + image.setId(template.getId().toString()); + return image; + } + } + } + return null; + } catch( Exception e ) { + logger.error( "EC2 RegisterImage - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); + } + } - /** - * Deregister a template(image) - * Our implementation is different from Amazon in that we do delete the template - * when we deregister it. The cloud API has not deregister call. 
-     *
-     * @param image
-     * @return
-     */
-    public boolean deregisterImage( EC2Image image )
-    {
-        try {
-            CloudStackInfoResponse resp = getApi().deleteTemplate(image.getId(), null);
-            return resp.getSuccess();
-        } catch( Exception e ) {
-            logger.error( "EC2 DeregisterImage - ", e);
-            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred.");
-        }
-    }
+    /**
+     * Deregister a template(image)
+     * Our implementation is different from Amazon in that we do delete the template
+     * when we deregister it. The cloud API has no deregister call.
+     *
+     * @param image
+     * @return
+     */
+    public boolean deregisterImage( EC2Image image )
+    {
+        try {
+            CloudStackInfoResponse resp = getApi().deleteTemplate(image.getId(), null);
+            return resp.getSuccess();
+        } catch( Exception e ) {
+            logger.error( "EC2 DeregisterImage - ", e);
+            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred.");
+        }
+    }
-    /**
-     * list instances
-     *
-     * @param request
-     * @return
-     */
-    public EC2DescribeInstancesResponse describeInstances(EC2DescribeInstances request ) {
-        try {
         EC2TagKeyValue[] tagKeyValueSet = request.getResourceTagSet();
         return listVirtualMachines( request.getInstancesSet(), request.getFilterSet(), getResourceTags(tagKeyValueSet));
-        } catch( Exception e ) {
-            logger.error( "EC2 DescribeInstances - " ,e);
-            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred.");
-        }
-    }
+    /**
+     * list instances
+     *
+     * @param request
+     * @return
+     */
+    public EC2DescribeInstancesResponse describeInstances(EC2DescribeInstances request ) {
+        try {
         EC2TagKeyValue[] tagKeyValueSet = request.getResourceTagSet();
         return listVirtualMachines( request.getInstancesSet(), request.getFilterSet(), getResourceTags(tagKeyValueSet));
+        } catch( Exception e ) {
+            logger.error( "EC2 DescribeInstances - " ,e);
+            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred.");
+        }
+    }
-    /**
-     * list Zones
-     *
-     * @param request
-     * @return
-     */
-    public EC2DescribeAvailabilityZonesResponse handleRequest(EC2DescribeAvailabilityZones request) {
-        try {
-            EC2DescribeAvailabilityZonesResponse availableZones = listZones(request.getZoneSet(), null);
+    /**
+     * list Zones
+     *
+     * @param request
+     * @return
+     */
+    public EC2DescribeAvailabilityZonesResponse handleRequest(EC2DescribeAvailabilityZones request) {
+        try {
+            EC2DescribeAvailabilityZonesResponse availableZones = listZones(request.getZoneSet(), null);
         EC2AvailabilityZonesFilterSet azfs = request.getFilterSet();
         if ( null == azfs )
             return availableZones;
@@ -1155,185 +1146,185 @@ public class EC2Engine {
                 return new EC2DescribeAvailabilityZonesResponse();
             return listZones(matchedAvailableZones.toArray(new String[0]), null);
         }
-        } catch( EC2ServiceException error ) {
-            logger.error( "EC2 DescribeAvailabilityZones - ", error);
-            throw error;
+        } catch( EC2ServiceException error ) {
+            logger.error( "EC2 DescribeAvailabilityZones - ", error);
+            throw error;
-        } catch( Exception e ) {
-            logger.error( "EC2 DescribeAvailabilityZones - " ,e);
-            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); + } + } - /** - * list volumes - * - * @param request - * @return - */ - public EC2DescribeVolumesResponse handleRequest( EC2DescribeVolumes request ) { - EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse(); - EC2VolumeFilterSet vfs = request.getFilterSet(); + /** + * list volumes + * + * @param request + * @return + */ + public EC2DescribeVolumesResponse handleRequest( EC2DescribeVolumes request ) { + EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse(); + EC2VolumeFilterSet vfs = request.getFilterSet(); EC2TagKeyValue[] tagKeyValueSet = request.getResourceTagSet(); - try { - String[] volumeIds = request.getVolumeSet(); - if ( 0 == volumeIds.length ){ + try { + String[] volumeIds = request.getVolumeSet(); + if ( 0 == volumeIds.length ){ volumes = listVolumes( null, null, volumes, getResourceTags(tagKeyValueSet) ); - } else { - for (String s : volumeIds) + } else { + for (String s : volumeIds) volumes = listVolumes(s, null, volumes, getResourceTags(tagKeyValueSet) ); - } + } - if ( null == vfs ) - return volumes; - else return vfs.evaluate( volumes ); - } catch( Exception e ) { - logger.error( "EC2 DescribeVolumes - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } + if ( null == vfs ) + return volumes; + else return vfs.evaluate( volumes ); + } catch( Exception e ) { + logger.error( "EC2 DescribeVolumes - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); + } + } - /** - * Attach a volume to an instance - * - * @param request - * @return - */ - public EC2Volume attachVolume( EC2Volume request ) { - try { - request.setDeviceId(mapDeviceToCloudDeviceId(request.getDevice())); - EC2Volume resp = new EC2Volume(); - - CloudStackVolume vol = getApi().attachVolume(request.getId(), request.getInstanceId(), request.getDeviceId()); - if(vol != null) { - resp.setAttached(vol.getAttached()); - resp.setCreated(vol.getCreated()); - resp.setDevice(request.getDevice()); - resp.setDeviceId(vol.getDeviceId()); - resp.setHypervisor(vol.getHypervisor()); - resp.setId(vol.getId()); - resp.setInstanceId(vol.getVirtualMachineId()); - resp.setSize(vol.getSize()); - resp.setSnapshotId(vol.getSnapshotId()); - resp.setState(vol.getState()); - resp.setType(vol.getVolumeType()); - resp.setVMState(vol.getVirtualMachineState()); - resp.setZoneName(vol.getZoneName()); - return resp; - } - throw new EC2ServiceException( ServerError.InternalError, "An unexpected error occurred." ); - } catch( Exception e ) { - logger.error( "EC2 AttachVolume 2 - ", e); - throw new EC2ServiceException( ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : e.toString()); - } - } + /** + * Attach a volume to an instance + * + * @param request + * @return + */ + public EC2Volume attachVolume( EC2Volume request ) { + try { + request.setDeviceId(mapDeviceToCloudDeviceId(request.getDevice())); + EC2Volume resp = new EC2Volume(); - /** - * Detach a volume from an instance - * - * @param request - * @return - */ - public EC2Volume detachVolume(EC2Volume request) { - try { - CloudStackVolume vol = getApi().detachVolume(null, request.getId(), null); - EC2Volume resp = new EC2Volume(); - - if(vol != null) { - resp.setAttached(vol.getAttached()); - resp.setCreated(vol.getCreated()); - resp.setDevice(request.getDevice()); - resp.setDeviceId(vol.getDeviceId()); - resp.setHypervisor(vol.getHypervisor()); - resp.setId(vol.getId()); - resp.setInstanceId(vol.getVirtualMachineId()); - resp.setSize(vol.getSize()); - resp.setSnapshotId(vol.getSnapshotId()); - resp.setState(vol.getState()); - resp.setType(vol.getVolumeType()); - resp.setVMState(vol.getVirtualMachineState()); - resp.setZoneName(vol.getZoneName()); - return resp; - } + CloudStackVolume vol = getApi().attachVolume(request.getId(), request.getInstanceId(), request.getDeviceId()); + if(vol != null) { + resp.setAttached(vol.getAttached()); + resp.setCreated(vol.getCreated()); + resp.setDevice(request.getDevice()); + resp.setDeviceId(vol.getDeviceId()); + resp.setHypervisor(vol.getHypervisor()); + resp.setId(vol.getId()); + resp.setInstanceId(vol.getVirtualMachineId()); + resp.setSize(vol.getSize()); + resp.setSnapshotId(vol.getSnapshotId()); + resp.setState(vol.getState()); + resp.setType(vol.getVolumeType()); + resp.setVMState(vol.getVirtualMachineState()); + resp.setZoneName(vol.getZoneName()); + return resp; + } + throw new EC2ServiceException( ServerError.InternalError, "An unexpected error occurred." ); + } catch( Exception e ) { + logger.error( "EC2 AttachVolume 2 - ", e); + throw new EC2ServiceException( ServerError.InternalError, e.getMessage() != null ? e.getMessage() : e.toString()); + } + } - throw new EC2ServiceException( ServerError.InternalError, "An unexpected error occurred." ); - } catch( Exception e ) { - logger.error( "EC2 DetachVolume - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); - } - } + /** + * Detach a volume from an instance + * + * @param request + * @return + */ + public EC2Volume detachVolume(EC2Volume request) { + try { + CloudStackVolume vol = getApi().detachVolume(null, request.getId(), null); + EC2Volume resp = new EC2Volume(); - /** - * Create a volume - * - * @param request - * @return - */ - public EC2Volume createVolume( EC2CreateVolume request ) { - try { - - CloudStackAccount caller = getCurrentAccount(); - // -> put either snapshotid or diskofferingid on the request - String snapshotId = request.getSnapshotId(); - Long size = request.getSize(); - String diskOfferingId = null; + if(vol != null) { + resp.setAttached(vol.getAttached()); + resp.setCreated(vol.getCreated()); + resp.setDevice(request.getDevice()); + resp.setDeviceId(vol.getDeviceId()); + resp.setHypervisor(vol.getHypervisor()); + resp.setId(vol.getId()); + resp.setInstanceId(vol.getVirtualMachineId()); + resp.setSize(vol.getSize()); + resp.setSnapshotId(vol.getSnapshotId()); + resp.setState(vol.getState()); + resp.setType(vol.getVolumeType()); + resp.setVMState(vol.getVirtualMachineState()); + resp.setZoneName(vol.getZoneName()); + return resp; + } - if (snapshotId == null) { - List disks = getApi().listDiskOfferings(null, null, null, null); - for (CloudStackDiskOffering offer : disks) { - if (offer.isCustomized()) { - diskOfferingId = offer.getId(); - } - } - if (diskOfferingId == null) throw new EC2ServiceException(ServerError.InternalError, "No Customize Disk Offering Found"); - } + throw new EC2ServiceException( ServerError.InternalError, "An unexpected error occurred." ); + } catch( Exception e ) { + logger.error( "EC2 DetachVolume - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * Create a volume + * + * @param request + * @return + */ + public EC2Volume createVolume( EC2CreateVolume request ) { + try { + + CloudStackAccount caller = getCurrentAccount(); + // -> put either snapshotid or diskofferingid on the request + String snapshotId = request.getSnapshotId(); + Long size = request.getSize(); + String diskOfferingId = null; + + if (snapshotId == null) { + List disks = getApi().listDiskOfferings(null, null, null, null); + for (CloudStackDiskOffering offer : disks) { + if (offer.isCustomized()) { + diskOfferingId = offer.getId(); + } + } + if (diskOfferingId == null) throw new EC2ServiceException(ServerError.InternalError, "No Customize Disk Offering Found"); + } // // -> no volume name is given in the Amazon request but is required in the cloud API - CloudStackVolume vol = getApi().createVolume(UUID.randomUUID().toString(), null, diskOfferingId, null, size, snapshotId, toZoneId(request.getZoneName(), null)); - if (vol != null) { - EC2Volume resp = new EC2Volume(); - resp.setAttached(vol.getAttached()); - resp.setCreated(vol.getCreated()); + CloudStackVolume vol = getApi().createVolume(UUID.randomUUID().toString(), null, diskOfferingId, null, size, snapshotId, toZoneId(request.getZoneName(), null)); + if (vol != null) { + EC2Volume resp = new EC2Volume(); + resp.setAttached(vol.getAttached()); + resp.setCreated(vol.getCreated()); // resp.setDevice(); - resp.setDeviceId(vol.getDeviceId()); - resp.setHypervisor(vol.getHypervisor()); - resp.setId(vol.getId()); - resp.setInstanceId(vol.getVirtualMachineId()); - resp.setSize(vol.getSize()); - resp.setSnapshotId(vol.getSnapshotId()); - resp.setState(vol.getState()); - resp.setType(vol.getVolumeType()); - resp.setVMState(vol.getVirtualMachineState()); - resp.setZoneName(vol.getZoneName()); - return resp; - } - return null; - } catch( Exception e ) { - logger.error( "EC2 CreateVolume - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } + resp.setDeviceId(vol.getDeviceId()); + resp.setHypervisor(vol.getHypervisor()); + resp.setId(vol.getId()); + resp.setInstanceId(vol.getVirtualMachineId()); + resp.setSize(vol.getSize()); + resp.setSnapshotId(vol.getSnapshotId()); + resp.setState(vol.getState()); + resp.setType(vol.getVolumeType()); + resp.setVMState(vol.getVirtualMachineState()); + resp.setZoneName(vol.getZoneName()); + return resp; + } + return null; + } catch( Exception e ) { + logger.error( "EC2 CreateVolume - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); + } + } - /** - * Delete a volume - * - * @param request - * @return - */ - public EC2Volume deleteVolume( EC2Volume request ) { - try { - CloudStackInfoResponse resp = getApi().deleteVolume(request.getId()); - if(resp != null) { - request.setState("deleted"); - return request; - } + /** + * Delete a volume + * + * @param request + * @return + */ + public EC2Volume deleteVolume( EC2Volume request ) { + try { + CloudStackInfoResponse resp = getApi().deleteVolume(request.getId()); + if(resp != null) { + request.setState("deleted"); + return request; + } - throw new EC2ServiceException(ServerError.InternalError, "An unexpected error occurred."); - } catch( Exception e ) { - logger.error( "EC2 DeleteVolume 2 - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred.");
-        }
-    }
+            throw new EC2ServiceException(ServerError.InternalError, "An unexpected error occurred.");
+        } catch( Exception e ) {
+            logger.error( "EC2 DeleteVolume 2 - ", e);
+            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred.");
+        }
+    }

 /**
  * Create/Delete tags
@@ -1395,7 +1386,7 @@ public class EC2Engine {
             if (resourceTag.getValue() != null)
                 tag.setValue(resourceTag.getValue());
             tagResponse.addTags(tag);
-            }
+        }
         }

         EC2TagsFilterSet tfs = request.getFilterSet();
@@ -1409,116 +1400,116 @@ public class EC2Engine {
         }
     }
-    /**
-     * Reboot an instance or instances
-     *
-     * @param request
-     * @return
-     */
-    public boolean rebootInstances(EC2RebootInstances request)
-    {
-        EC2Instance[] vms = null;
+    /**
+     * Reboot an instance or instances
+     *
+     * @param request
+     * @return
+     */
+    public boolean rebootInstances(EC2RebootInstances request)
+    {
+        EC2Instance[] vms = null;
-        // -> reboot is not allowed on destroyed (i.e., terminated) instances
-        try {
-            String[] instanceSet = request.getInstancesSet();
-            EC2DescribeInstancesResponse previousState = listVirtualMachines( instanceSet, null, null );
-            vms = previousState.getInstanceSet();
+        // -> reboot is not allowed on destroyed (i.e., terminated) instances
+        try {
+            String[] instanceSet = request.getInstancesSet();
+            EC2DescribeInstancesResponse previousState = listVirtualMachines( instanceSet, null, null );
+            vms = previousState.getInstanceSet();
-            // -> send reboot requests for each found VM
-            for (EC2Instance vm : vms) {
-                if (vm.getState().equalsIgnoreCase( "Destroyed" )) continue;
-                CloudStackUserVm resp = getApi().rebootVirtualMachine(vm.getId());
-                if (logger.isDebugEnabled())
-                    logger.debug("Rebooting VM " + resp.getId() + " job " + resp.getJobId());
-            }
+            // -> send reboot requests for each found VM
+            for (EC2Instance vm : vms) {
+                if (vm.getState().equalsIgnoreCase( "Destroyed" )) continue;
+                CloudStackUserVm resp = getApi().rebootVirtualMachine(vm.getId());
+                if (logger.isDebugEnabled())
+                    logger.debug("Rebooting VM " + resp.getId() + " job " + resp.getJobId());
+            }
-            // -> if some specified VMs where not found we have to tell the caller
-            if (instanceSet.length != vms.length)
-                throw new EC2ServiceException(ClientError.InvalidAMIID_NotFound, "One or more instanceIds do not exist, other instances rebooted.");
+            // -> if some specified VMs were not found we have to tell the caller
+            if (instanceSet.length != vms.length)
+                throw new EC2ServiceException(ClientError.InvalidAMIID_NotFound, "One or more instanceIds do not exist, other instances rebooted.");
-            return true;
-        } catch( Exception e ) {
-            logger.error( "EC2 RebootInstances - ", e );
-            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred.");
-        }
-    }
+            return true;
+        } catch( Exception e ) {
+            logger.error( "EC2 RebootInstances - ", e );
+            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred.");
+        }
+    }
-    /**
-     * Using a template (AMI), launch n instances
-     *
-     * @param request
-     * @return
-     */
-    public EC2RunInstancesResponse runInstances(EC2RunInstances request) {
-        EC2RunInstancesResponse instances = new EC2RunInstancesResponse();
-        int createInstances = 0;
-        int canCreateInstances = -1;
-        int countCreated = 0;
-        try {
-            CloudStackAccount caller = getCurrentAccount();
-
-            // ugly...
- canCreateInstances = calculateAllowedInstances(); - if (-1 == canCreateInstances) canCreateInstances = request.getMaxCount(); + /** + * Using a template (AMI), launch n instances + * + * @param request + * @return + */ + public EC2RunInstancesResponse runInstances(EC2RunInstances request) { + EC2RunInstancesResponse instances = new EC2RunInstancesResponse(); + int createInstances = 0; + int canCreateInstances = -1; + int countCreated = 0; - if (canCreateInstances < request.getMinCount()) { - logger.info( "EC2 RunInstances - min count too big (" + request.getMinCount() + "), " + canCreateInstances + " left to allocate"); - throw new EC2ServiceException(ClientError.InstanceLimitExceeded ,"Only " + canCreateInstances + " instance(s) left to allocate"); - } + try { + CloudStackAccount caller = getCurrentAccount(); - if ( canCreateInstances < request.getMaxCount()) - createInstances = request.getMinCount(); - else - createInstances = request.getMaxCount(); + // ugly... + canCreateInstances = calculateAllowedInstances(); + if (-1 == canCreateInstances) canCreateInstances = request.getMaxCount(); - //find CS service Offering ID - String instanceType = "m1.small"; - if(request.getInstanceType() != null){ - instanceType = request.getInstanceType(); - } - CloudStackServiceOfferingVO svcOffering = getCSServiceOfferingId(instanceType); - if(svcOffering == null){ - logger.info("No ServiceOffering found to be defined by name, please contact the administrator "+instanceType ); - throw new EC2ServiceException(ClientError.Unsupported, "instanceType: [" + instanceType + "] not found!"); - } - - // zone stuff - String zoneId = toZoneId(request.getZoneName(), null); - - List zones = getApi().listZones(null, null, zoneId, null); - if (zones == null || zones.size() == 0) { - logger.info("EC2 RunInstances - zone [" + request.getZoneName() + "] not found!"); - throw new EC2ServiceException(ClientError.InvalidZone_NotFound, "ZoneId [" + request.getZoneName() + "] not found!"); - } - // we choose first zone? - CloudStackZone zone = zones.get(0); + if (canCreateInstances < request.getMinCount()) { + logger.info( "EC2 RunInstances - min count too big (" + request.getMinCount() + "), " + canCreateInstances + " left to allocate"); + throw new EC2ServiceException(ClientError.InstanceLimitExceeded ,"Only " + canCreateInstances + " instance(s) left to allocate"); + } - // network - CloudStackNetwork network = findNetwork(zone); + if ( canCreateInstances < request.getMaxCount()) + createInstances = request.getMinCount(); + else + createInstances = request.getMaxCount(); - // now actually deploy the vms - for( int i=0; i < createInstances; i++ ) { - CloudStackUserVm resp = getApi().deployVirtualMachine(svcOffering.getId(), - request.getTemplateId(), zoneId, null, null, null, null, - null, null, null, request.getKeyName(), null, (network != null ? network.getId() : null), - null, constructList(request.getGroupSet()), request.getSize().longValue(), request.getUserData()); - EC2Instance vm = new EC2Instance(); - vm.setId(resp.getId().toString()); - vm.setName(resp.getName()); - vm.setZoneName(resp.getZoneName()); - vm.setTemplateId(resp.getTemplateId().toString()); - if (resp.getSecurityGroupList() != null && resp.getSecurityGroupList().size() > 0) { - // TODO, we have a list of security groups, just return the first one? 
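+            // The EC2 instance type is resolved to a CloudStack service offering by
+            // name through getCSServiceOfferingId(); "m1.small" is assumed when the
+            // request does not name one. An illustrative (hypothetical) mapping only:
+            //   "m1.small" -> a small offering, "m1.large" -> a large offering.
+            // Unknown types fail below with ClientError.Unsupported.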
+ //find CS service Offering ID + String instanceType = "m1.small"; + if(request.getInstanceType() != null){ + instanceType = request.getInstanceType(); + } + CloudStackServiceOfferingVO svcOffering = getCSServiceOfferingId(instanceType); + if(svcOffering == null){ + logger.info("No ServiceOffering found to be defined by name, please contact the administrator "+instanceType ); + throw new EC2ServiceException(ClientError.Unsupported, "instanceType: [" + instanceType + "] not found!"); + } + + // zone stuff + String zoneId = toZoneId(request.getZoneName(), null); + + List zones = getApi().listZones(null, null, zoneId, null); + if (zones == null || zones.size() == 0) { + logger.info("EC2 RunInstances - zone [" + request.getZoneName() + "] not found!"); + throw new EC2ServiceException(ClientError.InvalidZone_NotFound, "ZoneId [" + request.getZoneName() + "] not found!"); + } + // we choose first zone? + CloudStackZone zone = zones.get(0); + + // network + CloudStackNetwork network = findNetwork(zone); + + // now actually deploy the vms + for( int i=0; i < createInstances; i++ ) { + CloudStackUserVm resp = getApi().deployVirtualMachine(svcOffering.getId(), + request.getTemplateId(), zoneId, null, null, null, null, + null, null, null, request.getKeyName(), null, (network != null ? network.getId() : null), + null, constructList(request.getGroupSet()), request.getSize().longValue(), request.getUserData()); + EC2Instance vm = new EC2Instance(); + vm.setId(resp.getId().toString()); + vm.setName(resp.getName()); + vm.setZoneName(resp.getZoneName()); + vm.setTemplateId(resp.getTemplateId().toString()); + if (resp.getSecurityGroupList() != null && resp.getSecurityGroupList().size() > 0) { + // TODO, we have a list of security groups, just return the first one? List securityGroupList = resp.getSecurityGroupList(); for (CloudStackSecurityGroup securityGroup : securityGroupList) { vm.addGroupName(securityGroup.getName()); } } - vm.setState(resp.getState()); - vm.setCreated(resp.getCreated()); + vm.setState(resp.getState()); + vm.setCreated(resp.getCreated()); List nicList = resp.getNics(); for (CloudStackNic nic : nicList) { if (nic.getIsDefault()) { @@ -1527,215 +1518,215 @@ public class EC2Engine { } } vm.setIpAddress(resp.getIpAddress()); - vm.setAccountName(resp.getAccountName()); - vm.setDomainId(resp.getDomainId()); - vm.setHypervisor(resp.getHypervisor()); - vm.setServiceOffering( svcOffering.getName()); + vm.setAccountName(resp.getAccountName()); + vm.setDomainId(resp.getDomainId()); + vm.setHypervisor(resp.getHypervisor()); + vm.setServiceOffering( svcOffering.getName()); vm.setKeyPairName(resp.getKeyPairName()); - instances.addInstance(vm); - countCreated++; - } + instances.addInstance(vm); + countCreated++; + } - if (0 == countCreated) { - // TODO, we actually need to destroy left-over VMs when the exception is thrown - throw new EC2ServiceException(ServerError.InsufficientInstanceCapacity, "Insufficient Instance Capacity" ); - } + if (0 == countCreated) { + // TODO, we actually need to destroy left-over VMs when the exception is thrown + throw new EC2ServiceException(ServerError.InsufficientInstanceCapacity, "Insufficient Instance Capacity" ); + } - return instances; - } catch( Exception e ) { - logger.error( "EC2 RunInstances - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred.");
+        }
+    }
-    /**
-     * Start an instance or instances
-     *
-     * @param request
-     * @return
-     */
-    public EC2StartInstancesResponse startInstances(EC2StartInstances request) {
-        EC2StartInstancesResponse instances = new EC2StartInstancesResponse();
-        EC2Instance[] vms = null;
+    /**
+     * Start an instance or instances
+     *
+     * @param request
+     * @return
+     */
+    public EC2StartInstancesResponse startInstances(EC2StartInstances request) {
+        EC2StartInstancesResponse instances = new EC2StartInstancesResponse();
+        EC2Instance[] vms = null;
-        // -> first determine the current state of each VM (becomes it previous state)
-        try {
-            EC2DescribeInstancesResponse previousState = listVirtualMachines( request.getInstancesSet(), null, null );
-            vms = previousState.getInstanceSet();
+        // -> first determine the current state of each VM (becomes its previous state)
+        try {
+            EC2DescribeInstancesResponse previousState = listVirtualMachines( request.getInstancesSet(), null, null );
+            vms = previousState.getInstanceSet();
-            // -> send start requests for each item
-            for (EC2Instance vm : vms) {
-                vm.setPreviousState(vm.getState());
+            // -> send start requests for each item
+            for (EC2Instance vm : vms) {
+                vm.setPreviousState(vm.getState());
-                // -> if its already running then we don't care
-                if (vm.getState().equalsIgnoreCase( "Running" ) || vm.getState().equalsIgnoreCase( "Destroyed" )) {
-                    instances.addInstance(vm);
-                    continue;
-                }
+                // -> if it's already running then we don't care
+                if (vm.getState().equalsIgnoreCase( "Running" ) || vm.getState().equalsIgnoreCase( "Destroyed" )) {
+                    instances.addInstance(vm);
+                    continue;
+                }
-                CloudStackUserVm resp = getApi().startVirtualMachine(vm.getId());
-                if(resp != null){
-                    vm.setState(resp.getState());
-                    if(logger.isDebugEnabled())
-                        logger.debug("Starting VM " + vm.getId() + " job " + resp.getJobId());
-                }
-                instances.addInstance(vm);
-            }
-            return instances;
-        } catch( Exception e ) {
-            logger.error( "EC2 StartInstances - ", e);
-            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred.");
-        }
-    }
+                CloudStackUserVm resp = getApi().startVirtualMachine(vm.getId());
+                if(resp != null){
+                    vm.setState(resp.getState());
+                    if(logger.isDebugEnabled())
+                        logger.debug("Starting VM " + vm.getId() + " job " + resp.getJobId());
+                }
+                instances.addInstance(vm);
+            }
+            return instances;
+        } catch( Exception e ) {
+            logger.error( "EC2 StartInstances - ", e);
+            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred.");
+        }
+    }
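+    // Start/stop/reboot share one pattern: the current VM state is captured first via
+    // listVirtualMachines() and stored with setPreviousState(), so the EC2-style reply
+    // can carry both previousState and the new state returned by the CloudStack job.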
-    /**
-     * Stop an instance or instances
-     *
-     * @param request
-     * @return
-     */
-    public EC2StopInstancesResponse stopInstances(EC2StopInstances request) {
-        EC2StopInstancesResponse instances = new EC2StopInstancesResponse();
-        EC2Instance[] virtualMachines = null;
+    /**
+     * Stop an instance or instances
+     *
+     * @param request
+     * @return
+     */
+    public EC2StopInstancesResponse stopInstances(EC2StopInstances request) {
+        EC2StopInstancesResponse instances = new EC2StopInstancesResponse();
+        EC2Instance[] virtualMachines = null;
-        // -> first determine the current state of each VM (becomes it previous state)
-        try {
-            String[] instanceSet = request.getInstancesSet();
-            EC2DescribeInstancesResponse previousState = listVirtualMachines( instanceSet, null, null );
-            virtualMachines = previousState.getInstanceSet();
+        // -> first determine the current state of each VM (becomes its previous state)
+        try {
+            String[] instanceSet = request.getInstancesSet();
+            EC2DescribeInstancesResponse previousState = listVirtualMachines( instanceSet, null, null );
+            virtualMachines = previousState.getInstanceSet();
-            // -> send stop requests for each item
-            for (EC2Instance vm : virtualMachines) {
-                vm.setPreviousState( vm.getState());
-                CloudStackUserVm resp = null;
-                if (request.getDestroyInstances()) {
-                    if (vm.getState().equalsIgnoreCase( "Destroyed" )) {
-                        instances.addInstance(vm);
-                        continue;
-                    }
-                    resp = getApi().destroyVirtualMachine(vm.getId());
-                    if(logger.isDebugEnabled())
-                        logger.debug("Destroying VM " + vm.getId() + " job " + resp.getJobId());
-                } else {
-                    if (vm.getState().equalsIgnoreCase("Stopped") || vm.getState().equalsIgnoreCase("Destroyed")) {
-                        instances.addInstance(vm);
-                        continue;
-                    }
-                    resp = getApi().stopVirtualMachine(vm.getId(), false);
-                    if(logger.isDebugEnabled())
-                        logger.debug("Stopping VM " + vm.getId() + " job " + resp.getJobId());
-                }
-                if (resp != null) {
-                    vm.setState(resp.getState());
-                    instances.addInstance(vm);
-                }
-            }
-            return instances;
-        } catch( Exception e ) {
-            logger.error( "EC2 StopInstances - ", e);
-            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() + ", might already be destroyed" : "An unexpected error occurred.");
-        }
-    }
+            // -> send stop requests for each item
+            for (EC2Instance vm : virtualMachines) {
+                vm.setPreviousState( vm.getState());
+                CloudStackUserVm resp = null;
+                if (request.getDestroyInstances()) {
+                    if (vm.getState().equalsIgnoreCase( "Destroyed" )) {
+                        instances.addInstance(vm);
+                        continue;
+                    }
+                    resp = getApi().destroyVirtualMachine(vm.getId());
+                    if(logger.isDebugEnabled())
+                        logger.debug("Destroying VM " + vm.getId() + " job " + resp.getJobId());
+                } else {
+                    if (vm.getState().equalsIgnoreCase("Stopped") || vm.getState().equalsIgnoreCase("Destroyed")) {
+                        instances.addInstance(vm);
+                        continue;
+                    }
+                    resp = getApi().stopVirtualMachine(vm.getId(), false);
+                    if(logger.isDebugEnabled())
+                        logger.debug("Stopping VM " + vm.getId() + " job " + resp.getJobId());
+                }
+                if (resp != null) {
+                    vm.setState(resp.getState());
+                    instances.addInstance(vm);
+                }
+            }
+            return instances;
+        } catch( Exception e ) {
+            logger.error( "EC2 StopInstances - ", e);
+            throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() + ", might already be destroyed" : "An unexpected error occurred.");
+        }
+    }
-    /**
-     * RunInstances includes a min and max count of requested instances to create.
- * We have to be able to create the min number for the user or none at all. So - * here we determine what the user has left to create. - * - * @return -1 means no limit exists, other positive numbers give max number left that - * the user can create. - */ - private int calculateAllowedInstances() throws Exception { - int maxAllowed = -1; - - CloudStackAccount ourAccount = getCurrentAccount(); - - if (ourAccount == null) { - // This should never happen, but - // we will return -99999 if this happens... - return -99999; - } - - // if accountType is Admin == 1, then let's return -1 - if (ourAccount.getAccountType() == 1) return -1; - - // -> get the user limits on instances - // "0" represents instances: - // http://download.cloud.com/releases/2.2.0/api_2.2.8/user/listResourceLimits.html - List limits = getApi().listResourceLimits(null, null, null, null, "0"); - if (limits != null && limits.size() > 0) { - maxAllowed = (int)limits.get(0).getMax().longValue(); - if (maxAllowed == -1) - return -1; // no limit + /** + * RunInstances includes a min and max count of requested instances to create. + * We have to be able to create the min number for the user or none at all. So + * here we determine what the user has left to create. + * + * @return -1 means no limit exists, other positive numbers give max number left that + * the user can create. + */ + private int calculateAllowedInstances() throws Exception { + int maxAllowed = -1; - EC2DescribeInstancesResponse existingVMS = listVirtualMachines( null, null, null ); - EC2Instance[] vmsList = existingVMS.getInstanceSet(); - return (maxAllowed - vmsList.length); - } else { - return 0; - } - } + CloudStackAccount ourAccount = getCurrentAccount(); - /** - * Performs the cloud API listVirtualMachines one or more times. - * - * @param virtualMachineIds - an array of instances we are interested in getting information on - * @param ifs - filter out unwanted instances - */ - private EC2DescribeInstancesResponse listVirtualMachines( String[] virtualMachineIds, EC2InstanceFilterSet ifs, + if (ourAccount == null) { + // This should never happen, but + // we will return -99999 if this happens... + return -99999; + } + + // if accountType is Admin == 1, then let's return -1 + if (ourAccount.getAccountType() == 1) return -1; + + // -> get the user limits on instances + // "0" represents instances: + // http://download.cloud.com/releases/2.2.0/api_2.2.8/user/listResourceLimits.html + List limits = getApi().listResourceLimits(null, null, null, null, "0"); + if (limits != null && limits.size() > 0) { + maxAllowed = (int)limits.get(0).getMax().longValue(); + if (maxAllowed == -1) + return -1; // no limit + + EC2DescribeInstancesResponse existingVMS = listVirtualMachines( null, null, null ); + EC2Instance[] vmsList = existingVMS.getInstanceSet(); + return (maxAllowed - vmsList.length); + } else { + return 0; + } + } + + /** + * Performs the cloud API listVirtualMachines one or more times. 
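+     * As a sketch, a call asking about two specific instances (hypothetical
+     * ids) would look like:
+     * <pre>
+     * EC2DescribeInstancesResponse found =
+     *     listVirtualMachines(new String[] {"i-1", "i-2"}, null, null);
+     * </pre>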
+ * + * @param virtualMachineIds - an array of instances we are interested in getting information on + * @param ifs - filter out unwanted instances + */ + private EC2DescribeInstancesResponse listVirtualMachines( String[] virtualMachineIds, EC2InstanceFilterSet ifs, List resourceTags ) throws Exception - { - EC2DescribeInstancesResponse instances = new EC2DescribeInstancesResponse(); + { + EC2DescribeInstancesResponse instances = new EC2DescribeInstancesResponse(); - if (null == virtualMachineIds || 0 == virtualMachineIds.length) { + if (null == virtualMachineIds || 0 == virtualMachineIds.length) { instances = lookupInstances( null, instances, resourceTags ); - } else { - for( int i=0; i < virtualMachineIds.length; i++ ) { + } else { + for( int i=0; i < virtualMachineIds.length; i++ ) { instances = lookupInstances( virtualMachineIds[i], instances, resourceTags ); - } - } + } + } - if ( null == ifs ) - return instances; - else return ifs.evaluate( instances ); - } + if ( null == ifs ) + return instances; + else return ifs.evaluate( instances ); + } - /** - * Get one or more templates depending on the volumeId parameter. - * - * @param volumeId - if interested in one specific volume, null if want to list all volumes - * @param instanceId - if interested in volumes for a specific instance, null if instance is not important - */ - private EC2DescribeVolumesResponse listVolumes(String volumeId, String instanceId, EC2DescribeVolumesResponse volumes, + /** + * Get one or more templates depending on the volumeId parameter. + * + * @param volumeId - if interested in one specific volume, null if want to list all volumes + * @param instanceId - if interested in volumes for a specific instance, null if instance is not important + */ + private EC2DescribeVolumesResponse listVolumes(String volumeId, String instanceId, EC2DescribeVolumesResponse volumes, List resourceTagSet)throws Exception { List vols = getApi().listVolumes(null, null, null, volumeId, null, null, null, null, null, instanceId, null, resourceTagSet); - if(vols != null && vols.size() > 0) { - for(CloudStackVolume vol : vols) { - EC2Volume ec2Vol = new EC2Volume(); - ec2Vol.setId(vol.getId()); - if(vol.getAttached() != null) - ec2Vol.setAttached(vol.getAttached()); - ec2Vol.setCreated(vol.getCreated()); + if(vols != null && vols.size() > 0) { + for(CloudStackVolume vol : vols) { + EC2Volume ec2Vol = new EC2Volume(); + ec2Vol.setId(vol.getId()); + if(vol.getAttached() != null) + ec2Vol.setAttached(vol.getAttached()); + ec2Vol.setCreated(vol.getCreated()); - if(vol.getDeviceId() != null) - ec2Vol.setDeviceId(vol.getDeviceId()); - ec2Vol.setHypervisor(vol.getHypervisor()); + if(vol.getDeviceId() != null) + ec2Vol.setDeviceId(vol.getDeviceId()); + ec2Vol.setHypervisor(vol.getHypervisor()); - if(vol.getSnapshotId() != null) - ec2Vol.setSnapshotId(vol.getSnapshotId()); - ec2Vol.setState(mapToAmazonVolState(vol.getState())); - ec2Vol.setSize(vol.getSize()); - ec2Vol.setType(vol.getVolumeType()); + if(vol.getSnapshotId() != null) + ec2Vol.setSnapshotId(vol.getSnapshotId()); + ec2Vol.setState(mapToAmazonVolState(vol.getState())); + ec2Vol.setSize(vol.getSize()); + ec2Vol.setType(vol.getVolumeType()); - if(vol.getVirtualMachineId() != null) - ec2Vol.setInstanceId(vol.getVirtualMachineId()); + if(vol.getVirtualMachineId() != null) + ec2Vol.setInstanceId(vol.getVirtualMachineId()); - if(vol.getVirtualMachineState() != null) - ec2Vol.setVMState(vol.getVirtualMachineState()); - ec2Vol.setZoneName(vol.getZoneName()); + if(vol.getVirtualMachineState() != 
null) + ec2Vol.setVMState(vol.getVirtualMachineState()); + ec2Vol.setZoneName(vol.getZoneName()); List resourceTags = vol.getTags(); for(CloudStackKeyValue resourceTag : resourceTags) { @@ -1746,72 +1737,72 @@ public class EC2Engine { ec2Vol.addResourceTag(param); } - volumes.addVolume(ec2Vol); - } - } + volumes.addVolume(ec2Vol); + } + } - return volumes; - } + return volumes; + } - /** - * Translate the given zone name into the required zoneId. Query for - * a list of all zones and match the zone name given. Amazon uses zone - * names while the Cloud API often requires the zoneId. - * - * @param zoneName - (e.g., 'AH'), if null return the first zone in the available list - * - * @return the zoneId that matches the given zone name - */ - private String toZoneId(String zoneName, String domainId) throws Exception { - EC2DescribeAvailabilityZonesResponse zones = null; - String[] interestedZones = null; - - if ( null != zoneName) { - interestedZones = new String[1]; - interestedZones[0] = zoneName; - }else { - CloudStackZone zone = findZone(); - if(zone != null){ - return zone.getId(); - } - } - - zones = listZones(interestedZones, domainId); - - if (zones == null || zones.getZoneIdAt( 0 ) == null) - throw new EC2ServiceException(ClientError.InvalidParameterValue, "Unknown zoneName value - " + zoneName); - return zones.getZoneIdAt(0); - } - - - /** - * Convert from the Amazon instanceType strings to Cloud serviceOfferingId + /** + * Translate the given zone name into the required zoneId. Query for + * a list of all zones and match the zone name given. Amazon uses zone + * names while the Cloud API often requires the zoneId. * - */ - - private CloudStackServiceOfferingVO getCSServiceOfferingId(String instanceType){ - try { - if (null == instanceType) instanceType = "m1.small"; - - return scvoDao.getSvcOfferingByName(instanceType); - + * @param zoneName - (e.g., 'AH'), if null return the first zone in the available list + * + * @return the zoneId that matches the given zone name + */ + private String toZoneId(String zoneName, String domainId) throws Exception { + EC2DescribeAvailabilityZonesResponse zones = null; + String[] interestedZones = null; + + if ( null != zoneName) { + interestedZones = new String[1]; + interestedZones[0] = zoneName; + }else { + CloudStackZone zone = findZone(); + if(zone != null){ + return zone.getId(); + } + } + + zones = listZones(interestedZones, domainId); + + if (zones == null || zones.getZoneIdAt( 0 ) == null) + throw new EC2ServiceException(ClientError.InvalidParameterValue, "Unknown zoneName value - " + zoneName); + return zones.getZoneIdAt(0); + } + + + /** + * Convert from the Amazon instanceType strings to Cloud serviceOfferingId + * + */ + + private CloudStackServiceOfferingVO getCSServiceOfferingId(String instanceType){ + try { + if (null == instanceType) instanceType = "m1.small"; + + return scvoDao.getSvcOfferingByName(instanceType); + } catch(Exception e) { logger.error( "Error while retrieving ServiceOffering information by name - ", e); throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); } - } - - /** - * Convert from the Cloud serviceOfferingId to the Amazon instanceType strings based - * on the loaded map. 
- * - * @param serviceOfferingId - * @return A valid value for the Amazon defined instanceType - * @throws SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException - */ - private String serviceOfferingIdToInstanceType( String serviceOfferingId ){ + } + + /** + * Convert from the Cloud serviceOfferingId to the Amazon instanceType strings based + * on the loaded map. + * + * @param serviceOfferingId + * @return A valid value for the Amazon defined instanceType + * @throws SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException + */ + private String serviceOfferingIdToInstanceType( String serviceOfferingId ){ try{ - + CloudStackServiceOfferingVO offering = scvoDao.getSvcOfferingById(serviceOfferingId); //dao.getSvcOfferingById(serviceOfferingId); if(offering == null){ logger.warn( "No instanceType match for serviceOfferingId: [" + serviceOfferingId + "]" ); @@ -1823,107 +1814,107 @@ public class EC2Engine { logger.error( "sError while retrieving ServiceOffering information by id - ", e); throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); } - } + } - /** - * Match the value in the 'description' field of the listOsTypes response to get - * the osTypeId. - * - * @param osTypeName - * @return the Cloud.com API osTypeId - */ - private String toOSTypeId( String osTypeName ) throws Exception { - try { - List osTypes = getApi().listOsTypes(null, null, null); - for (CloudStackOsType osType : osTypes) { - if (osType.getDescription().toLowerCase().indexOf(osTypeName.toLowerCase()) != -1) - return osType.getId(); - } - return null; - } catch(Exception e) { - logger.error( "List OS Types - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } + /** + * Match the value in the 'description' field of the listOsTypes response to get + * the osTypeId. + * + * @param osTypeName + * @return the Cloud.com API osTypeId + */ + private String toOSTypeId( String osTypeName ) throws Exception { + try { + List osTypes = getApi().listOsTypes(null, null, null); + for (CloudStackOsType osType : osTypes) { + if (osType.getDescription().toLowerCase().indexOf(osTypeName.toLowerCase()) != -1) + return osType.getId(); + } + return null; + } catch(Exception e) { + logger.error( "List OS Types - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } - } + } - /** - * More than one place we need to access the defined list of zones. If given a specific - * list of zones of interest, then only values from those zones are returned. - * - * @param interestedZones - can be null, should be a subset of all zones - * - * @return EC2DescribeAvailabilityZonesResponse - */ - private EC2DescribeAvailabilityZonesResponse listZones(String[] interestedZones, String domainId) throws Exception - { - EC2DescribeAvailabilityZonesResponse zones = new EC2DescribeAvailabilityZonesResponse(); + /** + * More than one place we need to access the defined list of zones. If given a specific + * list of zones of interest, then only values from those zones are returned. 
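+     * For example, restricting the result to a single (made-up) zone name:
+     * <pre>
+     * EC2DescribeAvailabilityZonesResponse zones =
+     *     listZones(new String[] {"zone-1"}, null);
+     * </pre>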
+ * + * @param interestedZones - can be null, should be a subset of all zones + * + * @return EC2DescribeAvailabilityZonesResponse + */ + private EC2DescribeAvailabilityZonesResponse listZones(String[] interestedZones, String domainId) throws Exception + { + EC2DescribeAvailabilityZonesResponse zones = new EC2DescribeAvailabilityZonesResponse(); - List cloudZones = getApi().listZones(true, domainId, null, null); + List cloudZones = getApi().listZones(true, domainId, null, null); - if(cloudZones != null) { - for(CloudStackZone cloudZone : cloudZones) { - if ( null != interestedZones && 0 < interestedZones.length ) { - for( int j=0; j < interestedZones.length; j++ ) { - if (interestedZones[j].equalsIgnoreCase( cloudZone.getName())) { - zones.addZone(cloudZone.getId().toString(), cloudZone.getName()); - break; - } - } - } else { - zones.addZone(cloudZone.getId().toString(), cloudZone.getName()); - } - } - } - return zones; - } + if(cloudZones != null) { + for(CloudStackZone cloudZone : cloudZones) { + if ( null != interestedZones && 0 < interestedZones.length ) { + for( int j=0; j < interestedZones.length; j++ ) { + if (interestedZones[j].equalsIgnoreCase( cloudZone.getName())) { + zones.addZone(cloudZone.getId().toString(), cloudZone.getName()); + break; + } + } + } else { + zones.addZone(cloudZone.getId().toString(), cloudZone.getName()); + } + } + } + return zones; + } - /** - * Get information on one or more virtual machines depending on the instanceId parameter. - * - * @param instanceId - if null then return information on all existing instances, otherwise - * just return information on the matching instance. - * @param instances - a container object to fill with one or more EC2Instance objects - * - * @return the same object passed in as the "instances" parameter modified with one or more - * EC2Instance objects loaded. - */ - private EC2DescribeInstancesResponse lookupInstances( String instanceId, EC2DescribeInstancesResponse instances, + /** + * Get information on one or more virtual machines depending on the instanceId parameter. + * + * @param instanceId - if null then return information on all existing instances, otherwise + * just return information on the matching instance. + * @param instances - a container object to fill with one or more EC2Instance objects + * + * @return the same object passed in as the "instances" parameter modified with one or more + * EC2Instance objects loaded. + */ + private EC2DescribeInstancesResponse lookupInstances( String instanceId, EC2DescribeInstancesResponse instances, List resourceTagSet ) - throws Exception { + throws Exception { - String instId = instanceId != null ? instanceId : null; + String instId = instanceId != null ? 
instanceId : null; List vms = getApi().listVirtualMachines(null, null, true, null, null, null, null, - instId, null, null, null, null, null, null, null, null, resourceTagSet); - - if(vms != null && vms.size() > 0) { - for(CloudStackUserVm cloudVm : vms) { - EC2Instance ec2Vm = new EC2Instance(); - - ec2Vm.setId(cloudVm.getId().toString()); - ec2Vm.setName(cloudVm.getName()); - ec2Vm.setZoneName(cloudVm.getZoneName()); - ec2Vm.setTemplateId(cloudVm.getTemplateId().toString()); - ec2Vm.setGroup(cloudVm.getGroup()); - ec2Vm.setState(cloudVm.getState()); - ec2Vm.setCreated(cloudVm.getCreated()); - ec2Vm.setIpAddress(cloudVm.getIpAddress()); - ec2Vm.setAccountName(cloudVm.getAccountName()); - ec2Vm.setDomainId(cloudVm.getDomainId()); - ec2Vm.setHypervisor(cloudVm.getHypervisor()); - ec2Vm.setRootDeviceType(cloudVm.getRootDeviceType()); - ec2Vm.setRootDeviceId(cloudVm.getRootDeviceId()); - ec2Vm.setServiceOffering(serviceOfferingIdToInstanceType(cloudVm.getServiceOfferingId().toString())); + instId, null, null, null, null, null, null, null, null, resourceTagSet); + + if(vms != null && vms.size() > 0) { + for(CloudStackUserVm cloudVm : vms) { + EC2Instance ec2Vm = new EC2Instance(); + + ec2Vm.setId(cloudVm.getId().toString()); + ec2Vm.setName(cloudVm.getName()); + ec2Vm.setZoneName(cloudVm.getZoneName()); + ec2Vm.setTemplateId(cloudVm.getTemplateId().toString()); + ec2Vm.setGroup(cloudVm.getGroup()); + ec2Vm.setState(cloudVm.getState()); + ec2Vm.setCreated(cloudVm.getCreated()); + ec2Vm.setIpAddress(cloudVm.getIpAddress()); + ec2Vm.setAccountName(cloudVm.getAccountName()); + ec2Vm.setDomainId(cloudVm.getDomainId()); + ec2Vm.setHypervisor(cloudVm.getHypervisor()); + ec2Vm.setRootDeviceType(cloudVm.getRootDeviceType()); + ec2Vm.setRootDeviceId(cloudVm.getRootDeviceId()); + ec2Vm.setServiceOffering(serviceOfferingIdToInstanceType(cloudVm.getServiceOfferingId().toString())); ec2Vm.setKeyPairName(cloudVm.getKeyPairName()); - List nics = cloudVm.getNics(); - for(CloudStackNic nic : nics) { - if(nic.getIsDefault()) { - ec2Vm.setPrivateIpAddress(nic.getIpaddress()); - break; - } - } + List nics = cloudVm.getNics(); + for(CloudStackNic nic : nics) { + if(nic.getIsDefault()) { + ec2Vm.setPrivateIpAddress(nic.getIpaddress()); + break; + } + } List resourceTags = cloudVm.getTags(); for(CloudStackKeyValue resourceTag : resourceTags) { @@ -1941,71 +1932,71 @@ public class EC2Engine { ec2Vm.addGroupName(securityGroup.getName()); } } - - instances.addInstance(ec2Vm); - } - }else{ - if(instanceId != null){ - //no such instance found - throw new EC2ServiceException(ServerError.InternalError, "Instance:" + instanceId + " not found"); - } - } - return instances; - } + + instances.addInstance(ec2Vm); + } + }else{ + if(instanceId != null){ + //no such instance found + throw new EC2ServiceException(ServerError.InternalError, "Instance:" + instanceId + " not found"); + } + } + return instances; + } - /** - * Get one or more templates depending on the templateId parameter. - * - * @param templateId - if null then return information on all existing templates, otherwise - * just return information on the matching template. - * @param images - a container object to fill with one or more EC2Image objects - * - * @return the same object passed in as the "images" parameter modified with one or more - * EC2Image objects loaded. 
- */ - private EC2DescribeImagesResponse listTemplates( String templateId, EC2DescribeImagesResponse images ) throws EC2ServiceException { - try { - List result = new ArrayList(); - - if(templateId != null){ + /** + * Get one or more templates depending on the templateId parameter. + * + * @param templateId - if null then return information on all existing templates, otherwise + * just return information on the matching template. + * @param images - a container object to fill with one or more EC2Image objects + * + * @return the same object passed in as the "images" parameter modified with one or more + * EC2Image objects loaded. + */ + private EC2DescribeImagesResponse listTemplates( String templateId, EC2DescribeImagesResponse images ) throws EC2ServiceException { + try { + List result = new ArrayList(); + + if(templateId != null){ List template = getApi().listTemplates("executable", null, null, null, templateId , null, null, null); if(template != null){ result.addAll(template); } - }else{ - List selfExecutable = getApi().listTemplates("selfexecutable", null, null, null, null, null, null, null); + }else{ + List selfExecutable = getApi().listTemplates("selfexecutable", null, null, null, null, null, null, null); if(selfExecutable != null){ result.addAll(selfExecutable); } - + List featured = getApi().listTemplates("featured", null, null, null, null, null, null, null); - if(featured != null){ - result.addAll(featured); - } - - List sharedExecutable = getApi().listTemplates("sharedexecutable", null, null, null, null, null, null, null); + if(featured != null){ + result.addAll(featured); + } + + List sharedExecutable = getApi().listTemplates("sharedexecutable", null, null, null, null, null, null, null); if(sharedExecutable != null){ result.addAll(sharedExecutable); } - + List community = getApi().listTemplates("community", null, null, null, null, null, null, null); if(community != null){ result.addAll(community); } - } - - if (result != null && result.size() > 0) { - for (CloudStackTemplate temp : result) { - EC2Image ec2Image = new EC2Image(); - ec2Image.setId(temp.getId().toString()); - ec2Image.setAccountName(temp.getAccount()); - ec2Image.setName(temp.getName()); - ec2Image.setDescription(temp.getDisplayText()); - ec2Image.setOsTypeId(temp.getOsTypeId().toString()); - ec2Image.setIsPublic(temp.getIsPublic()); - ec2Image.setIsReady(temp.getIsReady()); - ec2Image.setDomainId(temp.getDomainId()); + } + + if (result != null && result.size() > 0) { + for (CloudStackTemplate temp : result) { + EC2Image ec2Image = new EC2Image(); + ec2Image.setId(temp.getId().toString()); + ec2Image.setAccountName(temp.getAccount()); + ec2Image.setName(temp.getName()); + ec2Image.setDescription(temp.getDisplayText()); + ec2Image.setOsTypeId(temp.getOsTypeId().toString()); + ec2Image.setIsPublic(temp.getIsPublic()); + ec2Image.setIsReady(temp.getIsReady()); + ec2Image.setDomainId(temp.getDomainId()); List resourceTags = temp.getTags(); for(CloudStackKeyValue resourceTag : resourceTags) { EC2TagKeyValue param = new EC2TagKeyValue(); @@ -2014,184 +2005,184 @@ public class EC2Engine { param.setValue(resourceTag.getValue()); ec2Image.addResourceTag(param); } - images.addImage(ec2Image); - } + images.addImage(ec2Image); + } } - return images; - } catch(Exception e) { - logger.error( "List Templates - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } + return images; + } catch(Exception e) { + logger.error( "List Templates - ", e); + throw new 
EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } - /** - * List security groups - * - * @param interestedGroups - * @return - * @throws EC2ServiceException - * @throws UnsupportedEncodingException - * @throws SignatureException - * @throws IOException - * @throws SAXException - * @throws ParserConfigurationException - * @throws ParseException - */ - public EC2DescribeSecurityGroupsResponse listSecurityGroups( String[] interestedGroups ) throws Exception { - try { - EC2DescribeSecurityGroupsResponse groupSet = new EC2DescribeSecurityGroupsResponse(); + /** + * List security groups + * + * @param interestedGroups + * @return + * @throws EC2ServiceException + * @throws UnsupportedEncodingException + * @throws SignatureException + * @throws IOException + * @throws SAXException + * @throws ParserConfigurationException + * @throws ParseException + */ + public EC2DescribeSecurityGroupsResponse listSecurityGroups( String[] interestedGroups ) throws Exception { + try { + EC2DescribeSecurityGroupsResponse groupSet = new EC2DescribeSecurityGroupsResponse(); List groups = getApi().listSecurityGroups(null, null, null, true, null, null, null); - if (groups != null && groups.size() > 0) - for (CloudStackSecurityGroup group : groups) { - boolean matched = false; - if (interestedGroups.length > 0) { - for (String groupName :interestedGroups) { - if (groupName.equalsIgnoreCase(group.getName())) { - matched = true; - break; - } - } - } else { - matched = true; - } - if (!matched) continue; - EC2SecurityGroup ec2Group = new EC2SecurityGroup(); - // not sure if we should set both account and account name to accountname - ec2Group.setAccount(group.getAccountName()); - ec2Group.setAccountName(group.getAccountName()); - ec2Group.setName(group.getName()); - ec2Group.setDescription(group.getDescription()); - ec2Group.setDomainId(group.getDomainId()); - ec2Group.setId(group.getId().toString()); - toPermission(ec2Group, group); - - groupSet.addGroup(ec2Group); - } - return groupSet; - } catch(Exception e) { - logger.error( "List Security Groups - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); - } - } + if (groups != null && groups.size() > 0) + for (CloudStackSecurityGroup group : groups) { + boolean matched = false; + if (interestedGroups.length > 0) { + for (String groupName :interestedGroups) { + if (groupName.equalsIgnoreCase(group.getName())) { + matched = true; + break; + } + } + } else { + matched = true; + } + if (!matched) continue; + EC2SecurityGroup ec2Group = new EC2SecurityGroup(); + // not sure if we should set both account and account name to accountname + ec2Group.setAccount(group.getAccountName()); + ec2Group.setAccountName(group.getAccountName()); + ec2Group.setName(group.getName()); + ec2Group.setDescription(group.getDescription()); + ec2Group.setDomainId(group.getDomainId()); + ec2Group.setId(group.getId().toString()); + toPermission(ec2Group, group); - /** - * Convert ingress rule to EC2IpPermission records - * - * @param response - * @param group - * @return - */ - private boolean toPermission(EC2SecurityGroup response, CloudStackSecurityGroup group ) { - List rules = group.getIngressRules(); + groupSet.addGroup(ec2Group); + } + return groupSet; + } catch(Exception e) { + logger.error( "List Security Groups - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } - if (rules == null || rules.isEmpty()) return false; + /** + * Convert ingress rule to EC2IpPermission records + * + * @param response + * 
@param group + * @return + */ + private boolean toPermission(EC2SecurityGroup response, CloudStackSecurityGroup group ) { + List rules = group.getIngressRules(); - for (CloudStackIngressRule rule : rules) { - EC2IpPermission perm = new EC2IpPermission(); - perm.setProtocol(rule.getProtocol()); - perm.setFromPort(rule.getStartPort()); - perm.setToPort(rule.getEndPort()); - perm.setRuleId(rule.getRuleId() != null ? rule.getRuleId().toString() : new String()); - perm.setIcmpCode(rule.getIcmpCode() != null ? rule.getIcmpCode().toString() : new String()); - perm.setIcmpType(rule.getIcmpType() != null ? rule.getIcmpType().toString() : new String()); - perm.setCIDR(rule.getCidr()); - perm.addIpRange(rule.getCidr()); + if (rules == null || rules.isEmpty()) return false; - if (rule.getAccountName() != null && rule.getSecurityGroupName() != null) { - EC2SecurityGroup newGroup = new EC2SecurityGroup(); - newGroup.setAccount(rule.getAccountName()); - newGroup.setName(rule.getSecurityGroupName()); - perm.addUser(newGroup); - } - response.addIpPermission(perm); - } - return true; - } + for (CloudStackIngressRule rule : rules) { + EC2IpPermission perm = new EC2IpPermission(); + perm.setProtocol(rule.getProtocol()); + perm.setFromPort(rule.getStartPort()); + perm.setToPort(rule.getEndPort()); + perm.setRuleId(rule.getRuleId() != null ? rule.getRuleId().toString() : new String()); + perm.setIcmpCode(rule.getIcmpCode() != null ? rule.getIcmpCode().toString() : new String()); + perm.setIcmpType(rule.getIcmpType() != null ? rule.getIcmpType().toString() : new String()); + perm.setCIDR(rule.getCidr()); + perm.addIpRange(rule.getCidr()); - /** - * Find the current account based on the SecretKey - * - * @return - * @throws Exception - */ - public CloudStackAccount getCurrentAccount() throws Exception { - if (currentAccount != null) { - // verify this is the same account!!! - for (CloudStackUser user : currentAccount.getUser()) { - if (user.getSecretkey() != null && user.getSecretkey().equalsIgnoreCase(UserContext.current().getSecretKey())) { - return currentAccount; - } - } - } - // otherwise let's find this user/account - List accounts = getApi().listAccounts(null, null, null, null, null, null, null, null); - for (CloudStackAccount account : accounts) { - CloudStackUser[] users = account.getUser(); - for (CloudStackUser user : users) { - String userSecretKey = user.getSecretkey(); - if (userSecretKey != null && userSecretKey.equalsIgnoreCase(UserContext.current().getSecretKey())) { - currentAccount = account; - return account; - } - } - } - // if we get here, there is something wrong... 
- return null; - } + if (rule.getAccountName() != null && rule.getSecurityGroupName() != null) { + EC2SecurityGroup newGroup = new EC2SecurityGroup(); + newGroup.setAccount(rule.getAccountName()); + newGroup.setName(rule.getSecurityGroupName()); + perm.addUser(newGroup); + } + response.addIpPermission(perm); + } + return true; + } - /** - * List networkOfferings by zone with securityGroup enabled - * - * @param zoneId - * @return - * @throws Exception - */ - private CloudStackNetwork getNetworksWithSecurityGroupEnabled(String zoneId) throws Exception { - List networks = getApi().listNetworks(null, null, null, null, null, null, null, null, null, zoneId); - List netWithSecGroup = new ArrayList(); - for (CloudStackNetwork network : networks ) { - if (!network.getNetworkOfferingAvailability().equalsIgnoreCase("unavailable") && network.getSecurityGroupEnabled()) - netWithSecGroup.add(network); - } - // we'll take the first one - return netWithSecGroup.get(0); - } + /** + * Find the current account based on the SecretKey + * + * @return + * @throws Exception + */ + public CloudStackAccount getCurrentAccount() throws Exception { + if (currentAccount != null) { + // verify this is the same account!!! + for (CloudStackUser user : currentAccount.getUser()) { + if (user.getSecretkey() != null && user.getSecretkey().equalsIgnoreCase(UserContext.current().getSecretKey())) { + return currentAccount; + } + } + } + // otherwise let's find this user/account + List accounts = getApi().listAccounts(null, null, null, null, null, null, null, null); + for (CloudStackAccount account : accounts) { + CloudStackUser[] users = account.getUser(); + for (CloudStackUser user : users) { + String userSecretKey = user.getSecretkey(); + if (userSecretKey != null && userSecretKey.equalsIgnoreCase(UserContext.current().getSecretKey())) { + currentAccount = account; + return account; + } + } + } + // if we get here, there is something wrong... 
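+        // (no user in any account matched the caller's secret key, so callers
+        // must be prepared to treat the null result as an unauthenticated request)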
+ return null; + } - /** - * Create a network - * - * @param zoneId - * @param offering - * @param owner - * @return - * @throws Exception - */ - private CloudStackNetwork createDefaultGuestNetwork(String zoneId, CloudStackNetworkOffering offering, CloudStackAccount owner) throws Exception { - return getApi().createNetwork(owner.getName() + "-network", owner.getName() + "-network", offering.getId(), zoneId, owner.getName(), - owner.getDomainId(), true, null, null, null, null, null, null, null, null); - } + /** + * List networkOfferings by zone with securityGroup enabled + * + * @param zoneId + * @return + * @throws Exception + */ + private CloudStackNetwork getNetworksWithSecurityGroupEnabled(String zoneId) throws Exception { + List networks = getApi().listNetworks(null, null, null, null, null, null, null, null, null, zoneId); + List netWithSecGroup = new ArrayList(); + for (CloudStackNetwork network : networks ) { + if (!network.getNetworkOfferingAvailability().equalsIgnoreCase("unavailable") && network.getSecurityGroupEnabled()) + netWithSecGroup.add(network); + } + // we'll take the first one + return netWithSecGroup.get(0); + } - /** - * List of networks without securityGroup enabled by zone - * - * @param zoneId - * @return - * @throws Exception - */ - private CloudStackNetwork getNetworksWithoutSecurityGroupEnabled(String zoneId) throws Exception { - // grab current account - CloudStackAccount caller = getCurrentAccount(); - - //check if account has any networks in the system - List networks = getApi().listNetworks(caller.getName(), caller.getDomainId(), null, true, null, null, null, null, null, zoneId); - - //listRequired offerings in the system - the network created from this offering has to be specified in deployVm command - List reuquiredOfferings = getApi().listNetworkOfferings("Required", null, null, null, true, null, null, null, null, null, zoneId); - if (reuquiredOfferings != null && !reuquiredOfferings.isEmpty()) { - if (networks != null && !networks.isEmpty()) { - //pick up the first required network from the network list - for (CloudStackNetwork network : networks) { + /** + * Create a network + * + * @param zoneId + * @param offering + * @param owner + * @return + * @throws Exception + */ + private CloudStackNetwork createDefaultGuestNetwork(String zoneId, CloudStackNetworkOffering offering, CloudStackAccount owner) throws Exception { + return getApi().createNetwork(owner.getName() + "-network", owner.getName() + "-network", offering.getId(), zoneId, owner.getName(), + owner.getDomainId(), true, null, null, null, null, null, null, null, null); + } + + /** + * List of networks without securityGroup enabled by zone + * + * @param zoneId + * @return + * @throws Exception + */ + private CloudStackNetwork getNetworksWithoutSecurityGroupEnabled(String zoneId) throws Exception { + // grab current account + CloudStackAccount caller = getCurrentAccount(); + + //check if account has any networks in the system + List networks = getApi().listNetworks(caller.getName(), caller.getDomainId(), null, true, null, null, null, null, null, zoneId); + + //listRequired offerings in the system - the network created from this offering has to be specified in deployVm command + List reuquiredOfferings = getApi().listNetworkOfferings("Required", null, null, null, true, null, null, null, null, null, zoneId); + if (reuquiredOfferings != null && !reuquiredOfferings.isEmpty()) { + if (networks != null && !networks.isEmpty()) { + //pick up the first required network from the network list + for 
(CloudStackNetwork network : networks) { for (CloudStackNetworkOffering requiredOffering : reuquiredOfferings) { logger.debug("[reqd/virtual} offering: " + requiredOffering.getId() + " network " + network.getNetworkOfferingId()); if (network.getNetworkOfferingId().equals(requiredOffering.getId())) { @@ -2199,178 +2190,178 @@ public class EC2Engine { } } } - } else { - //create new network and return it - return createDefaultGuestNetwork(zoneId, reuquiredOfferings.get(0), caller); - } - } else { - //find all optional network offerings in the system - List optionalOfferings = getApi().listNetworkOfferings("Optional", null, null, null, true, null, null, null, null, null, zoneId); - if (optionalOfferings != null && !optionalOfferings.isEmpty()) { - if (networks != null && !networks.isEmpty()) { - for (CloudStackNetwork network : networks) { - for (CloudStackNetworkOffering optionalOffering : optionalOfferings) { - logger.debug("[optional] offering: " + optionalOffering.getId() + " network " + network.getNetworkOfferingId()); - if (network.getNetworkOfferingId().equals(optionalOffering.getId())) { - return network; - } - } - } - } - } - } - - // if we get this far and haven't returned already return an error - throw new EC2ServiceException(ServerError.InternalError, "Unable to find an appropriate network for account " + caller.getName()); - } - - /** - * Find a suitable network to use for deployVM - * - * @param zone - * @return - * @throws Exception - */ - private CloudStackNetwork findNetwork(CloudStackZone zone) throws Exception { - if (zone == null) return null; - - // for basic networking, we don't specify a networkid for deployvm - if (zone.getNetworkType().equalsIgnoreCase("basic")) return null; - - if (zone.getSecurityGroupsEnabled()) { - // find system security group enabled network - return getNetworksWithSecurityGroupEnabled(zone.getId()); - - } else { - return getNetworksWithoutSecurityGroupEnabled(zone.getId()); - } - } - - private CloudStackZone findZone() throws Exception { - CloudStackAccount caller = getCurrentAccount(); - List cloudZones; - - String defaultZoneId = getDefaultZoneId(caller.getId()); - if (defaultZoneId != null) { - cloudZones = getApi().listZones(true, null, defaultZoneId, null); } else { + //create new network and return it + return createDefaultGuestNetwork(zoneId, reuquiredOfferings.get(0), caller); + } + } else { + //find all optional network offerings in the system + List optionalOfferings = getApi().listNetworkOfferings("Optional", null, null, null, true, null, null, null, null, null, zoneId); + if (optionalOfferings != null && !optionalOfferings.isEmpty()) { + if (networks != null && !networks.isEmpty()) { + for (CloudStackNetwork network : networks) { + for (CloudStackNetworkOffering optionalOffering : optionalOfferings) { + logger.debug("[optional] offering: " + optionalOffering.getId() + " network " + network.getNetworkOfferingId()); + if (network.getNetworkOfferingId().equals(optionalOffering.getId())) { + return network; + } + } + } + } + } + } + + // if we get this far and haven't returned already return an error + throw new EC2ServiceException(ServerError.InternalError, "Unable to find an appropriate network for account " + caller.getName()); + } + + /** + * Find a suitable network to use for deployVM + * + * @param zone + * @return + * @throws Exception + */ + private CloudStackNetwork findNetwork(CloudStackZone zone) throws Exception { + if (zone == null) return null; + + // for basic networking, we don't specify a networkid for deployvm + if 
(zone.getNetworkType().equalsIgnoreCase("basic")) return null; + + if (zone.getSecurityGroupsEnabled()) { + // find system security group enabled network + return getNetworksWithSecurityGroupEnabled(zone.getId()); + + } else { + return getNetworksWithoutSecurityGroupEnabled(zone.getId()); + } + } + + private CloudStackZone findZone() throws Exception { + CloudStackAccount caller = getCurrentAccount(); + List cloudZones; + + String defaultZoneId = getDefaultZoneId(caller.getId()); + if (defaultZoneId != null) { + cloudZones = getApi().listZones(true, null, defaultZoneId, null); + } else { // caller.getDomainId doesn't work in user mode // List cloudZones = getApi().listZones(true, caller.getDomainId(), null, null); - cloudZones = getApi().listZones(true, null, null, null); - } - if (cloudZones != null && cloudZones.size() > 0) { - return cloudZones.get(0); - } - return null; + cloudZones = getApi().listZones(true, null, null, null); } + if (cloudZones != null && cloudZones.size() > 0) { + return cloudZones.get(0); + } + return null; + } - /** - * Finds the defaultZone marked for the account - */ - private String getDefaultZoneId(String accountId) { - try { - return accDao.getDefaultZoneId(accountId); - } catch(Exception e) { - logger.error( "Error while retrieving Account information by id - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + /** + * Finds the defaultZone marked for the account + */ + private String getDefaultZoneId(String accountId) { + try { + return accDao.getDefaultZoneId(accountId); + } catch(Exception e) { + logger.error( "Error while retrieving Account information by id - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage()); + } + } + + /** + * Windows has its own device strings. + * + * @param hypervisor + * @param deviceId + * @return + */ + public String cloudDeviceIdToDevicePath( String hypervisor, String deviceId ) + { + Integer devId = new Integer(deviceId); + if (null != hypervisor && hypervisor.toLowerCase().contains( "windows" )) { + switch( devId ) { + case 1: return "xvdb"; + case 2: return "xvdc"; + case 3: return "xvdd"; + case 4: return "xvde"; + case 5: return "xvdf"; + case 6: return "xvdg"; + case 7: return "xvdh"; + case 8: return "xvdi"; + case 9: return "xvdj"; + default: return new String( "" + deviceId ); + } + } else { // -> assume its unix + switch( devId ) { + case 1: return "/dev/sdb"; + case 2: return "/dev/sdc"; + case 3: return "/dev/sdd"; + case 4: return "/dev/sde"; + case 5: return "/dev/sdf"; + case 6: return "/dev/sdg"; + case 7: return "/dev/sdh"; + case 8: return "/dev/sdi"; + case 9: return "/dev/sdj"; + default: return new String( "" + deviceId ); } } - - /** - * Windows has its own device strings. 
- * - * @param hypervisor - * @param deviceId - * @return - */ - public String cloudDeviceIdToDevicePath( String hypervisor, String deviceId ) - { - Integer devId = new Integer(deviceId); - if (null != hypervisor && hypervisor.toLowerCase().contains( "windows" )) { - switch( devId ) { - case 1: return "xvdb"; - case 2: return "xvdc"; - case 3: return "xvdd"; - case 4: return "xvde"; - case 5: return "xvdf"; - case 6: return "xvdg"; - case 7: return "xvdh"; - case 8: return "xvdi"; - case 9: return "xvdj"; - default: return new String( "" + deviceId ); - } - } else { // -> assume its unix - switch( devId ) { - case 1: return "/dev/sdb"; - case 2: return "/dev/sdc"; - case 3: return "/dev/sdd"; - case 4: return "/dev/sde"; - case 5: return "/dev/sdf"; - case 6: return "/dev/sdg"; - case 7: return "/dev/sdh"; - case 8: return "/dev/sdi"; - case 9: return "/dev/sdj"; - default: return new String( "" + deviceId ); - } - } - } + } - /** - * Translate the device name string into a Cloud Stack deviceId. - * deviceId 3 is reserved for CDROM and 0 for the ROOT disk - * - * @param device string - * @return deviceId value - */ - private String mapDeviceToCloudDeviceId( String device ) - { - if (device.equalsIgnoreCase( "/dev/sdb" )) return "1"; - else if (device.equalsIgnoreCase( "/dev/sdc" )) return "2"; - else if (device.equalsIgnoreCase( "/dev/sde" )) return "4"; - else if (device.equalsIgnoreCase( "/dev/sdf" )) return "5"; - else if (device.equalsIgnoreCase( "/dev/sdg" )) return "6"; - else if (device.equalsIgnoreCase( "/dev/sdh" )) return "7"; - else if (device.equalsIgnoreCase( "/dev/sdi" )) return "8"; - else if (device.equalsIgnoreCase( "/dev/sdj" )) return "9"; + /** + * Translate the device name string into a Cloud Stack deviceId. + * deviceId 3 is reserved for CDROM and 0 for the ROOT disk + * + * @param device string + * @return deviceId value + */ + private String mapDeviceToCloudDeviceId( String device ) + { + if (device.equalsIgnoreCase( "/dev/sdb" )) return "1"; + else if (device.equalsIgnoreCase( "/dev/sdc" )) return "2"; + else if (device.equalsIgnoreCase( "/dev/sde" )) return "4"; + else if (device.equalsIgnoreCase( "/dev/sdf" )) return "5"; + else if (device.equalsIgnoreCase( "/dev/sdg" )) return "6"; + else if (device.equalsIgnoreCase( "/dev/sdh" )) return "7"; + else if (device.equalsIgnoreCase( "/dev/sdi" )) return "8"; + else if (device.equalsIgnoreCase( "/dev/sdj" )) return "9"; - else if (device.equalsIgnoreCase( "/dev/xvdb" )) return "1"; - else if (device.equalsIgnoreCase( "/dev/xvdc" )) return "2"; - else if (device.equalsIgnoreCase( "/dev/xvde" )) return "4"; - else if (device.equalsIgnoreCase( "/dev/xvdf" )) return "5"; - else if (device.equalsIgnoreCase( "/dev/xvdg" )) return "6"; - else if (device.equalsIgnoreCase( "/dev/xvdh" )) return "7"; - else if (device.equalsIgnoreCase( "/dev/xvdi" )) return "8"; - else if (device.equalsIgnoreCase( "/dev/xvdj" )) return "9"; + else if (device.equalsIgnoreCase( "/dev/xvdb" )) return "1"; + else if (device.equalsIgnoreCase( "/dev/xvdc" )) return "2"; + else if (device.equalsIgnoreCase( "/dev/xvde" )) return "4"; + else if (device.equalsIgnoreCase( "/dev/xvdf" )) return "5"; + else if (device.equalsIgnoreCase( "/dev/xvdg" )) return "6"; + else if (device.equalsIgnoreCase( "/dev/xvdh" )) return "7"; + else if (device.equalsIgnoreCase( "/dev/xvdi" )) return "8"; + else if (device.equalsIgnoreCase( "/dev/xvdj" )) return "9"; - else if (device.equalsIgnoreCase( "xvdb" )) return "1"; - else if (device.equalsIgnoreCase( "xvdc" )) return 
"2"; - else if (device.equalsIgnoreCase( "xvde" )) return "4"; - else if (device.equalsIgnoreCase( "xvdf" )) return "5"; - else if (device.equalsIgnoreCase( "xvdg" )) return "6"; - else if (device.equalsIgnoreCase( "xvdh" )) return "7"; - else if (device.equalsIgnoreCase( "xvdi" )) return "8"; - else if (device.equalsIgnoreCase( "xvdj" )) return "9"; + else if (device.equalsIgnoreCase( "xvdb" )) return "1"; + else if (device.equalsIgnoreCase( "xvdc" )) return "2"; + else if (device.equalsIgnoreCase( "xvde" )) return "4"; + else if (device.equalsIgnoreCase( "xvdf" )) return "5"; + else if (device.equalsIgnoreCase( "xvdg" )) return "6"; + else if (device.equalsIgnoreCase( "xvdh" )) return "7"; + else if (device.equalsIgnoreCase( "xvdi" )) return "8"; + else if (device.equalsIgnoreCase( "xvdj" )) return "9"; - else throw new EC2ServiceException( ClientError.Unsupported, device + " is not supported" ); - } + else throw new EC2ServiceException( ClientError.Unsupported, device + " is not supported" ); + } - /** - * Map CloudStack instance state to Amazon state strings - * - * @param state - * @return - */ - private String mapToAmazonVolState( String state ) - { - if (state.equalsIgnoreCase( "Allocated" ) || - state.equalsIgnoreCase( "Creating" ) || - state.equalsIgnoreCase( "Ready" )) return "available"; + /** + * Map CloudStack instance state to Amazon state strings + * + * @param state + * @return + */ + private String mapToAmazonVolState( String state ) + { + if (state.equalsIgnoreCase( "Allocated" ) || + state.equalsIgnoreCase( "Creating" ) || + state.equalsIgnoreCase( "Ready" )) return "available"; - if (state.equalsIgnoreCase( "Destroy" )) return "deleting"; + if (state.equalsIgnoreCase( "Destroy" )) return "deleting"; - return "error"; - } + return "error"; + } /** * Map Amazon resourceType to CloudStack resourceType @@ -2402,45 +2393,45 @@ public class EC2Engine { return (resourceType.toLowerCase()); } - /** - * Stop an instance - * Wait until one specific VM has stopped - * - * @param instanceId - * @return - * @throws Exception - */ - private boolean stopVirtualMachine( String instanceId) throws Exception { - try { - CloudStackUserVm resp = getApi().stopVirtualMachine(instanceId, false); - if (logger.isDebugEnabled()) - logger.debug("Stopping VM " + instanceId ); - return resp != null; - } catch(Exception e) { - logger.error( "StopVirtualMachine - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } + /** + * Stop an instance + * Wait until one specific VM has stopped + * + * @param instanceId + * @return + * @throws Exception + */ + private boolean stopVirtualMachine( String instanceId) throws Exception { + try { + CloudStackUserVm resp = getApi().stopVirtualMachine(instanceId, false); + if (logger.isDebugEnabled()) + logger.debug("Stopping VM " + instanceId ); + return resp != null; + } catch(Exception e) { + logger.error( "StopVirtualMachine - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? 
e.getMessage() : "An unexpected error occurred."); + } + } + + /** + * Start an existing stopped instance(VM) + * + * @param instanceId + * @return + * @throws Exception + */ + private boolean startVirtualMachine( String instanceId ) throws Exception { + try { + CloudStackUserVm resp = getApi().startVirtualMachine(instanceId); + if (logger.isDebugEnabled()) + logger.debug("Starting VM " + instanceId ); + return resp != null; + } catch(Exception e) { + logger.error("StartVirtualMachine - ", e); + throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); + } + } - /** - * Start an existing stopped instance(VM) - * - * @param instanceId - * @return - * @throws Exception - */ - private boolean startVirtualMachine( String instanceId ) throws Exception { - try { - CloudStackUserVm resp = getApi().startVirtualMachine(instanceId); - if (logger.isDebugEnabled()) - logger.debug("Starting VM " + instanceId ); - return resp != null; - } catch(Exception e) { - logger.error("StartVirtualMachine - ", e); - throw new EC2ServiceException(ServerError.InternalError, e.getMessage() != null ? e.getMessage() : "An unexpected error occurred."); - } - } - /** * Cloud Stack API takes a comma separated list as a parameter. * diff --git a/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java b/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java index 916c51d846c..2ce9e339255 100644 --- a/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java +++ b/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java @@ -33,13 +33,14 @@ import java.util.Set; import java.util.TimeZone; import java.util.UUID; +import javax.inject.Inject; import javax.servlet.http.HttpServletResponse; import org.apache.log4j.Logger; import org.json.simple.parser.ParseException; -import com.cloud.bridge.io.S3FileSystemBucketAdapter; import com.cloud.bridge.io.S3CAStorBucketAdapter; +import com.cloud.bridge.io.S3FileSystemBucketAdapter; import com.cloud.bridge.model.BucketPolicyVO; import com.cloud.bridge.model.MHostMountVO; import com.cloud.bridge.model.MHostVO; @@ -50,27 +51,18 @@ import com.cloud.bridge.model.SBucketVO; import com.cloud.bridge.model.SHost; import com.cloud.bridge.model.SHostVO; import com.cloud.bridge.model.SMetaVO; -import com.cloud.bridge.model.SObjectVO; import com.cloud.bridge.model.SObjectItemVO; +import com.cloud.bridge.model.SObjectVO; import com.cloud.bridge.persist.dao.BucketPolicyDao; -import com.cloud.bridge.persist.dao.BucketPolicyDaoImpl; import com.cloud.bridge.persist.dao.MHostDao; -import com.cloud.bridge.persist.dao.MHostDaoImpl; import com.cloud.bridge.persist.dao.MHostMountDao; -import com.cloud.bridge.persist.dao.MHostMountDaoImpl; import com.cloud.bridge.persist.dao.MultipartLoadDao; import com.cloud.bridge.persist.dao.SAclDao; -import com.cloud.bridge.persist.dao.SAclDaoImpl; import com.cloud.bridge.persist.dao.SBucketDao; -import com.cloud.bridge.persist.dao.SBucketDaoImpl; import com.cloud.bridge.persist.dao.SHostDao; -import com.cloud.bridge.persist.dao.SHostDaoImpl; import com.cloud.bridge.persist.dao.SMetaDao; -import com.cloud.bridge.persist.dao.SMetaDaoImpl; import com.cloud.bridge.persist.dao.SObjectDao; -import com.cloud.bridge.persist.dao.SObjectDaoImpl; import com.cloud.bridge.persist.dao.SObjectItemDao; -import com.cloud.bridge.persist.dao.SObjectItemDaoImpl; import com.cloud.bridge.service.UserContext; import com.cloud.bridge.service.controller.s3.ServiceProvider; import 
com.cloud.bridge.service.core.s3.S3BucketPolicy.PolicyAccess; @@ -86,11 +78,10 @@ import com.cloud.bridge.service.exception.OutOfServiceException; import com.cloud.bridge.service.exception.OutOfStorageException; import com.cloud.bridge.service.exception.PermissionDeniedException; import com.cloud.bridge.util.DateHelper; +import com.cloud.bridge.util.OrderedPair; import com.cloud.bridge.util.PolicyParser; import com.cloud.bridge.util.StringHelper; -import com.cloud.bridge.util.OrderedPair; import com.cloud.bridge.util.Triple; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; @@ -99,105 +90,105 @@ import com.cloud.utils.db.Transaction; */ public class S3Engine { protected final static Logger logger = Logger.getLogger(S3Engine.class); - protected final SHostDao shostDao = ComponentLocator.inject(SHostDaoImpl.class); - protected final MHostDao mhostDao = ComponentLocator.inject(MHostDaoImpl.class); - protected final static BucketPolicyDao bPolicy = ComponentLocator.inject(BucketPolicyDaoImpl.class); - protected final BucketPolicyDao bPolicyDao = ComponentLocator.inject(BucketPolicyDaoImpl.class); - protected final SBucketDao bucketDao = ComponentLocator.inject(SBucketDaoImpl.class); - protected final SAclDao aclDao = ComponentLocator.inject(SAclDaoImpl.class); - protected final static SAclDao saclDao = ComponentLocator.inject(SAclDaoImpl.class); - protected final SObjectDao objectDao = ComponentLocator.inject(SObjectDaoImpl.class); - protected final SObjectItemDao itemDao = ComponentLocator.inject(SObjectItemDaoImpl.class); - protected final SMetaDao metaDao = ComponentLocator.inject(SMetaDaoImpl.class); - protected final MHostMountDao mountDao = ComponentLocator.inject(MHostMountDaoImpl.class); + @Inject SHostDao shostDao; + @Inject MHostDao mhostDao; + @Inject static BucketPolicyDao bPolicy; + @Inject BucketPolicyDao bPolicyDao; + @Inject SBucketDao bucketDao; + @Inject SAclDao aclDao; + @Inject static SAclDao saclDao; + @Inject SObjectDao objectDao; + @Inject SObjectItemDao itemDao; + @Inject SMetaDao metaDao; + @Inject MHostMountDao mountDao; private final int LOCK_ACQUIRING_TIMEOUT_SECONDS = 10; // ten seconds private final Map bucketAdapters = new HashMap(); - + public S3Engine() { - bucketAdapters.put(SHost.STORAGE_HOST_TYPE_LOCAL, new S3FileSystemBucketAdapter()); + bucketAdapters.put(SHost.STORAGE_HOST_TYPE_LOCAL, new S3FileSystemBucketAdapter()); bucketAdapters.put(SHost.STORAGE_HOST_TYPE_CASTOR, new S3CAStorBucketAdapter()); } - - + + /** * Return a S3CopyObjectResponse which represents an object being copied from source * to destination bucket. * Called from S3ObjectAction when copying an object. * This can be treated as first a GET followed by a PUT of the object the user wants to copy. 
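     * Metadata follows the request's directive: with MetadataDirective.COPY the
     * source object's metadata is carried over, otherwise the metadata entries
     * supplied with the request are stored with the copy.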
    */
-
-    public S3CopyObjectResponse handleRequest(S3CopyObjectRequest request)
-    {
-        S3CopyObjectResponse response = new S3CopyObjectResponse();
-
-        // [A] Get the object we want to copy
-        S3GetObjectRequest getRequest = new S3GetObjectRequest();
-        getRequest.setBucketName(request.getSourceBucketName());
-        getRequest.setKey(request.getSourceKey());
-        getRequest.setVersion(request.getVersion());
-        getRequest.setConditions( request.getConditions());
-        getRequest.setInlineData( true );
-        getRequest.setReturnData( true );
-        if ( MetadataDirective.COPY == request.getDirective())
-             getRequest.setReturnMetadata( true );
-        else getRequest.setReturnMetadata( false );
-
-        //-> before we do anything verify the permissions on a copy basis
-        String destinationBucketName = request.getDestinationBucketName();
-        String destinationKeyName = request.getDestinationKey();
-        S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, destinationBucketName );
-        context.setKeyName( destinationKeyName );
-        context.setEvalParam( ConditionKeys.MetaData, request.getDirective().toString());
-        context.setEvalParam( ConditionKeys.CopySource, "/" + request.getSourceBucketName() + "/" + request.getSourceKey());
-        if (PolicyAccess.DENY == verifyPolicy( context ))
+    public S3CopyObjectResponse handleRequest(S3CopyObjectRequest request)
+    {
+        S3CopyObjectResponse response = new S3CopyObjectResponse();
+
+        // [A] Get the object we want to copy
+        S3GetObjectRequest getRequest = new S3GetObjectRequest();
+        getRequest.setBucketName(request.getSourceBucketName());
+        getRequest.setKey(request.getSourceKey());
+        getRequest.setVersion(request.getVersion());
+        getRequest.setConditions( request.getConditions());
+
+        getRequest.setInlineData( true );
+        getRequest.setReturnData( true );
+        if ( MetadataDirective.COPY == request.getDirective())
+             getRequest.setReturnMetadata( true );
+        else getRequest.setReturnMetadata( false );
+
+        //-> before we do anything verify the permissions on a copy basis
+        String destinationBucketName = request.getDestinationBucketName();
+        String destinationKeyName = request.getDestinationKey();
+        S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, destinationBucketName );
+        context.setKeyName( destinationKeyName );
+        context.setEvalParam( ConditionKeys.MetaData, request.getDirective().toString());
+        context.setEvalParam( ConditionKeys.CopySource, "/" + request.getSourceBucketName() + "/" + request.getSourceKey());
+        if (PolicyAccess.DENY == verifyPolicy( context ))
             throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" );
-
-        S3GetObjectResponse originalObject = handleRequest(getRequest);
-        int resultCode = originalObject.getResultCode();
-        if (200 != resultCode) {
-            response.setResultCode( resultCode );
-            response.setResultDescription( originalObject.getResultDescription());
-            return response;
-        }
-
-        response.setCopyVersion( originalObject.getVersion());
-
-        // [B] Put the object into the destination bucket
-        S3PutObjectInlineRequest putRequest = new S3PutObjectInlineRequest();
-        putRequest.setBucketName(request.getDestinationBucketName()) ;
-        putRequest.setKey(destinationKeyName);
-        if ( MetadataDirective.COPY == request.getDirective())
-             putRequest.setMetaEntries(originalObject.getMetaEntries());
-        else putRequest.setMetaEntries(request.getMetaEntries());
-        putRequest.setAcl(request.getAcl());                    // -> if via a SOAP call
-        putRequest.setCannedAccess(request.getCannedAccess());  // -> if via a REST call
-        putRequest.setContentLength(originalObject.getContentLength());
-        putRequest.setData(originalObject.getData());
+        S3GetObjectResponse originalObject = handleRequest(getRequest);
+        int resultCode = originalObject.getResultCode();
+        if (200 != resultCode) {
+            response.setResultCode( resultCode );
+            response.setResultDescription( originalObject.getResultDescription());
+            return response;
+        }
-        S3PutObjectInlineResponse putResp = handleRequest(putRequest);
-        response.setResultCode( putResp.resultCode );
-        response.setResultDescription( putResp.getResultDescription());
-        response.setETag( putResp.getETag());
-        response.setLastModified( putResp.getLastModified());
-        response.setPutVersion( putResp.getVersion());
-        return response;
-    }
+        response.setCopyVersion( originalObject.getVersion());
+
+
+        // [B] Put the object into the destination bucket
+        S3PutObjectInlineRequest putRequest = new S3PutObjectInlineRequest();
+        putRequest.setBucketName(request.getDestinationBucketName()) ;
+        putRequest.setKey(destinationKeyName);
+        if ( MetadataDirective.COPY == request.getDirective())
+             putRequest.setMetaEntries(originalObject.getMetaEntries());
+        else putRequest.setMetaEntries(request.getMetaEntries());
+        putRequest.setAcl(request.getAcl());                    // -> if via a SOAP call
+        putRequest.setCannedAccess(request.getCannedAccess());  // -> if via a REST call
+        putRequest.setContentLength(originalObject.getContentLength());
+        putRequest.setData(originalObject.getData());
+
+        S3PutObjectInlineResponse putResp = handleRequest(putRequest);
+        response.setResultCode( putResp.resultCode );
+        response.setResultDescription( putResp.getResultDescription());
+        response.setETag( putResp.getETag());
+        response.setLastModified( putResp.getLastModified());
+        response.setPutVersion( putResp.getVersion());
+    }
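As the javadoc above notes, a copy is modeled as a GET of the source followed by a PUT to the destination, with the destination's PutObject policy checked before any data is read. A minimal standalone sketch of that composition pattern, using a hypothetical in-memory BlobStore rather than the engine's request/response classes:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class BlobStore {
        private final Map<String, byte[]> blobs = new ConcurrentHashMap<>();

        void put(String key, byte[] data) { blobs.put(key, data.clone()); }

        byte[] get(String key) {
            byte[] data = blobs.get(key);
            if (data == null) throw new IllegalArgumentException("no such key: " + key);
            return data.clone();
        }

        // copy is just get(source) then put(destination); a permission check on
        // the destination belongs before the get, mirroring the handler above
        void copy(String sourceKey, String destKey) {
            put(destKey, get(sourceKey));
        }
    }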
 public S3CreateBucketResponse handleRequest(S3CreateBucketRequest request)
 {
-        S3CreateBucketResponse response = new S3CreateBucketResponse();
-        String cannedAccessPolicy = request.getCannedAccess();
-        String bucketName = request.getBucketName();
-        response.setBucketName( bucketName );
-        Transaction txn= null;
-        verifyBucketName( bucketName, false );
-
-        S3PolicyContext context = new S3PolicyContext( PolicyActions.CreateBucket, bucketName );
-        context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy );
-        if (PolicyAccess.DENY == verifyPolicy( context ))
+        S3CreateBucketResponse response = new S3CreateBucketResponse();
+        String cannedAccessPolicy = request.getCannedAccess();
+        String bucketName = request.getBucketName();
+        response.setBucketName( bucketName );
+        Transaction txn= null;
+        verifyBucketName( bucketName, false );
+
+        S3PolicyContext context = new S3PolicyContext( PolicyActions.CreateBucket, bucketName );
+        context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy );
+        if (PolicyAccess.DENY == verifyPolicy( context ))
             throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" );

         OrderedPair shost_storagelocation_pair = null;
         boolean success = false;
@@ -211,7 +202,7 @@ request.getBucketName(), null);
             SBucketVO sbucket = new SBucketVO(request.getBucketName(),
                     DateHelper.currentGMTTime(), UserContext.current()
-                    .getCanonicalUserId(),
+                            .getCanonicalUserId(),
                     shost_storagelocation_pair.getFirst());

             shost_storagelocation_pair.getFirst().getBuckets().add(sbucket);
@@ -239,29 +230,29 @@ txn.rollback();
             txn.close();
         }
-        return response;
+        return response;
     }
-
+
    /**
    * Return a S3Response which represents the effect of an object being deleted from its bucket.
    * Called from S3BucketAction when deleting an object.
    */
-
+
    public S3Response handleRequest( S3DeleteBucketRequest request )
    {
-        S3Response response = new S3Response();
-        //
-        String bucketName = request.getBucketName();
-        SBucketVO sbucket = bucketDao.getByName(bucketName);
-
-        Transaction txn = null;
-        if ( sbucket != null )
-        {
-            txn = Transaction.open(Transaction.AWSAPI_DB);
-            txn.start();
-            S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteBucket, bucketName );
-            switch( verifyPolicy( context ))
-            {
+        S3Response response = new S3Response();
+        //
+        String bucketName = request.getBucketName();
+        SBucketVO sbucket = bucketDao.getByName(bucketName);
+
+        Transaction txn = null;
+        if ( sbucket != null )
+        {
+            txn = Transaction.open(Transaction.AWSAPI_DB);
+            txn.start();
+            S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteBucket, bucketName );
+            switch( verifyPolicy( context ))
+            {
             case ALLOW:
                 // The bucket policy can give users permission to delete a
                 // bucket whereas ACLs cannot
@@ -282,110 +273,110 @@ }
                 break;
             }
-
-            // Delete the file from its storage location
-            OrderedPair host_storagelocation_pair = getBucketStorageHost(sbucket);
-            S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
-            bucketAdapter.deleteContainer(host_storagelocation_pair.getSecond(), request.getBucketName());
-
-            // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl, SMeta and policy objects.
-            // To delete SMeta & SAcl objects:
-            // (1)Get all the objects in the bucket,
-            // (2)then all the items in each object,
-            // (3) then all meta & acl data for each item
-            Set objectsInBucket = sbucket.getObjectsInBucket();
-            Iterator it = objectsInBucket.iterator();
-            while( it.hasNext())
-            {
-                SObjectVO oneObject = (SObjectVO)it.next();
-                Set itemsInObject = oneObject.getItems();
-                Iterator is = itemsInObject.iterator();
-                while( is.hasNext())
-                {
-                    SObjectItemVO oneItem = (SObjectItemVO) is.next();
+
+            // Delete the file from its storage location
+            OrderedPair host_storagelocation_pair = getBucketStorageHost(sbucket);
+            S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
+            bucketAdapter.deleteContainer(host_storagelocation_pair.getSecond(), request.getBucketName());
+
+            // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl, SMeta and policy objects.
+            // To delete SMeta & SAcl objects:
+            // (1)Get all the objects in the bucket,
+            // (2)then all the items in each object,
+            // (3) then all meta & acl data for each item
+            Set objectsInBucket = sbucket.getObjectsInBucket();
+            Iterator it = objectsInBucket.iterator();
+            while( it.hasNext())
+            {
+                SObjectVO oneObject = it.next();
+                Set itemsInObject = oneObject.getItems();
+                Iterator is = itemsInObject.iterator();
+                while( is.hasNext())
+                {
+                    SObjectItemVO oneItem = is.next();
                     deleteMetaData(oneItem.getId());
                     deleteObjectAcls("SObjectItem", oneItem.getId());
-                }
-            }
-
-            // Delete all the policy state associated with the bucket
-            try {
                 ServiceProvider.getInstance().deleteBucketPolicy(bucketName);
                 bPolicyDao.deletePolicy(bucketName);
-            } catch( Exception e ) {
-                logger.error("When deleting a bucket we must try to delete its policy: ", e);
-            }
-
-            deleteBucketAcls( sbucket.getId());
-            bucketDao.remove(sbucket.getId());
-
-
-            response.setResultCode(204);
-            response.setResultDescription("OK");
-
-            txn.close();
-        }
-        else
-        {    response.setResultCode(404);
-            response.setResultDescription("Bucket does not exist");
-        }
-        return response;
+            } catch( Exception e ) {
+                logger.error("When deleting a bucket we must try to delete its policy: ", e);
+            }
+
+            deleteBucketAcls( sbucket.getId());
+            bucketDao.remove(sbucket.getId());
+
+
+            response.setResultCode(204);
+            response.setResultDescription("OK");
+
+            txn.close();
+        }
+        else
+        {    response.setResultCode(404);
+            response.setResultDescription("Bucket does not exist");
+        }
+        return response;
    }
-
+
    /**
    * Return a S3ListBucketResponse which represents a list of up to 1000 objects contained in the bucket.
    * Called from S3BucketAction for GETting objects and for GETting object versions.
    */
-
+
    public S3ListBucketResponse listBucketContents(S3ListBucketRequest request, boolean includeVersions)
    {
-        S3ListBucketResponse response = new S3ListBucketResponse();
-        String bucketName = request.getBucketName();
-        String prefix = request.getPrefix();
-        if (prefix == null) prefix = StringHelper.EMPTY_STRING;
-        String marker = request.getMarker();
-        if (marker == null) marker = StringHelper.EMPTY_STRING;
-
-        String delimiter = request.getDelimiter();
-        int maxKeys = request.getMaxKeys();
-        if(maxKeys <= 0) maxKeys = 1000;
-
-        //
-        SBucketVO sbucket = bucketDao.getByName(bucketName);
-        if (sbucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
+        S3ListBucketResponse response = new S3ListBucketResponse();
+        String bucketName = request.getBucketName();
+        String prefix = request.getPrefix();
+        if (prefix == null) prefix = StringHelper.EMPTY_STRING;
+        String marker = request.getMarker();
+        if (marker == null) marker = StringHelper.EMPTY_STRING;
-        PolicyActions action = (includeVersions ? PolicyActions.ListBucketVersions : PolicyActions.ListBucket);
-        S3PolicyContext context = new S3PolicyContext( action, bucketName );
-        context.setEvalParam( ConditionKeys.MaxKeys, new String( "" + maxKeys ));
-        context.setEvalParam( ConditionKeys.Prefix, prefix );
-        context.setEvalParam( ConditionKeys.Delimiter, delimiter );
-        verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ );
+        String delimiter = request.getDelimiter();
+        int maxKeys = request.getMaxKeys();
+        if(maxKeys <= 0) maxKeys = 1000;
-
-        // When executing the query, request one more item so that we know how to set isTruncated flag
-        List l = null;
-
-        if ( includeVersions )
-             l = objectDao.listAllBucketObjects( sbucket, prefix, marker, maxKeys+1 );
-        else l = objectDao.listBucketObjects( sbucket, prefix, marker, maxKeys+1 );
-
-        response.setBucketName(bucketName);
-        response.setMarker(marker);
-        response.setMaxKeys(maxKeys);
-        response.setPrefix(prefix);
-        response.setDelimiter(delimiter);
-        if (null != l ) {
-            response.setTruncated(l.size() > maxKeys);
-            if(l.size() > maxKeys) {
-                response.setNextMarker(l.get(l.size() - 1).getNameKey());
-            }
-        }
-        // If needed - SOAP response does not support versioning
-        response.setContents( composeListBucketContentEntries(l, prefix, delimiter, maxKeys, includeVersions, request.getVersionIdMarker()));
-        response.setCommonPrefixes( composeListBucketPrefixEntries(l, prefix, delimiter, maxKeys));
-        return response;
+        //
+        SBucketVO sbucket = bucketDao.getByName(bucketName);
+        if (sbucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
+
+        PolicyActions action = (includeVersions ? PolicyActions.ListBucketVersions : PolicyActions.ListBucket);
+        S3PolicyContext context = new S3PolicyContext( action, bucketName );
+        context.setEvalParam( ConditionKeys.MaxKeys, new String( "" + maxKeys ));
+        context.setEvalParam( ConditionKeys.Prefix, prefix );
+        context.setEvalParam( ConditionKeys.Delimiter, delimiter );
+        verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ );
+
+
+        // When executing the query, request one more item so that we know how to set isTruncated flag
+        List l = null;
+
+        if ( includeVersions )
+             l = objectDao.listAllBucketObjects( sbucket, prefix, marker, maxKeys+1 );
+        else l = objectDao.listBucketObjects( sbucket, prefix, marker, maxKeys+1 );
+
+        response.setBucketName(bucketName);
+        response.setMarker(marker);
+        response.setMaxKeys(maxKeys);
+        response.setPrefix(prefix);
+        response.setDelimiter(delimiter);
+        if (null != l ) {
+            response.setTruncated(l.size() > maxKeys);
+            if(l.size() > maxKeys) {
+                response.setNextMarker(l.get(l.size() - 1).getNameKey());
+            }
+        }
+        // If needed - SOAP response does not support versioning
+        response.setContents( composeListBucketContentEntries(l, prefix, delimiter, maxKeys, includeVersions, request.getVersionIdMarker()));
+        response.setCommonPrefixes( composeListBucketPrefixEntries(l, prefix, delimiter, maxKeys));
+        return response;
    }
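The truncation logic above avoids a second COUNT query by asking the DAO for maxKeys+1 rows: if the extra row comes back, the listing is truncated and a NextMarker is emitted. A small sketch of that idiom, assuming a hypothetical query that returns keys in name order with the limit set to maxKeys+1:

    import java.util.List;

    final class ListPage {
        final List<String> keys;     // what the caller sees: at most maxKeys entries
        final boolean truncated;     // true when the (maxKeys+1)-th row existed
        final String nextMarker;     // resume point for the next request, if truncated

        // 'fetched' must have been queried with limit maxKeys + 1
        ListPage(List<String> fetched, int maxKeys) {
            this.truncated = fetched.size() > maxKeys;
            this.keys = truncated ? fetched.subList(0, maxKeys) : fetched;
            this.nextMarker = truncated ? keys.get(keys.size() - 1) : null;
        }
    }

Note that the handler above derives its NextMarker from the last fetched row rather than the last row it returns; the sketch uses the last returned key, which is one common convention, not necessarily the engine's.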
-
+
    /**
    * Return a S3ListAllMyBucketsResponse which represents a list of all buckets owned by the requester.
    * Called from S3BucketAction for GETting all buckets.
@@ -394,90 +385,90 @@ */
    public S3ListAllMyBucketsResponse handleRequest(S3ListAllMyBucketsRequest request)
    {
-        S3ListAllMyBucketsResponse response = new S3ListAllMyBucketsResponse();
+        S3ListAllMyBucketsResponse response = new S3ListAllMyBucketsResponse();
-
-        // "...you can only list buckets for which you are the owner."
-        List buckets = bucketDao.listBuckets(UserContext.current().getCanonicalUserId());
-        S3CanonicalUser owner = new S3CanonicalUser();
-        owner.setID(UserContext.current().getCanonicalUserId());
-        owner.setDisplayName("");
-        response.setOwner(owner);
-
-        if (buckets != null)
-        {
-            S3ListAllMyBucketsEntry[] entries = new S3ListAllMyBucketsEntry[buckets.size()];
-            int i = 0;
-            for(SBucketVO bucket : buckets)
-            {
-                String bucketName = bucket.getName();
-                S3PolicyContext context = new S3PolicyContext( PolicyActions.ListAllMyBuckets, bucketName );
-                verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_PASS );
-
-                entries[i] = new S3ListAllMyBucketsEntry();
-                entries[i].setName(bucketName);
-                entries[i].setCreationDate(DateHelper.toCalendar(bucket.getCreateTime()));
-                i++;
-            }
-            response.setBuckets(entries);
-        }
-        return response;
+
+        // "...you can only list buckets for which you are the owner."
+        List buckets = bucketDao.listBuckets(UserContext.current().getCanonicalUserId());
+        S3CanonicalUser owner = new S3CanonicalUser();
+        owner.setID(UserContext.current().getCanonicalUserId());
+        owner.setDisplayName("");
+        response.setOwner(owner);
+
+        if (buckets != null)
+        {
+            S3ListAllMyBucketsEntry[] entries = new S3ListAllMyBucketsEntry[buckets.size()];
+            int i = 0;
+            for(SBucketVO bucket : buckets)
+            {
+                String bucketName = bucket.getName();
+                S3PolicyContext context = new S3PolicyContext( PolicyActions.ListAllMyBuckets, bucketName );
+                verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_PASS );
+
+                entries[i] = new S3ListAllMyBucketsEntry();
+                entries[i].setName(bucketName);
+                entries[i].setCreationDate(DateHelper.toCalendar(bucket.getCreateTime()));
+                i++;
+            }
+            response.setBuckets(entries);
+        }
+        return response;
    }
-
+
    /**
    * Return an S3Response representing the result of PUTTING the ACL of a given bucket.
    * Called from S3BucketAction to PUT its ACL.
    */
-
+
    public S3Response handleRequest(S3SetBucketAccessControlPolicyRequest request)
    {
-        S3Response response = new S3Response();
-        String bucketName = request.getBucketName();
-        SBucketVO sbucket = bucketDao.getByName(bucketName);
-        if(sbucket == null) {
-            response.setResultCode(404);
-            response.setResultDescription("Bucket does not exist");
-            return response;
-        }
-
-        S3PolicyContext context = new S3PolicyContext( PolicyActions.PutBucketAcl, bucketName );
-        verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE_ACL );
+        S3Response response = new S3Response();
+        String bucketName = request.getBucketName();
+        SBucketVO sbucket = bucketDao.getByName(bucketName);
+        if(sbucket == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Bucket does not exist");
+            return response;
+        }
-        aclDao.save("SBucket", sbucket.getId(), request.getAcl());
-
-        response.setResultCode(200);
-        response.setResultDescription("OK");
-        return response;
+        S3PolicyContext context = new S3PolicyContext( PolicyActions.PutBucketAcl, bucketName );
+        verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE_ACL );
+
+        aclDao.save("SBucket", sbucket.getId(), request.getAcl());
+
+        response.setResultCode(200);
+        response.setResultDescription("OK");
+        return response;
    }
-
-
+
+
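The ACL handlers above (and the object-level ones further down) address grants through a (target type, target id) pair — "SBucket" here, "SObjectItem" for object versions — so a single grant table serves every kind of target. A sketch of that keying scheme, with hypothetical Grant/AclStore types standing in for the SAcl DAO:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class AclStore {
        static final class Grant {
            final String grantee;
            final int permission;
            Grant(String grantee, int permission) { this.grantee = grantee; this.permission = permission; }
        }

        private final Map<String, List<Grant>> grantsByTarget = new HashMap<>();

        private static String key(String targetType, long targetId) {
            return targetType + ":" + targetId;
        }

        // replace-style save, like aclDao.save("SBucket", id, acl) above
        void save(String targetType, long targetId, List<Grant> acl) {
            grantsByTarget.put(key(targetType, targetId), new ArrayList<>(acl));
        }

        List<Grant> listGrants(String targetType, long targetId) {
            return grantsByTarget.getOrDefault(key(targetType, targetId), List.of());
        }
    }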
    /**
    * Return a S3AccessControlPolicy representing the ACL of a given bucket.
    * Called from S3BucketAction to GET its ACL.
    */
-
+
    public S3AccessControlPolicy handleRequest(S3GetBucketAccessControlPolicyRequest request)
    {
-        S3AccessControlPolicy policy = new S3AccessControlPolicy();
-        String bucketName = request.getBucketName();
-        SBucketVO sbucket = bucketDao.getByName( bucketName );
-        if (sbucket == null)
-            throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
-
-        S3CanonicalUser owner = new S3CanonicalUser();
-        owner.setID(sbucket.getOwnerCanonicalId());
-        owner.setDisplayName("");
-        policy.setOwner(owner);
-
-        S3PolicyContext context = new S3PolicyContext( PolicyActions.GetBucketAcl, bucketName );
-        verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ_ACL );
+        S3AccessControlPolicy policy = new S3AccessControlPolicy();
+        String bucketName = request.getBucketName();
+        SBucketVO sbucket = bucketDao.getByName( bucketName );
+        if (sbucket == null)
+            throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
+
+        S3CanonicalUser owner = new S3CanonicalUser();
+        owner.setID(sbucket.getOwnerCanonicalId());
+        owner.setDisplayName("");
+        policy.setOwner(owner);
+
+        S3PolicyContext context = new S3PolicyContext( PolicyActions.GetBucketAcl, bucketName );
+        verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ_ACL );
-        List grants = aclDao.listGrants("SBucket", sbucket.getId());
-        policy.setGrants(S3Grant.toGrants(grants));
-        return policy;
+        List grants = aclDao.listGrants("SBucket", sbucket.getId());
+        policy.setGrants(S3Grant.toGrants(grants));
+        return policy;
    }
-
+
    /**
    * This method should be called if a multipart upload is aborted OR has completed successfully and
    * the individual parts have to be cleaned up.
@@ -487,67 +478,67 @@ * @param verifyPermission - If false then do not check the user's permission to clean up the state
    */
    public int freeUploadParts(String bucketName, int uploadId, boolean verifyPermission) {
-
-        // -> we need to look up the final bucket to figure out which mount
-        // point to use to save the part in
-        // SBucketDao bucketDao = new SBucketDao();
-        SBucketVO bucket = bucketDao.getByName(bucketName);
-        if (bucket == null) {
-            logger.error("freeUploadParts failed since " + bucketName
-                    + " does not exist");
-            return 404;
-        }
-        OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket);
-        S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
+        // -> we need to look up the final bucket to figure out which mount
+        // point to use to save the part in
+        // SBucketDao bucketDao = new SBucketDao();
+        SBucketVO bucket = bucketDao.getByName(bucketName);
+        if (bucket == null) {
+            logger.error("freeUploadParts failed since " + bucketName
+                    + " does not exist");
+            return 404;
+        }
-        try {
-            MultipartLoadDao uploadDao = new MultipartLoadDao();
-            OrderedPair exists = uploadDao.multipartExits(uploadId);
-
-            if (null == exists) {
-                logger.error("freeUploadParts failed since multipart upload"
-                        + uploadId + " does not exist");
-                return 404;
-            }
+        OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket);
+        S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
-            // -> the multipart initiator or bucket owner can do this action by
-            // default
-            if (verifyPermission) {
-                String initiator = uploadDao.getInitiator(uploadId);
-                if (null == initiator
-                        || !initiator.equals(UserContext.current()
                                .getAccessKey())) {
-                    // -> write permission on a bucket allows a PutObject /
-                    // DeleteObject action on any object in the bucket
-                    S3PolicyContext context = new S3PolicyContext(
-                            PolicyActions.AbortMultipartUpload, bucketName);
-                    context.setKeyName(exists.getSecond());
-                    verifyAccess(context, "SBucket", bucket.getId(),
-                            SAcl.PERMISSION_WRITE);
-                }
-            }
+        try {
+            MultipartLoadDao uploadDao = new MultipartLoadDao();
+            OrderedPair exists = uploadDao.multipartExits(uploadId);
-            // -> first get a list of all the uploaded files and delete one by
-            // one
-            S3MultipartPart[] parts = uploadDao.getParts(uploadId, 10000, 0);
-            for (int i = 0; i < parts.length; i++) {
-                bucketAdapter.deleteObject(host_storagelocation_pair.getSecond(), ServiceProvider.getInstance()
-                        .getMultipartDir(), parts[i].getPath());
-            }
-            uploadDao.deleteUpload(uploadId);
-            return 204;
+            if (null == exists) {
+                logger.error("freeUploadParts failed since multipart upload"
+                        + uploadId + " does not exist");
+                return 404;
+            }
-        } catch (PermissionDeniedException e) {
-            logger.error("freeUploadParts failed due to [" + e.getMessage()
-                    + "]", e);
-            throw e;
-        } catch (Exception e) {
-            logger.error("freeUploadParts failed due to [" + e.getMessage()
-                    + "]", e);
-            return 500;
-        }
-    }
+            // -> the multipart initiator or bucket owner can do this action by
+            // default
+            if (verifyPermission) {
+                String initiator = uploadDao.getInitiator(uploadId);
+                if (null == initiator
+                        || !initiator.equals(UserContext.current()
+                                .getAccessKey())) {
+                    // -> write permission on a bucket allows a PutObject /
+                    // DeleteObject action on any object in the bucket
+                    S3PolicyContext context = new S3PolicyContext(
+                            PolicyActions.AbortMultipartUpload, bucketName);
+                    context.setKeyName(exists.getSecond());
+                    verifyAccess(context, "SBucket", bucket.getId(),
+                            SAcl.PERMISSION_WRITE);
+                }
+            }
+
+            // -> first get a list of all the uploaded files and delete one by
+            // one
+            S3MultipartPart[] parts = uploadDao.getParts(uploadId, 10000, 0);
+            for (int i = 0; i < parts.length; i++) {
+                bucketAdapter.deleteObject(host_storagelocation_pair.getSecond(), ServiceProvider.getInstance()
+                        .getMultipartDir(), parts[i].getPath());
+            }
+            uploadDao.deleteUpload(uploadId);
+            return 204;
+
+        } catch (PermissionDeniedException e) {
+            logger.error("freeUploadParts failed due to [" + e.getMessage()
+                    + "]", e);
+            throw e;
+        } catch (Exception e) {
+            logger.error("freeUploadParts failed due to [" + e.getMessage()
+                    + "]", e);
+            return 500;
+        }
+    }
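freeUploadParts above is the single cleanup path shared by abort and by post-completion garbage collection: look up the upload, optionally verify the caller, delete each part file one by one, then drop the upload record. A compact sketch of that lifecycle with a hypothetical in-memory PartTracker in place of MultipartLoadDao:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class PartTracker {
        private final Map<Integer, List<String>> partsByUpload = new HashMap<>();
        private int nextUploadId = 1;

        int initiate() {
            partsByUpload.put(nextUploadId, new ArrayList<>());
            return nextUploadId++;
        }

        void savePart(int uploadId, String partPath) {
            partsByUpload.get(uploadId).add(partPath);
        }

        // mirrors freeUploadParts: report a miss if the upload is unknown,
        // otherwise delete every part file and then the upload record itself
        boolean free(int uploadId) {
            List<String> parts = partsByUpload.remove(uploadId);
            if (parts == null) return false;          // the 404 case above
            for (String path : parts) deletePartFile(path);
            return true;
        }

        private void deletePartFile(String path) { /* storage-specific delete */ }
    }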
    /**
    * The initiator must have permission to write to the bucket in question in order to initiate
@@ -557,33 +548,33 @@ */
    public S3PutObjectInlineResponse initiateMultipartUpload(S3PutObjectInlineRequest request)
    {
-        S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();
-        String bucketName = request.getBucketName();
-        String nameKey = request.getKey();
+        S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();
+        String bucketName = request.getBucketName();
+        String nameKey = request.getKey();
-        // -> does the bucket exist and can we write to it?
-        SBucketVO bucket = bucketDao.getByName(bucketName);
-        if (bucket == null) {
-            logger.error( "initiateMultipartUpload failed since " + bucketName + " does not exist" );
-            response.setResultCode(404);
-        }
-
-        S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName );
-        context.setKeyName( nameKey );
-        context.setEvalParam( ConditionKeys.Acl, request.getCannedAccess());
-        verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE );
+        // -> does the bucket exist and can we write to it?
+        SBucketVO bucket = bucketDao.getByName(bucketName);
+        if (bucket == null) {
+            logger.error( "initiateMultipartUpload failed since " + bucketName + " does not exist" );
+            response.setResultCode(404);
+        }
-        createUploadFolder( bucketName );
+        S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName );
+        context.setKeyName( nameKey );
+        context.setEvalParam( ConditionKeys.Acl, request.getCannedAccess());
+        verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE );
+
+        createUploadFolder( bucketName );

        try {
-            MultipartLoadDao uploadDao = new MultipartLoadDao();
-            int uploadId = uploadDao.initiateUpload( UserContext.current().getAccessKey(), bucketName, nameKey, request.getCannedAccess(), request.getMetaEntries());
-            response.setUploadId( uploadId );
-            response.setResultCode(200);
-
+            MultipartLoadDao uploadDao = new MultipartLoadDao();
+            int uploadId = uploadDao.initiateUpload( UserContext.current().getAccessKey(), bucketName, nameKey, request.getCannedAccess(), request.getMetaEntries());
+            response.setUploadId( uploadId );
+            response.setResultCode(200);
+
        } catch( Exception e ) {
            logger.error("initiateMultipartUpload exception: ", e);
-            response.setResultCode(500);
+            response.setResultCode(500);
        }

        return response;
@@ -600,55 +591,55 @@ */
    public S3PutObjectInlineResponse saveUploadPart(S3PutObjectInlineRequest request, int uploadId, int partNumber)
    {
-        S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();
-        String bucketName = request.getBucketName();
+        S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();
+        String bucketName = request.getBucketName();
-        // -> we need to look up the final bucket to figure out which mount point to use to save the part in
-        //SBucketDao bucketDao = new SBucketDao();
-        SBucketVO bucket = bucketDao.getByName(bucketName);
-        if (bucket == null) {
-            logger.error( "saveUploadedPart failed since " + bucketName + " does not exist" );
-            response.setResultCode(404);
-        }
-        S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucketName );
-        context.setKeyName( request.getKey());
-        verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE );
-        try {
-            is = request.getDataInputStream();
-            String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), ServiceProvider.getInstance().getMultipartDir(), itemFileName);
-            response.setETag(md5Checksum);
-            MultipartLoadDao uploadDao = new MultipartLoadDao();
-            uploadDao.savePart(uploadId, partNumber, md5Checksum, itemFileName,(int) request.getContentLength());
-            response.setResultCode(200);
-
-        } catch (IOException e) {
-            logger.error("UploadPart failed due to " + e.getMessage(), e);
-            response.setResultCode(500);
-        } catch (OutOfStorageException e) {
-            logger.error("UploadPart failed due to " + e.getMessage(), e);
-            response.setResultCode(500);
-        } catch (Exception e) {
-            logger.error("UploadPart failed due to " + e.getMessage(), e);
-            response.setResultCode(500);
-        } finally {
-            if(is != null) {
-                try {
-                    is.close();
-                } catch (IOException e) {
-                    logger.error("UploadPart unable to close stream from data handler.", e);
-                }
-            }
-        }
-
-        return response;
+        OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket);
+        S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
+        String itemFileName = new String( uploadId + "-" + partNumber );
+        InputStream is = null;
+
+        try {
+            is = request.getDataInputStream();
+            String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), ServiceProvider.getInstance().getMultipartDir(), itemFileName);
+            response.setETag(md5Checksum);
+            MultipartLoadDao uploadDao = new MultipartLoadDao();
+            uploadDao.savePart(uploadId, partNumber, md5Checksum, itemFileName,(int) request.getContentLength());
+            response.setResultCode(200);
+
+        } catch (IOException e) {
+            logger.error("UploadPart failed due to " + e.getMessage(), e);
+            response.setResultCode(500);
+        } catch (OutOfStorageException e) {
+            logger.error("UploadPart failed due to " + e.getMessage(), e);
+            response.setResultCode(500);
+        } catch (Exception e) {
+            logger.error("UploadPart failed due to " + e.getMessage(), e);
+            response.setResultCode(500);
+        } finally {
+            if(is != null) {
+                try {
+                    is.close();
+                } catch (IOException e) {
+                    logger.error("UploadPart unable to close stream from data handler.", e);
+                }
+            }
+        }
+
+        return response;
    }
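Each part above is streamed to the multipart directory under the name uploadId + "-" + partNumber, and the MD5 of the bytes becomes the part's ETag. A minimal sketch of digesting while copying, using only JDK classes (a hypothetical helper, not the engine's saveObject):

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.security.MessageDigest;

    class Md5Copy {
        static String copyWithMd5(InputStream in, File dest) throws Exception {
            MessageDigest md5 = MessageDigest.getInstance("MD5");
            try (OutputStream out = new FileOutputStream(dest)) {
                byte[] buf = new byte[8192];
                int n;
                while ((n = in.read(buf)) != -1) {
                    md5.update(buf, 0, n);   // digest and persist in one pass
                    out.write(buf, 0, n);
                }
            }
            StringBuilder hex = new StringBuilder();
            for (byte b : md5.digest()) hex.append(String.format("%02x", b));
            return hex.toString();           // lowercase hex, ETag style
        }
    }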
-
+
    /**
    * Create the real object represented by all the parts of the multipart upload.
    * Called from S3ObjectAction at completion of multipart upload.
@@ -659,55 +650,55 @@ * N.B. - This method can be long-lasting
    * We are required to keep the connection alive by returning whitespace characters back periodically.
    */
-
+
    public S3PutObjectInlineResponse concatentateMultipartUploads(HttpServletResponse httpResp, S3PutObjectInlineRequest request, S3MultipartPart[] parts, OutputStream outputStream) throws IOException
    {
-        // [A] Set up and initial error checking
-        S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();
-        String bucketName = request.getBucketName();
-        String key = request.getKey();
-        S3MetaDataEntry[] meta = request.getMetaEntries();
+        // [A] Set up and initial error checking
+        S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();
+        String bucketName = request.getBucketName();
+        String key = request.getKey();
+        S3MetaDataEntry[] meta = request.getMetaEntries();
-        SBucketVO bucket = bucketDao.getByName(bucketName);
-        if (bucket == null) {
-            logger.error("completeMultipartUpload failed since " + bucketName
-                    + " does not exist");
-            response.setResultCode(404);
-        }
+        SBucketVO bucket = bucketDao.getByName(bucketName);
+        if (bucket == null) {
+            logger.error("completeMultipartUpload failed since " + bucketName
+                    + " does not exist");
+            response.setResultCode(404);
+        }
-        // [B] Now we need to create the final re-assembled object
-        // -> the allocObjectItem checks for the bucket policy PutObject
-        // permissions
-        OrderedPair object_objectitem_pair = allocObjectItem(
-                bucket, key, meta, null, request.getCannedAccess());
-        OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket);
+        // [B] Now we need to create the final re-assembled object
+        // -> the allocObjectItem checks for the bucket policy PutObject
+        // permissions
+        OrderedPair object_objectitem_pair = allocObjectItem(
+                bucket, key, meta, null, request.getCannedAccess());
+        OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket);
-        S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair
-                .getFirst());
-        String itemFileName = object_objectitem_pair.getSecond()
-                .getStoredPath();
+        S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair
+                .getFirst());
+        String itemFileName = object_objectitem_pair.getSecond()
+                .getStoredPath();
+
+        // -> Amazon defines that we must return a 200 response immediately to
+        // the client, but
+        // -> we don't know the version header until we hit here
+        httpResp.setStatus(200);
+        httpResp.setContentType("text/xml; charset=UTF-8");
+        String version = object_objectitem_pair.getSecond().getVersion();
+        if (null != version)
+            httpResp.addHeader("x-amz-version-id", version);
+        httpResp.flushBuffer();
+        Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
+        // [C] Re-assemble the object from its uploaded file parts
+        try {
+            // explicit transaction control to avoid holding transaction during
+            // long file concatenation process
+            txn.start();
+            OrderedPair result = bucketAdapter
+                    .concatentateObjects(host_storagelocation_pair.getSecond(),
+                            bucket.getName(), itemFileName, ServiceProvider
+                                    .getInstance().getMultipartDir(), parts,
+                            outputStream);
-        // -> Amazon defines that we must return a 200 response immediately to
-        // the client, but
-        // -> we don't know the version header until we hit here
-        httpResp.setStatus(200);
-        httpResp.setContentType("text/xml; charset=UTF-8");
-        String version = object_objectitem_pair.getSecond().getVersion();
-        if (null != version)
-            httpResp.addHeader("x-amz-version-id", version);
-        httpResp.flushBuffer();
-        Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
-        // [C] Re-assemble the object from its uploaded file parts
-        try {
-            // explicit transaction control to avoid holding transaction during
-            // long file concatenation process
-            txn.start();
-            OrderedPair result = bucketAdapter
-                    .concatentateObjects(host_storagelocation_pair.getSecond(),
-                            bucket.getName(), itemFileName, ServiceProvider
                                    .getInstance().getMultipartDir(), parts,
-                            outputStream);
-
            response.setETag(result.getFirst());
            response.setLastModified(DateHelper.toCalendar(object_objectitem_pair.getSecond().getLastModifiedTime()));
            SObjectItemVO item = itemDao.findById(object_objectitem_pair
@@ -716,13 +707,13 @@ item.setStoredSize(result.getSecond().longValue());
            itemDao.update(item.getId(), item);
            response.setResultCode(200);
-        } catch (Exception e) {
-            logger.error("completeMultipartUpload failed due to " + e.getMessage(),e);
-            txn.close();
-        }
-        return response;
+        } catch (Exception e) {
+            logger.error("completeMultipartUpload failed due to " + e.getMessage(),e);
+            txn.close();
+        }
+        return response;
    }
-
+
    /**
    * Return a S3PutObjectInlineResponse which represents an object being created into a bucket
    * Called from S3ObjectAction when PUTting or POSTing an object.
    */
    @DB
    public S3PutObjectInlineResponse handleRequest(S3PutObjectInlineRequest request)
    {
-        S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();
-        String bucketName = request.getBucketName();
-        String key = request.getKey();
-        long contentLength = request.getContentLength();
-        S3MetaDataEntry[] meta = request.getMetaEntries();
-        S3AccessControlList acl = request.getAcl();
-
-        SBucketVO bucket = bucketDao.getByName(bucketName);
-        if (bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
-
+        S3PutObjectInlineResponse response = new S3PutObjectInlineResponse();
+        String bucketName = request.getBucketName();
+        String key = request.getKey();
+        long contentLength = request.getContentLength();
+        S3MetaDataEntry[] meta = request.getMetaEntries();
+        S3AccessControlList acl = request.getAcl();
-        // Is the caller allowed to write the object?
-        // The allocObjectItem checks for the bucket policy PutObject permissions
-        OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, request.getCannedAccess());
-        OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket);
-
-        S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
-        String itemFileName = object_objectitem_pair.getSecond().getStoredPath();
-        InputStream is = null;
-        Transaction txn = null;
-        try {
-            // explicit transaction control to avoid holding transaction during file-copy process
-
-            txn = Transaction.open(Transaction.AWSAPI_DB);
-            txn.start();
-            is = request.getDataInputStream();
-            String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName);
-            response.setETag(md5Checksum);
-            response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime()));
-            response.setVersion( object_objectitem_pair.getSecond().getVersion());
-
-            //SObjectItemDaoImpl itemDao = new SObjectItemDaoImpl();
-            SObjectItemVO item = itemDao.findById(object_objectitem_pair.getSecond().getId());
-            item.setMd5(md5Checksum);
-            item.setStoredSize(contentLength);
-            itemDao.update(item.getId(), item);
-            txn.commit();
-        } catch (IOException e) {
-            logger.error("PutObjectInline failed due to " + e.getMessage(), e);
-        } catch (OutOfStorageException e) {
-            logger.error("PutObjectInline failed due to " + e.getMessage(), e);
-        } finally {
-            if(is != null) {
-                try {
-                    is.close();
-                } catch (IOException e) {
-                    logger.error("PutObjectInline unable to close stream from data handler.", e);
-                }
-            }
-            txn.close();
-        }
-
-        return response;
+        SBucketVO bucket = bucketDao.getByName(bucketName);
+        if (bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
+
+
+        // Is the caller allowed to write the object?
+        // The allocObjectItem checks for the bucket policy PutObject permissions
+        OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, request.getCannedAccess());
+        OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket);
+
+        S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
+        String itemFileName = object_objectitem_pair.getSecond().getStoredPath();
+        InputStream is = null;
+        Transaction txn = null;
+        try {
+            // explicit transaction control to avoid holding transaction during file-copy process
+
+            txn = Transaction.open(Transaction.AWSAPI_DB);
+            txn.start();
+            is = request.getDataInputStream();
+            String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName);
+            response.setETag(md5Checksum);
+            response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime()));
+            response.setVersion( object_objectitem_pair.getSecond().getVersion());
+
+            //SObjectItemDaoImpl itemDao = new SObjectItemDaoImpl();
+            SObjectItemVO item = itemDao.findById(object_objectitem_pair.getSecond().getId());
+            item.setMd5(md5Checksum);
+            item.setStoredSize(contentLength);
+            itemDao.update(item.getId(), item);
+            txn.commit();
+        } catch (IOException e) {
+            logger.error("PutObjectInline failed due to " + e.getMessage(), e);
+        } catch (OutOfStorageException e) {
+            logger.error("PutObjectInline failed due to " + e.getMessage(), e);
+        } finally {
+            if(is != null) {
+                try {
+                    is.close();
+                } catch (IOException e) {
+                    logger.error("PutObjectInline unable to close stream from data handler.", e);
+                }
+            }
+            txn.close();
+        }
+
+        return response;
    }
-
+
    /**
    * Return a S3PutObjectResponse which represents an object being created into a bucket
    * Called from S3RestServlet when processing a DIME request.
@@ -792,56 +783,56 @@ public S3PutObjectResponse handleRequest(S3PutObjectRequest request)
    {
-        S3PutObjectResponse response = new S3PutObjectResponse();
-        String bucketName = request.getBucketName();
-        String key = request.getKey();
-        long contentLength = request.getContentLength();
-        S3MetaDataEntry[] meta = request.getMetaEntries();
-        S3AccessControlList acl = request.getAcl();
-
-        SBucketVO bucket = bucketDao.getByName(bucketName);
-        if(bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
-
-        // Is the caller allowed to write the object?
-        // The allocObjectItem checks for the bucket policy PutObject permissions
-        OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, null);
-        OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket);
-
-        S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
-        String itemFileName = object_objectitem_pair.getSecond().getStoredPath();
-        InputStream is = null;
-        Transaction txn = null;
-        try {
-            // explicit transaction control to avoid holding transaction during file-copy process
-
-            txn = Transaction.open(Transaction.AWSAPI_DB);
-            txn.start();
-
-            is = request.getInputStream();
-            String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName);
-            response.setETag(md5Checksum);
-            response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime()));
-
-            SObjectItemVO item = itemDao.findById(object_objectitem_pair.getSecond().getId());
-            item.setMd5(md5Checksum);
-            item.setStoredSize(contentLength);
-            itemDao.update(item.getId(), item);
-            txn.commit();
-
-        } catch (OutOfStorageException e) {
-            logger.error("PutObject failed due to " + e.getMessage(), e);
-        } finally {
-            if(is != null) {
-                try {
-                    is.close();
-                } catch (IOException e) {
-                    logger.error("Unable to close stream from data handler.", e);
-                }
-            }
-            txn.close();
-        }
-
-        return response;
+        S3PutObjectResponse response = new S3PutObjectResponse();
+        String bucketName = request.getBucketName();
+        String key = request.getKey();
+        long contentLength = request.getContentLength();
+        S3MetaDataEntry[] meta = request.getMetaEntries();
+        S3AccessControlList acl = request.getAcl();
+
+        SBucketVO bucket = bucketDao.getByName(bucketName);
+        if(bucket == null) throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
+
+        // Is the caller allowed to write the object?
+        // The allocObjectItem checks for the bucket policy PutObject permissions
+        OrderedPair object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, null);
+        OrderedPair host_storagelocation_pair = getBucketStorageHost(bucket);
+
+        S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst());
+        String itemFileName = object_objectitem_pair.getSecond().getStoredPath();
+        InputStream is = null;
+        Transaction txn = null;
+        try {
+            // explicit transaction control to avoid holding transaction during file-copy process
+
+            txn = Transaction.open(Transaction.AWSAPI_DB);
+            txn.start();
+
+            is = request.getInputStream();
+            String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName);
+            response.setETag(md5Checksum);
+            response.setLastModified(DateHelper.toCalendar( object_objectitem_pair.getSecond().getLastModifiedTime()));
+
+            SObjectItemVO item = itemDao.findById(object_objectitem_pair.getSecond().getId());
+            item.setMd5(md5Checksum);
+            item.setStoredSize(contentLength);
+            itemDao.update(item.getId(), item);
+            txn.commit();
+
+        } catch (OutOfStorageException e) {
+            logger.error("PutObject failed due to " + e.getMessage(), e);
+        } finally {
+            if(is != null) {
+                try {
+                    is.close();
+                } catch (IOException e) {
+                    logger.error("Unable to close stream from data handler.", e);
+                }
+            }
+            txn.close();
+        }
+
+        return response;
    }
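Both PUT paths above open the transaction explicitly and close it in a finally block, so a failure during the file copy cannot leak the database handle; the commit happens only after the md5/size row update succeeds. The shape of that idiom, with a hypothetical Tx type standing in for com.cloud.utils.db.Transaction:

    class Tx implements AutoCloseable {
        private boolean committed;
        void start() { /* begin */ }
        void commit() { committed = true; /* commit */ }
        @Override public void close() {
            if (!committed) { /* roll back */ }
            /* release the connection */
        }
    }

    class PutFlow {
        // try-with-resources plays the role of the finally { txn.close(); } above
        static void put(Runnable copyFile, Runnable updateRow) {
            try (Tx txn = new Tx()) {
                txn.start();
                copyFile.run();      // long-running copy to the storage host
                updateRow.run();     // record md5 + stored size
                txn.commit();
            }
        }
    }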
    /**
@@ -849,795 +840,795 @@ * version of an object. To set the ACL of a different version, use the versionId subresource.
    * Called from S3ObjectAction to PUT an object's ACL.
    */
-
+
    public S3Response handleRequest(S3SetObjectAccessControlPolicyRequest request)
    {
-        S3PolicyContext context = null;
-
-        // [A] First find the object in the bucket
-        S3Response response = new S3Response();
-        String bucketName = request.getBucketName();
-        SBucketVO sbucket = bucketDao.getByName( bucketName );
-        if(sbucket == null) {
-            response.setResultCode(404);
-            response.setResultDescription("Bucket " + bucketName + " does not exist");
-            return response;
-        }
-
-        String nameKey = request.getKey();
-        SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
-        if(sobject == null) {
-            response.setResultCode(404);
-            response.setResultDescription("Object " + request.getKey() + " in bucket " + bucketName + " does not exist");
-            return response;
-        }
-
-        String deletionMark = sobject.getDeletionMark();
-        if (null != deletionMark) {
-            response.setResultCode(404);
-            response.setResultDescription("Object " + request.getKey() + " has been deleted (1)");
-            return response;
-        }
-
+        S3PolicyContext context = null;
-        // [B] Versioning allows the client to ask for a specific version not just the latest
-        SObjectItemVO item = null;
+        // [A] First find the object in the bucket
+        S3Response response = new S3Response();
+        String bucketName = request.getBucketName();
+        SBucketVO sbucket = bucketDao.getByName( bucketName );
+        if(sbucket == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Bucket " + bucketName + " does not exist");
+            return response;
+        }
+
+        String nameKey = request.getKey();
+        SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
+        if(sobject == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Object " + request.getKey() + " in bucket " + bucketName + " does not exist");
+            return response;
+        }
+
+        String deletionMark = sobject.getDeletionMark();
+        if (null != deletionMark) {
+            response.setResultCode(404);
+            response.setResultDescription("Object " + request.getKey() + " has been deleted (1)");
+            return response;
+        }
+
+
+        // [B] Versioning allows the client to ask for a specific version not just the latest
+        SObjectItemVO item = null;
        int versioningStatus = sbucket.getVersioningStatus();
-        String wantVersion = request.getVersion();
-        if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion)
-             item = sobject.getVersion( wantVersion );
-        else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus ));
-
-        if (item == null) {
-            response.setResultCode(404);
-            response.setResultDescription("Object " + request.getKey() + " has been deleted (2)");
-            return response;
-        }
-        if ( SBucket.VERSIONING_ENABLED == versioningStatus ) {
-            context = new S3PolicyContext( PolicyActions.PutObjectAclVersion, bucketName );
-            context.setEvalParam( ConditionKeys.VersionId, wantVersion );
-            response.setVersion( item.getVersion());
-        }
-        else context = new S3PolicyContext( PolicyActions.PutObjectAcl, bucketName );
-        context.setKeyName( nameKey );
-        verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_WRITE_ACL );
+        String wantVersion = request.getVersion();
+        if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion)
+             item = sobject.getVersion( wantVersion );
+        else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus ));
+        if (item == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Object " + request.getKey() + " has been deleted (2)");
+            return response;
+        }
-        // -> the acl always goes on the instance of the object
-        aclDao.save("SObjectItem", item.getId(), request.getAcl());
-
-        response.setResultCode(200);
-        response.setResultDescription("OK");
-        return response;
+        if ( SBucket.VERSIONING_ENABLED == versioningStatus ) {
+            context = new S3PolicyContext( PolicyActions.PutObjectAclVersion, bucketName );
+            context.setEvalParam( ConditionKeys.VersionId, wantVersion );
+            response.setVersion( item.getVersion());
+        }
+        else context = new S3PolicyContext( PolicyActions.PutObjectAcl, bucketName );
+        context.setKeyName( nameKey );
+        verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_WRITE_ACL );
+
+        // -> the acl always goes on the instance of the object
+        aclDao.save("SObjectItem", item.getId(), request.getAcl());
+
+        response.setResultCode(200);
+        response.setResultDescription("OK");
+        return response;
    }
-
+
    /**
    * By default, GET returns ACL information about the latest version of an object. To return ACL
    * information about a different version, use the versionId subresource
    * Called from S3ObjectAction to get an object's ACL.
    */
-
+
    public S3AccessControlPolicy handleRequest(S3GetObjectAccessControlPolicyRequest request)
    {
-        S3PolicyContext context = null;
+        S3PolicyContext context = null;
-        // [A] Does the object exist that holds the ACL we are looking for?
-        S3AccessControlPolicy policy = new S3AccessControlPolicy();
-
-        String bucketName = request.getBucketName();
-        SBucketVO sbucket = bucketDao.getByName( bucketName );
-        if (sbucket == null)
-            throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
-
-        //SObjectDaoImpl sobjectDao = new SObjectDaoImpl();
-        String nameKey = request.getKey();
-        SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
-        if (sobject == null)
-            throw new NoSuchObjectException("Object " + request.getKey() + " does not exist");
-
-        String deletionMark = sobject.getDeletionMark();
-        if (null != deletionMark) {
-            policy.setResultCode(404);
-            policy.setResultDescription("Object " + request.getKey() + " has been deleted (1)");
-            return policy;
-        }
-
+        // [A] Does the object exist that holds the ACL we are looking for?
+        S3AccessControlPolicy policy = new S3AccessControlPolicy();
-        // [B] Versioning allows the client to ask for a specific version not just the latest
-        SObjectItemVO item = null;
+        String bucketName = request.getBucketName();
+        SBucketVO sbucket = bucketDao.getByName( bucketName );
+        if (sbucket == null)
+            throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
+
+        //SObjectDaoImpl sobjectDao = new SObjectDaoImpl();
+        String nameKey = request.getKey();
+        SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
+        if (sobject == null)
+            throw new NoSuchObjectException("Object " + request.getKey() + " does not exist");
+
+        String deletionMark = sobject.getDeletionMark();
+        if (null != deletionMark) {
+            policy.setResultCode(404);
+            policy.setResultDescription("Object " + request.getKey() + " has been deleted (1)");
+            return policy;
+        }
+
+
+        // [B] Versioning allows the client to ask for a specific version not just the latest
+        SObjectItemVO item = null;
        int versioningStatus = sbucket.getVersioningStatus();
-        String wantVersion = request.getVersion();
-        if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion)
-             item = sobject.getVersion( wantVersion );
-        else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus ));
-
-        if (item == null) {
-            policy.setResultCode(404);
-            policy.setResultDescription("Object " + request.getKey() + " has been deleted (2)");
-            return policy;
-        }
+        String wantVersion = request.getVersion();
+        if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion)
+             item = sobject.getVersion( wantVersion );
+        else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus ));
+
+        if (item == null) {
+            policy.setResultCode(404);
+            policy.setResultDescription("Object " + request.getKey() + " has been deleted (2)");
+            return policy;
+        }
+
+        if ( SBucket.VERSIONING_ENABLED == versioningStatus ) {
+            context = new S3PolicyContext( PolicyActions.GetObjectVersionAcl, bucketName );
+            context.setEvalParam( ConditionKeys.VersionId, wantVersion );
+            policy.setVersion( item.getVersion());
+        }
+        else context = new S3PolicyContext( PolicyActions.GetObjectAcl, bucketName );
+        context.setKeyName( nameKey );
+        verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ_ACL );
-        if ( SBucket.VERSIONING_ENABLED == versioningStatus ) {
-            context = new S3PolicyContext( PolicyActions.GetObjectVersionAcl, bucketName );
-            context.setEvalParam( ConditionKeys.VersionId, wantVersion );
-            policy.setVersion( item.getVersion());
-        }
-        else context = new S3PolicyContext( PolicyActions.GetObjectAcl, bucketName );
-        context.setKeyName( nameKey );
-        verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ_ACL );
        // [C] ACLs are ALWAYS on an instance of the object
-        S3CanonicalUser owner = new S3CanonicalUser();
-        owner.setID(sobject.getOwnerCanonicalId());
-        owner.setDisplayName("");
-        policy.setOwner(owner);
-        policy.setResultCode(200);
-
-
-        List grants = aclDao.listGrants( "SObjectItem", item.getId());
-        policy.setGrants(S3Grant.toGrants(grants));
-        return policy;
+        S3CanonicalUser owner = new S3CanonicalUser();
+        owner.setID(sobject.getOwnerCanonicalId());
+        owner.setDisplayName("");
+        policy.setOwner(owner);
+        policy.setResultCode(200);
+
+
+        List grants = aclDao.listGrants( "SObjectItem", item.getId());
+        policy.setGrants(S3Grant.toGrants(grants));
+        return policy;
    }
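The "[B]" block above appears nearly verbatim in four handlers: with versioning enabled and an explicit version id, fetch exactly that item; otherwise take the latest, skipping deletion-marked items only when versioning is off. A hedged sketch of hoisting it into a helper (the engine itself does not define one; the names reuse its own types hypothetically):

    // assumes SObjectVO exposes getVersion(String) and getLatestVersion(boolean)
    static SObjectItemVO resolveItem(SObjectVO sobject, int versioningStatus, String wantVersion) {
        if (SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion)
            return sobject.getVersion(wantVersion);      // may be null -> caller answers 404
        return sobject.getLatestVersion(SBucket.VERSIONING_ENABLED != versioningStatus);
    }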
-
+
    /**
    * Handle requests for GET object and HEAD "get object extended"
    * Called from S3ObjectAction for GET and HEAD of an object.
    */
-
+
    public S3GetObjectResponse handleRequest(S3GetObjectRequest request)
    {
-        S3GetObjectResponse response = new S3GetObjectResponse();
-        S3PolicyContext context = null;
-        boolean ifRange = false;
-        long bytesStart = request.getByteRangeStart();
-        long bytesEnd = request.getByteRangeEnd();
-        int resultCode = 200;
+        S3GetObjectResponse response = new S3GetObjectResponse();
+        S3PolicyContext context = null;
+        boolean ifRange = false;
+        long bytesStart = request.getByteRangeStart();
+        long bytesEnd = request.getByteRangeEnd();
+        int resultCode = 200;
-        // [A] Verify that the bucket and the object exist
-
-        String bucketName = request.getBucketName();
-        SBucketVO sbucket = bucketDao.getByName(bucketName);
-        if (sbucket == null) {
-            response.setResultCode(404);
-            response.setResultDescription("Bucket " + request.getBucketName() + " does not exist");
-            return response;
-        }
+        // [A] Verify that the bucket and the object exist
-        String nameKey = request.getKey();
-        SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
-        if (sobject == null) {
-            response.setResultCode(404);
-            response.setResultDescription("Object " + request.getKey() + " does not exist in bucket " + request.getBucketName());
-            return response;
-        }
-
-        String deletionMark = sobject.getDeletionMark();
-        if (null != deletionMark) {
-            response.setDeleteMarker( deletionMark );
-            response.setResultCode(404);
-            response.setResultDescription("Object " + request.getKey() + " has been deleted (1)");
-            return response;
-        }
-
+        String bucketName = request.getBucketName();
+        SBucketVO sbucket = bucketDao.getByName(bucketName);
+        if (sbucket == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Bucket " + request.getBucketName() + " does not exist");
+            return response;
+        }
-        // [B] Versioning allows the client to ask for a specific version not just the latest
-        SObjectItemVO item = null;
+        String nameKey = request.getKey();
+        SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
+        if (sobject == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Object " + request.getKey() + " does not exist in bucket " + request.getBucketName());
+            return response;
+        }
+
+        String deletionMark = sobject.getDeletionMark();
+        if (null != deletionMark) {
+            response.setDeleteMarker( deletionMark );
+            response.setResultCode(404);
+            response.setResultDescription("Object " + request.getKey() + " has been deleted (1)");
+            return response;
+        }
+
+
+        // [B] Versioning allows the client to ask for a specific version not just the latest
+        SObjectItemVO item = null;
        int versioningStatus = sbucket.getVersioningStatus();
-        String wantVersion = request.getVersion();
-        if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion)
-             item = sobject.getVersion( wantVersion );
-        else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus ));
-
-        if (item == null) {
-            response.setResultCode(404);
-            response.setResultDescription("Object " + request.getKey() + " has been deleted (2)");
-            return response;
-        }
-
-        if ( SBucket.VERSIONING_ENABLED == versioningStatus ) {
-            context = new S3PolicyContext( PolicyActions.GetObjectVersion, bucketName );
-            context.setEvalParam( ConditionKeys.VersionId, wantVersion );
-        }
-        else context = new S3PolicyContext( PolicyActions.GetObject, bucketName );
-        context.setKeyName( nameKey );
-        verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ );
-
-
-        // [C] Handle all the IFModifiedSince ... conditions, and access privileges
-        // -> http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.27 (HTTP If-Range header)
-        if (request.isReturnCompleteObjectOnConditionFailure() && (0 <= bytesStart && 0 <= bytesEnd)) ifRange = true;
+        String wantVersion = request.getVersion();
+        if ( SBucket.VERSIONING_ENABLED == versioningStatus && null != wantVersion)
+             item = sobject.getVersion( wantVersion );
+        else item = sobject.getLatestVersion(( SBucket.VERSIONING_ENABLED != versioningStatus ));
-        resultCode = conditionPassed( request.getConditions(), item.getLastModifiedTime(), item.getMd5(), ifRange );
-        if ( -1 == resultCode ) {
-            // -> If-Range implementation, we have to return the entire object
-            resultCode = 200;
-            bytesStart = -1;
-            bytesEnd = -1;
-        }
-        else if (200 != resultCode) {
-            response.setResultCode( resultCode );
-            response.setResultDescription( "Precondition Failed" );
-            return response;
-        }
+        if (item == null) {
+            response.setResultCode(404);
+            response.setResultDescription("Object " + request.getKey() + " has been deleted (2)");
+            return response;
+        }
+
+        if ( SBucket.VERSIONING_ENABLED == versioningStatus ) {
+            context = new S3PolicyContext( PolicyActions.GetObjectVersion, bucketName );
+            context.setEvalParam( ConditionKeys.VersionId, wantVersion );
+        }
+        else context = new S3PolicyContext( PolicyActions.GetObject, bucketName );
+        context.setKeyName( nameKey );
+        verifyAccess( context, "SObjectItem", item.getId(), SAcl.PERMISSION_READ );
-        // [D] Return the contents of the object inline
-        // -> extract the meta data that corresponds the specific versioned item
+        // [C] Handle all the IFModifiedSince ... conditions, and access privileges
+        // -> http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.27 (HTTP If-Range header)
+        if (request.isReturnCompleteObjectOnConditionFailure() && (0 <= bytesStart && 0 <= bytesEnd)) ifRange = true;
-        List itemMetaData = metaDao.getByTarget( "SObjectItem", item.getId());
-        if (null != itemMetaData)
-        {
-            int i = 0;
-            S3MetaDataEntry[] metaEntries = new S3MetaDataEntry[ itemMetaData.size() ];
-            ListIterator it = itemMetaData.listIterator();
-            while( it.hasNext()) {
-                SMetaVO oneTag = (SMetaVO)it.next();
-                S3MetaDataEntry oneEntry = new S3MetaDataEntry();
-                oneEntry.setName( oneTag.getName());
-                oneEntry.setValue( oneTag.getValue());
-                metaEntries[i++] = oneEntry;
-            }
-            response.setMetaEntries( metaEntries );
-        }
-
-        // -> support a single byte range
-        if ( 0 <= bytesStart && 0 <= bytesEnd ) {
-            response.setContentLength( bytesEnd - bytesStart );
-            resultCode = 206;
-        }
-        else response.setContentLength( item.getStoredSize());
-
-        if(request.isReturnData())
-        {
-            response.setETag(item.getMd5());
-            response.setLastModified(DateHelper.toCalendar( item.getLastModifiedTime()));
-            response.setVersion( item.getVersion());
-            if (request.isInlineData())
-            {
-                OrderedPair tupleSHostInfo = getBucketStorageHost(sbucket);
-                S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleSHostInfo.getFirst());
-
-                if ( 0 <= bytesStart && 0 <= bytesEnd )
-                     response.setData(bucketAdapter.loadObjectRange(tupleSHostInfo.getSecond(),
-                             request.getBucketName(), item.getStoredPath(), bytesStart, bytesEnd ));
-                else response.setData(bucketAdapter.loadObject(tupleSHostInfo.getSecond(), request.getBucketName(), item.getStoredPath()));
-            }
-        }
-
-        response.setResultCode( resultCode );
-        response.setResultDescription("OK");
-        return response;
+        resultCode = conditionPassed( request.getConditions(), item.getLastModifiedTime(), item.getMd5(), ifRange );
( -1 == resultCode ) {
+ // -> If-Range implementation, we have to return the entire object
+ resultCode = 200;
+ bytesStart = -1;
+ bytesEnd = -1;
+ }
+ else if (200 != resultCode) {
+ response.setResultCode( resultCode );
+ response.setResultDescription( "Precondition Failed" );
+ return response;
+ }
+
+
+ // [D] Return the contents of the object inline
+ // -> extract the meta data that corresponds to the specific versioned item
+
+ List<SMetaVO> itemMetaData = metaDao.getByTarget( "SObjectItem", item.getId());
+ if (null != itemMetaData)
+ {
+ int i = 0;
+ S3MetaDataEntry[] metaEntries = new S3MetaDataEntry[ itemMetaData.size() ];
+ ListIterator<SMetaVO> it = itemMetaData.listIterator();
+ while( it.hasNext()) {
+ SMetaVO oneTag = it.next();
+ S3MetaDataEntry oneEntry = new S3MetaDataEntry();
+ oneEntry.setName( oneTag.getName());
+ oneEntry.setValue( oneTag.getValue());
+ metaEntries[i++] = oneEntry;
+ }
+ response.setMetaEntries( metaEntries );
+ }
+
+ // -> support a single byte range
+ if ( 0 <= bytesStart && 0 <= bytesEnd ) {
+ response.setContentLength( bytesEnd - bytesStart );
+ resultCode = 206;
+ }
+ else response.setContentLength( item.getStoredSize());
+
+ if(request.isReturnData())
+ {
+ response.setETag(item.getMd5());
+ response.setLastModified(DateHelper.toCalendar( item.getLastModifiedTime()));
+ response.setVersion( item.getVersion());
+ if (request.isInlineData())
+ {
+ OrderedPair<SHostVO, String> tupleSHostInfo = getBucketStorageHost(sbucket);
+ S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(tupleSHostInfo.getFirst());
+
+ if ( 0 <= bytesStart && 0 <= bytesEnd )
+ response.setData(bucketAdapter.loadObjectRange(tupleSHostInfo.getSecond(),
+ request.getBucketName(), item.getStoredPath(), bytesStart, bytesEnd ));
+ else response.setData(bucketAdapter.loadObject(tupleSHostInfo.getSecond(), request.getBucketName(), item.getStoredPath()));
+ }
+ }
+
+ response.setResultCode( resultCode );
+ response.setResultDescription("OK");
+ return response;
 }
-
+
 /**
 * Handle object deletion requests, both versioning and non-versioning requirements.
 * Called from S3ObjectAction for deletion.
 */
- public S3Response handleRequest(S3DeleteObjectRequest request)
- {
- // Verify that the bucket and object exist
- S3Response response = new S3Response();
-
- String bucketName = request.getBucketName();
- SBucketVO sbucket = bucketDao.getByName( bucketName );
- if (sbucket == null) {
- response.setResultCode(404);
- response.setResultDescription("<Code>Bucket dosen't exists</Code><Message>Bucket " + bucketName + " does not exist</Message>");
- return response;
- }
-
-
- String nameKey = request.getKey();
- SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
- if (sobject == null) {
- response.setResultCode(404);
- response.setResultDescription("<Code>Not Found</Code><Message>No object with key " + nameKey + " exists in bucket " + bucketName + "</Message>");
- return response;
- }
-
-
- // Discover whether versioning is enabled. If so versioning requires the setting of a deletion marker.
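// ----------------------------------------------------------------------
// Editor's sketch (an annotation, not part of this patch): the conditional
// GET decision that conditionPassed() implements for the handler above.
// Names and signatures are illustrative stand-ins, not CloudStack API; only
// the status-code mapping mirrors the code: If-Modified-Since failure -> 304,
// If-Unmodified-Since / If-Match failure -> 412 unless an If-Range header
// asks for the complete object instead (shown as -1, i.e. a plain 200 with
// no sub-range), If-None-Match failure -> 412, otherwise 200.
// ----------------------------------------------------------------------
class ConditionalGetSketch {
    static int evaluate(java.util.Date lastModified, String etag,
                        java.util.Date ifModifiedSince, java.util.Date ifUnmodifiedSince,
                        String ifMatch, String ifNoneMatch, boolean ifRange) {
        if (ifModifiedSince != null && !lastModified.after(ifModifiedSince))
            return 304;                 // unchanged since the client's copy
        if (ifUnmodifiedSince != null && lastModified.after(ifUnmodifiedSince))
            return ifRange ? -1 : 412;  // changed; If-Range => send whole object
        if (ifMatch != null && !ifMatch.equals(etag))
            return ifRange ? -1 : 412;  // ETag no longer matches
        if (ifNoneMatch != null && ifNoneMatch.equals(etag))
            return 412;                 // ETag still matches when it must not
        return 200;
    }
}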
- String storedPath = null;
- SObjectItemVO item = null;
+ public S3Response handleRequest(S3DeleteObjectRequest request)
+ {
+ // Verify that the bucket and object exist
+ S3Response response = new S3Response();
+
+ String bucketName = request.getBucketName();
+ SBucketVO sbucket = bucketDao.getByName( bucketName );
+ if (sbucket == null) {
+ response.setResultCode(404);
+ response.setResultDescription("<Code>Bucket dosen't exists</Code><Message>Bucket " + bucketName + " does not exist</Message>");
+ return response;
+ }
+
+
+ String nameKey = request.getKey();
+ SObjectVO sobject = objectDao.getByNameKey( sbucket, nameKey );
+ if (sobject == null) {
+ response.setResultCode(404);
+ response.setResultDescription("<Code>Not Found</Code><Message>No object with key " + nameKey + " exists in bucket " + bucketName + "</Message>");
+ return response;
+ }
+
+
+ // Discover whether versioning is enabled. If so versioning requires the setting of a deletion marker.
+ String storedPath = null;
+ SObjectItemVO item = null;
 int versioningStatus = sbucket.getVersioningStatus();
- if ( SBucket.VERSIONING_ENABLED == versioningStatus )
- {
- String wantVersion = request.getVersion();
- S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObjectVersion, bucketName );
- context.setKeyName( nameKey );
- context.setEvalParam( ConditionKeys.VersionId, wantVersion );
- verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE );
+ if ( SBucket.VERSIONING_ENABLED == versioningStatus )
+ {
+ String wantVersion = request.getVersion();
+ S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObjectVersion, bucketName );
+ context.setKeyName( nameKey );
+ context.setEvalParam( ConditionKeys.VersionId, wantVersion );
+ verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE );

- if (null == wantVersion) {
- // If versioning is on and no versionId is given then we just write a deletion marker
- sobject.setDeletionMark( UUID.randomUUID().toString());
- objectDao.update(sobject.getId(), sobject );
- response.setResultDescription("<DeleteMarker>true</DeleteMarker><DeleteMarkerVersionId>"+ sobject.getDeletionMark() +"</DeleteMarkerVersionId>");
- }
- else {
- // Otherwise remove the deletion marker if this has been set
- String deletionMarker = sobject.getDeletionMark();
- if (null != deletionMarker && wantVersion.equalsIgnoreCase( deletionMarker )) {
- sobject.setDeletionMark( null );
- objectDao.update(sobject.getId(), sobject );
- response.setResultDescription("<VersionId>" + wantVersion +"</VersionId>");
- response.setResultDescription("<DeleteMarker>true</DeleteMarker><DeleteMarkerVersionId>"+ sobject.getDeletionMark() +"</DeleteMarkerVersionId>");
- response.setResultCode(204);
- return response;
- }
-
- // If versioning is on and the versionId is given (non-null) then delete the object matching that version
- if ( null == (item = sobject.getVersion( wantVersion ))) {
- response.setResultCode(404);
- return response;
- }
- else {
- // Providing versionId is non-null, then just delete the one item that matches the versionId from the database
- storedPath = item.getStoredPath();
- sobject.deleteItem( item.getId());
- objectDao.update(sobject.getId(), sobject );
- response.setResultDescription("<VersionId>" + wantVersion +"</VersionId>");
- }
- }
- }
- else
- { // If versioning is off then we do delete the null object
- S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObject, bucketName );
- context.setKeyName( nameKey );
- verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE );
+ if (null == wantVersion) {
+ // If versioning is on and no versionId is given then we just write a deletion marker
+ sobject.setDeletionMark( UUID.randomUUID().toString());
+ objectDao.update(sobject.getId(), sobject );
+ response.setResultDescription("<DeleteMarker>true</DeleteMarker><DeleteMarkerVersionId>"+ sobject.getDeletionMark() +"</DeleteMarkerVersionId>");
+ }
+ else {
+ // Otherwise remove the deletion marker if this has been set
+ String deletionMarker = sobject.getDeletionMark();
+ if (null != deletionMarker && wantVersion.equalsIgnoreCase( deletionMarker )) {
+ sobject.setDeletionMark( null );
+ objectDao.update(sobject.getId(), sobject );
+ response.setResultDescription("<VersionId>" + wantVersion +"</VersionId>");
+ response.setResultDescription("<DeleteMarker>true</DeleteMarker><DeleteMarkerVersionId>"+ sobject.getDeletionMark() +"</DeleteMarkerVersionId>");
+ response.setResultCode(204);
+ return response;
+ }
- if ( null == (item = sobject.getLatestVersion( true ))) {
- response.setResultCode(404);
- response.setResultDescription("<Code>AccessDenied</Code><Message>Access Denied</Message>");
- return response;
- }
- else {
- // If there is no item with a null version then we are done
- if (null == item.getVersion()) {
- // Otherwiswe remove the entire object
- // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl and SMeta objects.
- storedPath = item.getStoredPath();
- deleteMetaData( item.getId());
- deleteObjectAcls( "SObjectItem", item.getId());
- objectDao.remove(sobject.getId());
- }
- }
- }
-
- // Delete the file holding the object
- if (null != storedPath)
- {
- OrderedPair host_storagelocation_pair = getBucketStorageHost( sbucket );
- S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter( host_storagelocation_pair.getFirst());
- bucketAdapter.deleteObject( host_storagelocation_pair.getSecond(), bucketName, storedPath );
- }
-
- response.setResultCode(204);
- return response;
+ // If versioning is on and the versionId is given (non-null) then delete the object matching that version
+ if ( null == (item = sobject.getVersion( wantVersion ))) {
+ response.setResultCode(404);
+ return response;
+ }
+ else {
+ // Providing versionId is non-null, then just delete the one item that matches the versionId from the database
+ storedPath = item.getStoredPath();
+ sobject.deleteItem( item.getId());
+ objectDao.update(sobject.getId(), sobject );
+ response.setResultDescription("<VersionId>" + wantVersion +"</VersionId>");
+ }
+ }
+ }
+ else
+ { // If versioning is off then we do delete the null object
+ S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteObject, bucketName );
+ context.setKeyName( nameKey );
+ verifyAccess( context, "SBucket", sbucket.getId(), SAcl.PERMISSION_WRITE );
+
+ if ( null == (item = sobject.getLatestVersion( true ))) {
+ response.setResultCode(404);
+ response.setResultDescription("<Code>AccessDenied</Code><Message>Access Denied</Message>");
+ return response;
+ }
+ else {
+ // If there is no item with a null version then we are done
+ if (null == item.getVersion()) {
+ // Otherwise remove the entire object
+ // Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl and SMeta objects.
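// ----------------------------------------------------------------------
// Editor's sketch (an annotation, not part of this patch): the branching of
// the DELETE handler above, reduced to its state changes. The types are
// simplified stand-ins for SObjectVO/SObjectItemVO, not CloudStack classes.
// ----------------------------------------------------------------------
class VersionedDeleteSketch {
    String deletionMark;                                        // non-null => a delete marker is set
    final java.util.Map<String, String> versions = new java.util.HashMap<>(); // versionId -> storedPath

    int delete(boolean versioningEnabled, String versionId) {
        if (versioningEnabled) {
            if (versionId == null) {                            // no versionId: only write a delete marker
                deletionMark = java.util.UUID.randomUUID().toString();
                return 204;
            }
            if (versionId.equalsIgnoreCase(deletionMark)) {     // deleting the marker un-deletes the object
                deletionMark = null;
                return 204;
            }
            return versions.remove(versionId) != null ? 204 : 404; // hard-delete exactly one version
        }
        // versioning off: only the "null version" item (and its stored file) is removed
        return versions.remove(null) != null ? 204 : 404;
    }
}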
+ storedPath = item.getStoredPath(); + deleteMetaData( item.getId()); + deleteObjectAcls( "SObjectItem", item.getId()); + objectDao.remove(sobject.getId()); + } + } + } + + // Delete the file holding the object + if (null != storedPath) + { + OrderedPair host_storagelocation_pair = getBucketStorageHost( sbucket ); + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter( host_storagelocation_pair.getFirst()); + bucketAdapter.deleteObject( host_storagelocation_pair.getSecond(), bucketName, storedPath ); + } + + response.setResultCode(204); + return response; } - - private void deleteMetaData( long itemId ) { - List itemMetaData = metaDao.getByTarget( "SObjectItem", itemId ); - if (null != itemMetaData) - { - ListIterator it = itemMetaData.listIterator(); - while( it.hasNext()) { - SMetaVO oneTag = (SMetaVO)it.next(); - metaDao.remove(oneTag.getId()); - } - } - } - private void deleteObjectAcls( String target, long itemId ) { - List itemAclData = aclDao.listGrants( target, itemId ); - if (null != itemAclData) - { - ListIterator it = itemAclData.listIterator(); - while( it.hasNext()) { - SAclVO oneTag = (SAclVO)it.next(); - aclDao.remove(oneTag.getId()); - } - } - } + private void deleteMetaData( long itemId ) { + List itemMetaData = metaDao.getByTarget( "SObjectItem", itemId ); + if (null != itemMetaData) + { + ListIterator it = itemMetaData.listIterator(); + while( it.hasNext()) { + SMetaVO oneTag = it.next(); + metaDao.remove(oneTag.getId()); + } + } + } - private void deleteBucketAcls( long bucketId ) { + private void deleteObjectAcls( String target, long itemId ) { + List itemAclData = aclDao.listGrants( target, itemId ); + if (null != itemAclData) + { + ListIterator it = itemAclData.listIterator(); + while( it.hasNext()) { + SAclVO oneTag = it.next(); + aclDao.remove(oneTag.getId()); + } + } + } - List bucketAclData = aclDao.listGrants( "SBucket", bucketId ); - if (null != bucketAclData) - { - ListIterator it = bucketAclData.listIterator(); - while( it.hasNext()) { - SAclVO oneTag = (SAclVO)it.next(); - aclDao.remove(oneTag.getId()); - } - } - } - - private S3ListBucketPrefixEntry[] composeListBucketPrefixEntries(List l, String prefix, String delimiter, int maxKeys) - { - List entries = new ArrayList(); - int count = 0; - - for(SObjectVO sobject : l) - { - if(delimiter != null && !delimiter.isEmpty()) - { - String subName = StringHelper.substringInBetween(sobject.getNameKey(), prefix, delimiter); - if(subName != null) - { - S3ListBucketPrefixEntry entry = new S3ListBucketPrefixEntry(); - if ( prefix != null && prefix.length() > 0) - entry.setPrefix(prefix + delimiter + subName); - else entry.setPrefix(subName); - } - } - count++; - if(count >= maxKeys) break; - } - - if(entries.size() > 0) return entries.toArray(new S3ListBucketPrefixEntry[0]); - return null; - } - - /** - * The 'versionIdMarker' parameter only makes sense if enableVersion is true. - * versionIdMarker is the starting point to return information back. So for example if an - * object has versions 1,2,3,4,5 and the versionIdMarker is '3', then 3,4,5 will be returned - * by this function. If the versionIdMarker is null then all versions are returned. - * - * TODO - how does the versionIdMarker work when there is a deletion marker in the object? 
- */ - private S3ListBucketObjectEntry[] composeListBucketContentEntries(List l, String prefix, String delimiter, int maxKeys, boolean enableVersion, String versionIdMarker) - { - List entries = new ArrayList(); - SObjectItemVO latest = null; - boolean hitIdMarker = false; - int count = 0; - - for( SObjectVO sobject : l ) - { - if (delimiter != null && !delimiter.isEmpty()) - { - if (StringHelper.substringInBetween(sobject.getNameKey(), prefix, delimiter) != null) - continue; - } - - if (enableVersion) - { - hitIdMarker = (null == versionIdMarker ? true : false); + private void deleteBucketAcls( long bucketId ) { - // This supports GET REST calls with /?versions - String deletionMarker = sobject.getDeletionMark(); + List bucketAclData = aclDao.listGrants( "SBucket", bucketId ); + if (null != bucketAclData) + { + ListIterator it = bucketAclData.listIterator(); + while( it.hasNext()) { + SAclVO oneTag = it.next(); + aclDao.remove(oneTag.getId()); + } + } + } + + private S3ListBucketPrefixEntry[] composeListBucketPrefixEntries(List l, String prefix, String delimiter, int maxKeys) + { + List entries = new ArrayList(); + int count = 0; + + for(SObjectVO sobject : l) + { + if(delimiter != null && !delimiter.isEmpty()) + { + String subName = StringHelper.substringInBetween(sobject.getNameKey(), prefix, delimiter); + if(subName != null) + { + S3ListBucketPrefixEntry entry = new S3ListBucketPrefixEntry(); + if ( prefix != null && prefix.length() > 0) + entry.setPrefix(prefix + delimiter + subName); + else entry.setPrefix(subName); + } + } + count++; + if(count >= maxKeys) break; + } + + if(entries.size() > 0) return entries.toArray(new S3ListBucketPrefixEntry[0]); + return null; + } + + /** + * The 'versionIdMarker' parameter only makes sense if enableVersion is true. + * versionIdMarker is the starting point to return information back. So for example if an + * object has versions 1,2,3,4,5 and the versionIdMarker is '3', then 3,4,5 will be returned + * by this function. If the versionIdMarker is null then all versions are returned. + * + * TODO - how does the versionIdMarker work when there is a deletion marker in the object? + */ + private S3ListBucketObjectEntry[] composeListBucketContentEntries(List l, String prefix, String delimiter, int maxKeys, boolean enableVersion, String versionIdMarker) + { + List entries = new ArrayList(); + SObjectItemVO latest = null; + boolean hitIdMarker = false; + int count = 0; + + for( SObjectVO sobject : l ) + { + if (delimiter != null && !delimiter.isEmpty()) + { + if (StringHelper.substringInBetween(sobject.getNameKey(), prefix, delimiter) != null) + continue; + } + + if (enableVersion) + { + hitIdMarker = (null == versionIdMarker ? 
true : false); + + // This supports GET REST calls with /?versions + String deletionMarker = sobject.getDeletionMark(); if ( null != deletionMarker ) { - // TODO we should also save the timestamp when something is deleted - S3ListBucketObjectEntry entry = new S3ListBucketObjectEntry(); - entry.setKey(sobject.getNameKey()); - entry.setVersion( deletionMarker ); - entry.setIsLatest( true ); - entry.setIsDeletionMarker( true ); - entry.setLastModified( Calendar.getInstance( TimeZone.getTimeZone("GMT") )); - entry.setOwnerCanonicalId(sobject.getOwnerCanonicalId()); - entry.setOwnerDisplayName(""); - entries.add( entry ); - latest = null; + // TODO we should also save the timestamp when something is deleted + S3ListBucketObjectEntry entry = new S3ListBucketObjectEntry(); + entry.setKey(sobject.getNameKey()); + entry.setVersion( deletionMarker ); + entry.setIsLatest( true ); + entry.setIsDeletionMarker( true ); + entry.setLastModified( Calendar.getInstance( TimeZone.getTimeZone("GMT") )); + entry.setOwnerCanonicalId(sobject.getOwnerCanonicalId()); + entry.setOwnerDisplayName(""); + entries.add( entry ); + latest = null; } else latest = sobject.getLatestVersion( false ); - - Iterator it = sobject.getItems().iterator(); - while( it.hasNext()) - { - SObjectItemVO item = (SObjectItemVO)it.next(); - - if ( !hitIdMarker ) - { - if (item.getVersion().equalsIgnoreCase( versionIdMarker )) { - hitIdMarker = true; - entries.add( toListEntry( sobject, item, latest )); - } - } - else entries.add( toListEntry( sobject, item, latest )); - } - } - else - { // -> if there are multiple versions of an object then just return its last version - Iterator it = sobject.getItems().iterator(); - SObjectItemVO lastestItem = null; - int maxVersion = 0; - int version = 0; - while(it.hasNext()) - { - SObjectItemVO item = (SObjectItemVO)it.next(); - String versionStr = item.getVersion(); - - if ( null != versionStr ) - version = Integer.parseInt(item.getVersion()); - else lastestItem = item; - - // -> if the bucket has versions turned on - if (version > maxVersion) { - maxVersion = version; - lastestItem = item; - } - } - if (lastestItem != null) { - entries.add( toListEntry( sobject, lastestItem, null )); - } - } - - count++; - if(count >= maxKeys) break; - } - - if ( entries.size() > 0 ) - return entries.toArray(new S3ListBucketObjectEntry[0]); - else return null; - } - - private static S3ListBucketObjectEntry toListEntry( SObjectVO sobject, SObjectItemVO item, SObjectItemVO latest ) - { - S3ListBucketObjectEntry entry = new S3ListBucketObjectEntry(); - entry.setKey(sobject.getNameKey()); - entry.setVersion( item.getVersion()); - entry.setETag( "\"" + item.getMd5() + "\"" ); - entry.setSize(item.getStoredSize()); - entry.setStorageClass( "STANDARD" ); - entry.setLastModified(DateHelper.toCalendar(item.getLastModifiedTime())); - entry.setOwnerCanonicalId(sobject.getOwnerCanonicalId()); - entry.setOwnerDisplayName(""); - - if (null != latest && item == latest) entry.setIsLatest( true ); - return entry; - } - - private OrderedPair getBucketStorageHost(SBucketVO bucket) - { - - SHostVO shost = shostDao.findById(bucket.getShostID()); - if(shost.getHostType() == SHost.STORAGE_HOST_TYPE_LOCAL) { - return new OrderedPair(shost, shost.getExportRoot()); - } - + + Iterator it = sobject.getItems().iterator(); + while( it.hasNext()) + { + SObjectItemVO item = it.next(); + + if ( !hitIdMarker ) + { + if (item.getVersion().equalsIgnoreCase( versionIdMarker )) { + hitIdMarker = true; + entries.add( toListEntry( sobject, item, latest )); + 
} + } + else entries.add( toListEntry( sobject, item, latest )); + } + } + else + { // -> if there are multiple versions of an object then just return its last version + Iterator it = sobject.getItems().iterator(); + SObjectItemVO lastestItem = null; + int maxVersion = 0; + int version = 0; + while(it.hasNext()) + { + SObjectItemVO item = it.next(); + String versionStr = item.getVersion(); + + if ( null != versionStr ) + version = Integer.parseInt(item.getVersion()); + else lastestItem = item; + + // -> if the bucket has versions turned on + if (version > maxVersion) { + maxVersion = version; + lastestItem = item; + } + } + if (lastestItem != null) { + entries.add( toListEntry( sobject, lastestItem, null )); + } + } + + count++; + if(count >= maxKeys) break; + } + + if ( entries.size() > 0 ) + return entries.toArray(new S3ListBucketObjectEntry[0]); + else return null; + } + + private static S3ListBucketObjectEntry toListEntry( SObjectVO sobject, SObjectItemVO item, SObjectItemVO latest ) + { + S3ListBucketObjectEntry entry = new S3ListBucketObjectEntry(); + entry.setKey(sobject.getNameKey()); + entry.setVersion( item.getVersion()); + entry.setETag( "\"" + item.getMd5() + "\"" ); + entry.setSize(item.getStoredSize()); + entry.setStorageClass( "STANDARD" ); + entry.setLastModified(DateHelper.toCalendar(item.getLastModifiedTime())); + entry.setOwnerCanonicalId(sobject.getOwnerCanonicalId()); + entry.setOwnerDisplayName(""); + + if (null != latest && item == latest) entry.setIsLatest( true ); + return entry; + } + + private OrderedPair getBucketStorageHost(SBucketVO bucket) + { + + SHostVO shost = shostDao.findById(bucket.getShostID()); + if(shost.getHostType() == SHost.STORAGE_HOST_TYPE_LOCAL) { + return new OrderedPair(shost, shost.getExportRoot()); + } + if(shost.getHostType() == SHost.STORAGE_HOST_TYPE_CASTOR ) { return new OrderedPair(shost, shost.getExportRoot()); } - MHostMountVO mount = mountDao.getHostMount(ServiceProvider.getInstance().getManagementHostId(), shost.getId()); - if(mount != null) { - return new OrderedPair(shost, mount.getMountPath()); - } - //return null; - // need to redirect request to other node - throw new HostNotMountedException("Storage host "); // + shost.getHost() + " is not locally mounted"); - } - - /** - * Locate the folder to hold upload parts at the same mount point as the upload's final bucket - * location. Create the upload folder dynamically. - * - * @param bucketName - */ - private void createUploadFolder(String bucketName) - { - try { - allocBucketStorageHost(bucketName, ServiceProvider.getInstance().getMultipartDir()); - } - finally { - - } - } - - /** - * The overrideName is used to create a hidden storage bucket (folder) in the same location - * as the given bucketName. This can be used to create a folder for parts of a multipart - * upload for the associated bucket. 
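// ----------------------------------------------------------------------
// Editor's sketch (an annotation, not part of this patch): the newest-item
// rule from composeListBucketContentEntries() above, isolated. A null
// version string marks the single pre-versioning item; numbered versions
// win by the highest number.
// ----------------------------------------------------------------------
class LatestVersionSketch {
    static int indexOfLatest(java.util.List<String> versionIds) {
        int latest = -1;
        int maxVersion = 0;
        for (int i = 0; i < versionIds.size(); i++) {
            String v = versionIds.get(i);
            if (v == null) { latest = i; continue; }   // the pre-versioning ("null version") item
            int n = Integer.parseInt(v);
            if (n > maxVersion) { maxVersion = n; latest = i; }
        }
        return latest;                                 // -1 when the object holds no items
    }
}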
- * - * @param bucketName - * @param overrideName - * @return - */ - private OrderedPair allocBucketStorageHost(String bucketName, String overrideName) - { - //SHostDao shostDao = new SHostDao(); - - MHostVO mhost = mhostDao.findById(ServiceProvider.getInstance().getManagementHostId()); - if(mhost == null) - throw new OutOfServiceException("Temporarily out of service"); - - if(mhost.getMounts().size() > 0) { - Random random = new Random(); - MHostMountVO[] mounts = (MHostMountVO[])mhost.getMounts().toArray(); - MHostMountVO mount = mounts[random.nextInt(mounts.length)]; - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(mount.getShost()); - bucketAdapter.createContainer(mount.getMountPath(), (null != overrideName ? overrideName : bucketName)); - return new OrderedPair(mount.getShost(), mount.getMountPath()); - } - - // To make things simple, only allow one local mounted storage root TODO - Change in the future - String localStorageRoot = ServiceProvider.getInstance().getStartupProperties().getProperty("storage.root"); - if(localStorageRoot != null) { - SHostVO localSHost = shostDao.getLocalStorageHost(mhost.getId(), localStorageRoot); - if(localSHost == null) - throw new InternalErrorException("storage.root is configured but not initialized"); - - S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(localSHost); - bucketAdapter.createContainer(localSHost.getExportRoot(),(null != overrideName ? overrideName : bucketName)); - return new OrderedPair(localSHost, localStorageRoot); - } - - throw new OutOfStorageException("No storage host is available"); - } - - public S3BucketAdapter getStorageHostBucketAdapter(SHostVO shost) - { - S3BucketAdapter adapter = bucketAdapters.get(shost.getHostType()); - if(adapter == null) - throw new InternalErrorException("Bucket adapter is not installed for host type: " + shost.getHostType()); - - return adapter; - } + MHostMountVO mount = mountDao.getHostMount(ServiceProvider.getInstance().getManagementHostId(), shost.getId()); + if(mount != null) { + return new OrderedPair(shost, mount.getMountPath()); + } + //return null; + // need to redirect request to other node + throw new HostNotMountedException("Storage host "); // + shost.getHost() + " is not locally mounted"); + } - /** - * If acl is set then the cannedAccessPolicy parameter should be null and is ignored. - * The cannedAccessPolicy parameter is for REST Put requests only where a simple set of ACLs can be - * created with a single header value. Note that we do not currently support "anonymous" un-authenticated - * access in our implementation. - * - * @throws IOException - */ - @SuppressWarnings("deprecation") - public OrderedPair allocObjectItem(SBucketVO bucket, String nameKey, S3MetaDataEntry[] meta, S3AccessControlList acl, String cannedAccessPolicy) - { - SObjectItemVO item = null; - int versionSeq = 1; - int versioningStatus = bucket.getVersioningStatus(); - - //Session session = PersistContext.getSession(); - - // [A] To write into a bucket the user must have write permission to that bucket - S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucket.getName()); - context.setKeyName( nameKey ); - context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy); + /** + * Locate the folder to hold upload parts at the same mount point as the upload's final bucket + * location. Create the upload folder dynamically. 
+ * + * @param bucketName + */ + private void createUploadFolder(String bucketName) + { + try { + allocBucketStorageHost(bucketName, ServiceProvider.getInstance().getMultipartDir()); + } + finally { - verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); // TODO - check this validates plain POSTs - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); - txn.start(); + } + } - // [B] If versioning is off them we over write a null object item - SObjectVO object = objectDao.getByNameKey(bucket, nameKey); - if ( object != null ) - { - // -> if versioning is on create new object items - if ( SBucket.VERSIONING_ENABLED == versioningStatus ) - { + /** + * The overrideName is used to create a hidden storage bucket (folder) in the same location + * as the given bucketName. This can be used to create a folder for parts of a multipart + * upload for the associated bucket. + * + * @param bucketName + * @param overrideName + * @return + */ + private OrderedPair allocBucketStorageHost(String bucketName, String overrideName) + { + //SHostDao shostDao = new SHostDao(); - versionSeq = object.getNextSequence(); - object.setNextSequence(versionSeq + 1); - objectDao.update(object.getId(), object); - - item = new SObjectItemVO(); - item.setTheObject(object); - object.getItems().add(item); - item.setsObjectID(object.getId()); - item.setVersion(String.valueOf(versionSeq)); - Date ts = DateHelper.currentGMTTime(); - item.setCreateTime(ts); - item.setLastAccessTime(ts); - item.setLastModifiedTime(ts); - item = itemDao.persist(item); - txn.commit(); - //session.save(item); - } - else - { // -> find an object item with a null version, can be null - // if bucket started out with versioning enabled and was then suspended - item = itemDao.getByObjectIdNullVersion( object.getId()); - if (item == null) - { - item = new SObjectItemVO(); - item.setTheObject(object); - item.setsObjectID(object.getId()); - object.getItems().add(item); - Date ts = DateHelper.currentGMTTime(); - item.setCreateTime(ts); - item.setLastAccessTime(ts); - item.setLastModifiedTime(ts); - item = itemDao.persist(item); - txn.commit(); - } - } - } - else - { - Transaction txn1 = Transaction.open(Transaction.AWSAPI_DB); - txn1.start(); - // -> there is no object nor an object item - object = new SObjectVO(); - object.setBucket(bucket); - object.setNameKey(nameKey); - object.setNextSequence(2); - object.setBucketID(bucket.getId()); - object.setCreateTime(DateHelper.currentGMTTime()); - object.setOwnerCanonicalId(UserContext.current().getCanonicalUserId()); - object = objectDao.persist(object); - item = new SObjectItemVO(); - item.setTheObject(object); - item.setsObjectID(object.getId()); - object.getItems().add(item); - if (SBucket.VERSIONING_ENABLED == versioningStatus) item.setVersion(String.valueOf(versionSeq)); - Date ts = DateHelper.currentGMTTime(); - item.setCreateTime(ts); - item.setLastAccessTime(ts); - item.setLastModifiedTime(ts); - item = itemDao.persist(item); - txn.commit(); - txn.close(); - - } - - - // [C] We will use the item DB id as the file name, MD5/contentLength will be stored later - String suffix = null; - int dotPos = nameKey.lastIndexOf('.'); - if (dotPos >= 0) suffix = nameKey.substring(dotPos); - if ( suffix != null ) - item.setStoredPath(String.valueOf(item.getId()) + suffix); - else item.setStoredPath(String.valueOf(item.getId())); - - metaDao.save("SObjectItem", item.getId(), meta); - - - // [D] Are we setting an ACL along with the object - // -> the ACL is ALWAYS set on a particular instance of 
the object (i.e., a version) - if ( null != cannedAccessPolicy ) - { - setCannedAccessControls( cannedAccessPolicy, "SObjectItem", item.getId(), bucket ); - } - else if (null == acl || 0 == acl.size()) - { - // -> this is termed the "private" or default ACL, "Owner gets FULL_CONTROL" - setSingleAcl( "SObjectItem", item.getId(), SAcl.PERMISSION_FULL ); - } - else if (null != acl) { - aclDao.save( "SObjectItem", item.getId(), acl ); - } - - itemDao.update(item.getId(), item); - txn.close(); - return new OrderedPair(object, item); - } - - - /** - * Access controls that are specified via the "x-amz-acl:" headers in REST requests. - * Note that canned policies can be set when the object's contents are set - */ - public void setCannedAccessControls( String cannedAccessPolicy, String target, long objectId, SBucketVO bucket ) - { - // Find the permission and symbol for the principal corresponding to the requested cannedAccessPolicy - Triple permission_permission_symbol_triple = - SAclVO.getCannedAccessControls(cannedAccessPolicy, target, bucket.getOwnerCanonicalId()); - if ( null == permission_permission_symbol_triple.getThird() ) - setSingleAcl(target, objectId, permission_permission_symbol_triple.getFirst()); - else - { setDefaultAcls( target, - objectId, - permission_permission_symbol_triple.getFirst(), // permission according to ownership of object - permission_permission_symbol_triple.getSecond(), // permission according to ownership of bucket - permission_permission_symbol_triple.getThird() ); // "symbol" to indicate principal or otherwise name of owner - - } - } + MHostVO mhost = mhostDao.findById(ServiceProvider.getInstance().getManagementHostId()); + if(mhost == null) + throw new OutOfServiceException("Temporarily out of service"); - - private void setSingleAcl( String target, long targetId, int permission ) - { + if(mhost.getMounts().size() > 0) { + Random random = new Random(); + MHostMountVO[] mounts = (MHostMountVO[])mhost.getMounts().toArray(); + MHostMountVO mount = mounts[random.nextInt(mounts.length)]; + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(mount.getShost()); + bucketAdapter.createContainer(mount.getMountPath(), (null != overrideName ? overrideName : bucketName)); + return new OrderedPair(mount.getShost(), mount.getMountPath()); + } + + // To make things simple, only allow one local mounted storage root TODO - Change in the future + String localStorageRoot = ServiceProvider.getInstance().getStartupProperties().getProperty("storage.root"); + if(localStorageRoot != null) { + SHostVO localSHost = shostDao.getLocalStorageHost(mhost.getId(), localStorageRoot); + if(localSHost == null) + throw new InternalErrorException("storage.root is configured but not initialized"); + + S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(localSHost); + bucketAdapter.createContainer(localSHost.getExportRoot(),(null != overrideName ? overrideName : bucketName)); + return new OrderedPair(localSHost, localStorageRoot); + } + + throw new OutOfStorageException("No storage host is available"); + } + + public S3BucketAdapter getStorageHostBucketAdapter(SHostVO shost) + { + S3BucketAdapter adapter = bucketAdapters.get(shost.getHostType()); + if(adapter == null) + throw new InternalErrorException("Bucket adapter is not installed for host type: " + shost.getHostType()); + + return adapter; + } + + /** + * If acl is set then the cannedAccessPolicy parameter should be null and is ignored. 
+ * The cannedAccessPolicy parameter is for REST Put requests only where a simple set of ACLs can be + * created with a single header value. Note that we do not currently support "anonymous" un-authenticated + * access in our implementation. + * + * @throws IOException + */ + @SuppressWarnings("deprecation") + public OrderedPair allocObjectItem(SBucketVO bucket, String nameKey, S3MetaDataEntry[] meta, S3AccessControlList acl, String cannedAccessPolicy) + { + SObjectItemVO item = null; + int versionSeq = 1; + int versioningStatus = bucket.getVersioningStatus(); + + //Session session = PersistContext.getSession(); + + // [A] To write into a bucket the user must have write permission to that bucket + S3PolicyContext context = new S3PolicyContext( PolicyActions.PutObject, bucket.getName()); + context.setKeyName( nameKey ); + context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy); + + verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); // TODO - check this validates plain POSTs + Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + txn.start(); + + // [B] If versioning is off them we over write a null object item + SObjectVO object = objectDao.getByNameKey(bucket, nameKey); + if ( object != null ) + { + // -> if versioning is on create new object items + if ( SBucket.VERSIONING_ENABLED == versioningStatus ) + { + + versionSeq = object.getNextSequence(); + object.setNextSequence(versionSeq + 1); + objectDao.update(object.getId(), object); + + item = new SObjectItemVO(); + item.setTheObject(object); + object.getItems().add(item); + item.setsObjectID(object.getId()); + item.setVersion(String.valueOf(versionSeq)); + Date ts = DateHelper.currentGMTTime(); + item.setCreateTime(ts); + item.setLastAccessTime(ts); + item.setLastModifiedTime(ts); + item = itemDao.persist(item); + txn.commit(); + //session.save(item); + } + else + { // -> find an object item with a null version, can be null + // if bucket started out with versioning enabled and was then suspended + item = itemDao.getByObjectIdNullVersion( object.getId()); + if (item == null) + { + item = new SObjectItemVO(); + item.setTheObject(object); + item.setsObjectID(object.getId()); + object.getItems().add(item); + Date ts = DateHelper.currentGMTTime(); + item.setCreateTime(ts); + item.setLastAccessTime(ts); + item.setLastModifiedTime(ts); + item = itemDao.persist(item); + txn.commit(); + } + } + } + else + { + Transaction txn1 = Transaction.open(Transaction.AWSAPI_DB); + txn1.start(); + // -> there is no object nor an object item + object = new SObjectVO(); + object.setBucket(bucket); + object.setNameKey(nameKey); + object.setNextSequence(2); + object.setBucketID(bucket.getId()); + object.setCreateTime(DateHelper.currentGMTTime()); + object.setOwnerCanonicalId(UserContext.current().getCanonicalUserId()); + object = objectDao.persist(object); + item = new SObjectItemVO(); + item.setTheObject(object); + item.setsObjectID(object.getId()); + object.getItems().add(item); + if (SBucket.VERSIONING_ENABLED == versioningStatus) item.setVersion(String.valueOf(versionSeq)); + Date ts = DateHelper.currentGMTTime(); + item.setCreateTime(ts); + item.setLastAccessTime(ts); + item.setLastModifiedTime(ts); + item = itemDao.persist(item); + txn.commit(); + txn.close(); + + } + + + // [C] We will use the item DB id as the file name, MD5/contentLength will be stored later + String suffix = null; + int dotPos = nameKey.lastIndexOf('.'); + if (dotPos >= 0) suffix = nameKey.substring(dotPos); + if ( suffix != null ) + 
item.setStoredPath(String.valueOf(item.getId()) + suffix); + else item.setStoredPath(String.valueOf(item.getId())); + + metaDao.save("SObjectItem", item.getId(), meta); + + + // [D] Are we setting an ACL along with the object + // -> the ACL is ALWAYS set on a particular instance of the object (i.e., a version) + if ( null != cannedAccessPolicy ) + { + setCannedAccessControls( cannedAccessPolicy, "SObjectItem", item.getId(), bucket ); + } + else if (null == acl || 0 == acl.size()) + { + // -> this is termed the "private" or default ACL, "Owner gets FULL_CONTROL" + setSingleAcl( "SObjectItem", item.getId(), SAcl.PERMISSION_FULL ); + } + else if (null != acl) { + aclDao.save( "SObjectItem", item.getId(), acl ); + } + + itemDao.update(item.getId(), item); + txn.close(); + return new OrderedPair(object, item); + } + + + /** + * Access controls that are specified via the "x-amz-acl:" headers in REST requests. + * Note that canned policies can be set when the object's contents are set + */ + public void setCannedAccessControls( String cannedAccessPolicy, String target, long objectId, SBucketVO bucket ) + { + // Find the permission and symbol for the principal corresponding to the requested cannedAccessPolicy + Triple permission_permission_symbol_triple = + SAclVO.getCannedAccessControls(cannedAccessPolicy, target, bucket.getOwnerCanonicalId()); + if ( null == permission_permission_symbol_triple.getThird() ) + setSingleAcl(target, objectId, permission_permission_symbol_triple.getFirst()); + else + { setDefaultAcls( target, + objectId, + permission_permission_symbol_triple.getFirst(), // permission according to ownership of object + permission_permission_symbol_triple.getSecond(), // permission according to ownership of bucket + permission_permission_symbol_triple.getThird() ); // "symbol" to indicate principal or otherwise name of owner + + } + } + + + private void setSingleAcl( String target, long targetId, int permission ) + { S3AccessControlList defaultAcl = new S3AccessControlList(); - - // -> if an annoymous request, then do not rewrite the ACL - String userId = UserContext.current().getCanonicalUserId(); + + // -> if an annoymous request, then do not rewrite the ACL + String userId = UserContext.current().getCanonicalUserId(); if (0 < userId.length()) { S3Grant defaultGrant = new S3Grant(); @@ -1647,28 +1638,28 @@ public class S3Engine { defaultAcl.addGrant( defaultGrant ); aclDao.save( target, targetId, defaultAcl ); } - } - + } - /** - * The Cloud Stack API Access key is used for for the Canonical User Id everywhere (buckets and objects). - * - * @param owner - this can be the Cloud Access Key for a bucket owner or one of the - * following special symbols: - * (a) '*' - any principal authenticated user (i.e., any user with a registered Cloud Access Key) - * (b) 'A' - any anonymous principal (i.e., S3 request without an Authorization header) - */ - private void setDefaultAcls( String target, long objectId, int permission1, int permission2, String owner ) - { - S3AccessControlList defaultAcl = new S3AccessControlList(); - - // -> object owner + + /** + * The Cloud Stack API Access key is used for for the Canonical User Id everywhere (buckets and objects). 
+ * + * @param owner - this can be the Cloud Access Key for a bucket owner or one of the + * following special symbols: + * (a) '*' - any principal authenticated user (i.e., any user with a registered Cloud Access Key) + * (b) 'A' - any anonymous principal (i.e., S3 request without an Authorization header) + */ + private void setDefaultAcls( String target, long objectId, int permission1, int permission2, String owner ) + { + S3AccessControlList defaultAcl = new S3AccessControlList(); + + // -> object owner S3Grant defaultGrant = new S3Grant(); defaultGrant.setGrantee(SAcl.GRANTEE_USER); defaultGrant.setCanonicalUserID( UserContext.current().getCanonicalUserId()); defaultGrant.setPermission( permission1 ); defaultAcl.addGrant( defaultGrant ); - + // -> bucket owner defaultGrant = new S3Grant(); defaultGrant.setGrantee(SAcl.GRANTEE_USER); @@ -1676,238 +1667,238 @@ public class S3Engine { defaultGrant.setPermission( permission2 ); defaultAcl.addGrant( defaultGrant ); aclDao.save( target, objectId, defaultAcl ); - } + } - public static PolicyAccess verifyPolicy( S3PolicyContext context ) - { - S3BucketPolicy policy = null; - - // Ordinarily a REST request will pass in an S3PolicyContext for a given bucket by this stage. The HttpServletRequest object - // should be held in the UserContext ready for extraction of the S3BucketPolicy. - // If there is an error in obtaining the request object or in loading the policy then log the failure and return a S3PolicyContext - // which indicates DEFAULT_DENY. Where there is no failure, the policy returned should be specific to the Canonical User ID of the requester. - - try { - // -> in SOAP the HttpServletRequest object is hidden and not passed around - if (null != context) { - context.setHttp( UserContext.current().getHttp()); - policy = loadPolicy( context ); - } - - if ( null != policy ) - return policy.eval(context, UserContext.current().getCanonicalUserId()); - else return PolicyAccess.DEFAULT_DENY; - } - catch( Exception e ) { + public static PolicyAccess verifyPolicy( S3PolicyContext context ) + { + S3BucketPolicy policy = null; + + // Ordinarily a REST request will pass in an S3PolicyContext for a given bucket by this stage. The HttpServletRequest object + // should be held in the UserContext ready for extraction of the S3BucketPolicy. + // If there is an error in obtaining the request object or in loading the policy then log the failure and return a S3PolicyContext + // which indicates DEFAULT_DENY. Where there is no failure, the policy returned should be specific to the Canonical User ID of the requester. + + try { + // -> in SOAP the HttpServletRequest object is hidden and not passed around + if (null != context) { + context.setHttp( UserContext.current().getHttp()); + policy = loadPolicy( context ); + } + + if ( null != policy ) + return policy.eval(context, UserContext.current().getCanonicalUserId()); + else return PolicyAccess.DEFAULT_DENY; + } + catch( Exception e ) { logger.error("verifyAccess - loadPolicy failed, bucket: " + context.getBucketName() + " policy ignored", e); - return PolicyAccess.DEFAULT_DENY; - } - } - - /** - * To determine access to a bucket or an object in a bucket evaluate first a define - * bucket policy and then any defined ACLs. 
- * - * @param context - all data needed for bucket policies - * @param target - used for ACL evaluation, object identifier - * @param targetId - used for ACL evaluation - * @param requestedPermission - ACL type access requested - * - * @throws ParseException, SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException - */ - public static void verifyAccess( S3PolicyContext context, String target, long targetId, int requestedPermission ) - { - switch( verifyPolicy( context ) ) { + return PolicyAccess.DEFAULT_DENY; + } + } + + /** + * To determine access to a bucket or an object in a bucket evaluate first a define + * bucket policy and then any defined ACLs. + * + * @param context - all data needed for bucket policies + * @param target - used for ACL evaluation, object identifier + * @param targetId - used for ACL evaluation + * @param requestedPermission - ACL type access requested + * + * @throws ParseException, SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException + */ + public static void verifyAccess( S3PolicyContext context, String target, long targetId, int requestedPermission ) + { + switch( verifyPolicy( context ) ) { case ALLOW: // overrides ACLs (?) - return; + return; case DENY: - throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); - + throw new PermissionDeniedException( "Access Denied - bucket policy DENY result" ); + case DEFAULT_DENY: default: - accessAllowed( target, targetId, requestedPermission ); - break; + accessAllowed( target, targetId, requestedPermission ); + break; } - } - - /** - * This method verifies that the accessing client has the requested - * permission on the object/bucket/Acl represented by the tuple: - * - * For cases where an ACL is meant for any authenticated user we place a "*" for the - * Canonical User Id. N.B. - "*" is not a legal Cloud (Bridge) Access key. - * - * For cases where an ACL is meant for any anonymous user (or 'AllUsers') we place a "A" for the - * Canonical User Id. N.B. - "A" is not a legal Cloud (Bridge) Access key. - */ - public static void accessAllowed( String target, long targetId, int requestedPermission ) - { - if (SAcl.PERMISSION_PASS == requestedPermission) return; - - // If an annoymous request, then canonicalUserId is an empty string - String userId = UserContext.current().getCanonicalUserId(); + } + + /** + * This method verifies that the accessing client has the requested + * permission on the object/bucket/Acl represented by the tuple: + * + * For cases where an ACL is meant for any authenticated user we place a "*" for the + * Canonical User Id. N.B. - "*" is not a legal Cloud (Bridge) Access key. + * + * For cases where an ACL is meant for any anonymous user (or 'AllUsers') we place a "A" for the + * Canonical User Id. N.B. - "A" is not a legal Cloud (Bridge) Access key. + */ + public static void accessAllowed( String target, long targetId, int requestedPermission ) + { + if (SAcl.PERMISSION_PASS == requestedPermission) return; + + // If an annoymous request, then canonicalUserId is an empty string + String userId = UserContext.current().getCanonicalUserId(); if ( 0 == userId.length()) { - // Is an anonymous principal ACL set for this ? - if (hasPermission( saclDao.listGrants( target, targetId, "A" ), requestedPermission )) return; + // Is an anonymous principal ACL set for this ? 
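// ----------------------------------------------------------------------
// Editor's sketch (an annotation, not part of this patch): the ACL decision
// that accessAllowed()/hasPermission() encode here. Grants carry permission
// bitmasks; "A" stands for the anonymous pseudo-user and "*" for any
// authenticated user, as the surrounding comments explain.
// ----------------------------------------------------------------------
class AclCheckSketch {
    static boolean hasPermission(int[] grantedMasks, int requested) {
        for (int granted : grantedMasks)
            if ((granted & requested) == requested) return true; // every requested bit granted
        return false;
    }

    static boolean accessAllowed(java.util.Map<String, int[]> grantsByUser,
                                 String canonicalUserId, int requested) {
        if (canonicalUserId.isEmpty())                           // anonymous caller
            return hasPermission(grantsByUser.getOrDefault("A", new int[0]), requested);
        return hasPermission(grantsByUser.getOrDefault(canonicalUserId, new int[0]), requested)
            || hasPermission(grantsByUser.getOrDefault("*", new int[0]), requested);
    }
}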
+ if (hasPermission( saclDao.listGrants( target, targetId, "A" ), requestedPermission )) return; } else { - if (hasPermission( saclDao.listGrants( target, targetId, userId ), requestedPermission )) return; - // Or alternatively is there is any principal authenticated ACL set for this ? - if (hasPermission( saclDao.listGrants( target, targetId, "*" ), requestedPermission )) return; + if (hasPermission( saclDao.listGrants( target, targetId, userId ), requestedPermission )) return; + // Or alternatively is there is any principal authenticated ACL set for this ? + if (hasPermission( saclDao.listGrants( target, targetId, "*" ), requestedPermission )) return; } // No privileges implies that no access is allowed in the case of an anonymous user throw new PermissionDeniedException( "Access Denied - ACLs do not give user the required permission" ); - } - - /** - * This method assumes that the bucket has been tested to make sure it exists before - * it is called. - * - * @param context - * @return S3BucketPolicy - * @throws SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException, ParseException - */ - public static S3BucketPolicy loadPolicy( S3PolicyContext context ) - throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException, ParseException - { - OrderedPair result = ServiceProvider.getInstance().getBucketPolicy( context.getBucketName()); - S3BucketPolicy policy = result.getFirst(); - if ( null == policy ) - { - // -> do we have to load it from the database (any other value means there is no policy)? - if (-1 == result.getSecond().intValue()) - { - BucketPolicyVO policyvo = bPolicy.getByName(context.getBucketName()); - String policyInJson = null; - if (null != policyvo) - policyInJson = policyvo.getPolicy(); - - // -> place in cache that no policy exists in the database - if (null == policyInJson) { - ServiceProvider.getInstance().setBucketPolicy(context.getBucketName(), null); - return null; - } - - PolicyParser parser = new PolicyParser(); - policy = parser.parse( policyInJson, context.getBucketName()); - if (null != policy) - ServiceProvider.getInstance().setBucketPolicy(context.getBucketName(), policy); - } - } - return policy; - } - - public static void verifyBucketName( String bucketName, boolean useDNSGuidelines ) throws InvalidBucketName - { - // [A] To comply with Amazon S3 basic requirements, bucket names must meet the following conditions - // -> must be between 3 and 255 characters long - int size = bucketName.length(); - if (3 > size || size > 255) - throw new InvalidBucketName( bucketName + " is not between 3 and 255 characters long" ); - - // -> must start with a number or letter - if (!Character.isLetterOrDigit( bucketName.charAt( 0 ))) - throw new InvalidBucketName( bucketName + " does not start with a number or letter" ); - - // -> can contain lowercase letters, numbers, periods (.), underscores (_), and dashes (-) - // -> the bucket name can also contain uppercase letters but it is not recommended - for( int i=0; i < bucketName.length(); i++ ) - { - char next = bucketName.charAt(i); - if (Character.isLetter( next )) continue; - else if (Character.isDigit( next )) continue; - else if ('.' == next) continue; - else if ('_' == next) continue; - else if ('-' == next) continue; - else throw new InvalidBucketName( bucketName + " contains the invalid character: " + next ); - } - - // -> must not be formatted as an IP address (e.g., 192.168.5.4) - String[] parts = bucketName.split( "\\." 
); - if (4 == parts.length) - { - try { - int first = Integer.parseInt( parts[0] ); - int second = Integer.parseInt( parts[1] ); - int third = Integer.parseInt( parts[2] ); - int fourth = Integer.parseInt( parts[3] ); - throw new InvalidBucketName( bucketName + " is formatted as an IP address" ); - } - catch( NumberFormatException e ) - {throw new InvalidBucketName( bucketName);} - } - - - // [B] To conform with DNS requirements, Amazon recommends following these additional guidelines when creating buckets - // -> bucket names should be between 3 and 63 characters long - if (useDNSGuidelines) - { - // -> bucket names should be between 3 and 63 characters long - if (3 > size || size > 63) - throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " is not between 3 and 63 characters long" ); + } - // -> bucket names should not contain underscores (_) - int pos = bucketName.indexOf( '_' ); - if (-1 != pos) - throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain underscores" ); - - // -> bucket names should not end with a dash - if (bucketName.endsWith( "-" )) - throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not end with a dash" ); - - // -> bucket names cannot contain two, adjacent periods - pos = bucketName.indexOf( ".." ); - if (-1 != pos) - throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain \"..\"" ); - - // -> bucket names cannot contain dashes next to periods (e.g., "my-.bucket.com" and "my.-bucket" are invalid) - if (-1 != bucketName.indexOf( "-." ) || -1 != bucketName.indexOf( ".-" )) - throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain \".-\" or \"-.\"" ); - } - } - - private static boolean hasPermission( List privileges, int requestedPermission ) - { + /** + * This method assumes that the bucket has been tested to make sure it exists before + * it is called. + * + * @param context + * @return S3BucketPolicy + * @throws SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException, ParseException + */ + public static S3BucketPolicy loadPolicy( S3PolicyContext context ) + throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException, ParseException + { + OrderedPair result = ServiceProvider.getInstance().getBucketPolicy( context.getBucketName()); + S3BucketPolicy policy = result.getFirst(); + if ( null == policy ) + { + // -> do we have to load it from the database (any other value means there is no policy)? 
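// ----------------------------------------------------------------------
// Editor's sketch (an annotation, not part of this patch): the negative-
// caching pattern loadPolicy() uses here -- a cached null means "we already
// know there is no policy", so the database is consulted at most once per
// bucket. The Function-based lookup is an illustrative stand-in.
// ----------------------------------------------------------------------
class PolicyCacheSketch {
    final java.util.Map<String, String> cache = new java.util.HashMap<>(); // bucket -> policy JSON (null allowed)

    String load(String bucket, java.util.function.Function<String, String> dbLookup) {
        if (cache.containsKey(bucket))
            return cache.get(bucket);  // hit; may be a remembered "no policy" (null)
        String json = dbLookup.apply(bucket);
        cache.put(bucket, json);       // cache nulls too, avoiding repeated DB misses
        return json;
    }
}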
+ if (-1 == result.getSecond().intValue()) + { + BucketPolicyVO policyvo = bPolicy.getByName(context.getBucketName()); + String policyInJson = null; + if (null != policyvo) + policyInJson = policyvo.getPolicy(); + + // -> place in cache that no policy exists in the database + if (null == policyInJson) { + ServiceProvider.getInstance().setBucketPolicy(context.getBucketName(), null); + return null; + } + + PolicyParser parser = new PolicyParser(); + policy = parser.parse( policyInJson, context.getBucketName()); + if (null != policy) + ServiceProvider.getInstance().setBucketPolicy(context.getBucketName(), policy); + } + } + return policy; + } + + public static void verifyBucketName( String bucketName, boolean useDNSGuidelines ) throws InvalidBucketName + { + // [A] To comply with Amazon S3 basic requirements, bucket names must meet the following conditions + // -> must be between 3 and 255 characters long + int size = bucketName.length(); + if (3 > size || size > 255) + throw new InvalidBucketName( bucketName + " is not between 3 and 255 characters long" ); + + // -> must start with a number or letter + if (!Character.isLetterOrDigit( bucketName.charAt( 0 ))) + throw new InvalidBucketName( bucketName + " does not start with a number or letter" ); + + // -> can contain lowercase letters, numbers, periods (.), underscores (_), and dashes (-) + // -> the bucket name can also contain uppercase letters but it is not recommended + for( int i=0; i < bucketName.length(); i++ ) + { + char next = bucketName.charAt(i); + if (Character.isLetter( next )) continue; + else if (Character.isDigit( next )) continue; + else if ('.' == next) continue; + else if ('_' == next) continue; + else if ('-' == next) continue; + else throw new InvalidBucketName( bucketName + " contains the invalid character: " + next ); + } + + // -> must not be formatted as an IP address (e.g., 192.168.5.4) + String[] parts = bucketName.split( "\\." ); + if (4 == parts.length) + { + try { + int first = Integer.parseInt( parts[0] ); + int second = Integer.parseInt( parts[1] ); + int third = Integer.parseInt( parts[2] ); + int fourth = Integer.parseInt( parts[3] ); + throw new InvalidBucketName( bucketName + " is formatted as an IP address" ); + } + catch( NumberFormatException e ) + {throw new InvalidBucketName( bucketName);} + } + + + // [B] To conform with DNS requirements, Amazon recommends following these additional guidelines when creating buckets + // -> bucket names should be between 3 and 63 characters long + if (useDNSGuidelines) + { + // -> bucket names should be between 3 and 63 characters long + if (3 > size || size > 63) + throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " is not between 3 and 63 characters long" ); + + // -> bucket names should not contain underscores (_) + int pos = bucketName.indexOf( '_' ); + if (-1 != pos) + throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain underscores" ); + + // -> bucket names should not end with a dash + if (bucketName.endsWith( "-" )) + throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not end with a dash" ); + + // -> bucket names cannot contain two, adjacent periods + pos = bucketName.indexOf( ".." 
); + if (-1 != pos) + throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain \"..\"" ); + + // -> bucket names cannot contain dashes next to periods (e.g., "my-.bucket.com" and "my.-bucket" are invalid) + if (-1 != bucketName.indexOf( "-." ) || -1 != bucketName.indexOf( ".-" )) + throw new InvalidBucketName( "DNS requiremens, bucket name: " + bucketName + " should not contain \".-\" or \"-.\"" ); + } + } + + private static boolean hasPermission( List privileges, int requestedPermission ) + { ListIterator it = privileges.listIterator(); while( it.hasNext()) { - // True providing the requested permission is contained in one or the granted rights for this user. False otherwise. - SAclVO rights = (SAclVO)it.next(); - int permission = rights.getPermission(); - if (requestedPermission == (permission & requestedPermission)) return true; + // True providing the requested permission is contained in one or the granted rights for this user. False otherwise. + SAclVO rights = it.next(); + int permission = rights.getPermission(); + if (requestedPermission == (permission & requestedPermission)) return true; } return false; - } - - /** - * ifRange is true and ifUnmodifiedSince or IfMatch fails then we return the entire object (indicated by - * returning a -1 as the function result. - * - * @param ifCond - conditional get defined by these tests - * @param lastModified - value used on ifModifiedSince or ifUnmodifiedSince - * @param ETag - value used on ifMatch and ifNoneMatch - * @param ifRange - using an if-Range HTTP functionality - * @return -1 means return the entire object with an HTTP 200 (not a subrange) - */ - private int conditionPassed( S3ConditionalHeaders ifCond, Date lastModified, String ETag, boolean ifRange ) - { - if (null == ifCond) return 200; - - if (0 > ifCond.ifModifiedSince( lastModified )) - return 304; - - if (0 > ifCond.ifUnmodifiedSince( lastModified )) - return (ifRange ? -1 : 412); - - if (0 > ifCond.ifMatchEtag( ETag )) - return (ifRange ? -1 : 412); - - if (0 > ifCond.ifNoneMatchEtag( ETag )) - return 412; - - return 200; - } + } + + /** + * ifRange is true and ifUnmodifiedSince or IfMatch fails then we return the entire object (indicated by + * returning a -1 as the function result. + * + * @param ifCond - conditional get defined by these tests + * @param lastModified - value used on ifModifiedSince or ifUnmodifiedSince + * @param ETag - value used on ifMatch and ifNoneMatch + * @param ifRange - using an if-Range HTTP functionality + * @return -1 means return the entire object with an HTTP 200 (not a subrange) + */ + private int conditionPassed( S3ConditionalHeaders ifCond, Date lastModified, String ETag, boolean ifRange ) + { + if (null == ifCond) return 200; + + if (0 > ifCond.ifModifiedSince( lastModified )) + return 304; + + if (0 > ifCond.ifUnmodifiedSince( lastModified )) + return (ifRange ? -1 : 412); + + if (0 > ifCond.ifMatchEtag( ETag )) + return (ifRange ? 
diff --git a/client/WEB-INF/web.xml b/client/WEB-INF/web.xml
index c6fd30fa3ac..0d75165659e 100644
--- a/client/WEB-INF/web.xml
+++ b/client/WEB-INF/web.xml
@@ -25,7 +25,7 @@
 contextConfigLocation
-        classpath:applicationContext.xml
+        classpath:applicationContext.xml, classpath:componentContext.xml
diff --git a/client/tomcatconf/api-discovery_commands.properties.in b/client/tomcatconf/api-discovery_commands.properties.in
deleted file mode 100644
index 49ddfde42d8..00000000000
--- a/client/tomcatconf/api-discovery_commands.properties.in
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# bitmap of permissions at the end of each classname, 1 = ADMIN, 2 =
-# RESOURCE_DOMAIN_ADMIN, 4 = DOMAIN_ADMIN, 8 = USER
-# Please standardize naming conventions to camel-case (even for acronyms).
-
-# CloudStack API Discovery service command
-listApis=15
diff --git a/client/tomcatconf/applicationContext.xml.in b/client/tomcatconf/applicationContext.xml.in
index aabf9636d95..186a1685fbd 100644
--- a/client/tomcatconf/applicationContext.xml.in
+++ b/client/tomcatconf/applicationContext.xml.in
@@ -1,3 +1,21 @@
+
+
+
diff --git a/client/tomcatconf/componentContext.xml.in b/client/tomcatconf/componentContext.xml.in
new file mode 100644
index 00000000000..45068b663cb
--- /dev/null
+++ b/client/tomcatconf/componentContext.xml.in
@@ -0,0 +1,70 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/client/tomcatconf/components.xml.in b/client/tomcatconf/components.xml.in
index b9feed15a88..bb39839c820 100755
--- a/client/tomcatconf/components.xml.in
+++ b/client/tomcatconf/components.xml.in
@@ -53,12 +53,9 @@ under the License.
true - + - - - diff --git a/core/src/com/cloud/storage/resource/CifsSecondaryStorageResource.java b/core/src/com/cloud/storage/resource/CifsSecondaryStorageResource.java index c606fca1fbf..0df2a8466f2 100755 --- a/core/src/com/cloud/storage/resource/CifsSecondaryStorageResource.java +++ b/core/src/com/cloud/storage/resource/CifsSecondaryStorageResource.java @@ -40,8 +40,8 @@ import com.cloud.agent.api.PingStorageCommand; import com.cloud.agent.api.ReadyAnswer; import com.cloud.agent.api.ReadyCommand; import com.cloud.agent.api.SecStorageFirewallCfgCommand; -import com.cloud.agent.api.SecStorageSetupCommand; import com.cloud.agent.api.SecStorageFirewallCfgCommand.PortConfig; +import com.cloud.agent.api.SecStorageSetupCommand; import com.cloud.agent.api.SecStorageVMSetupCommand; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupStorageCommand; @@ -54,7 +54,6 @@ import com.cloud.agent.api.storage.UploadCommand; import com.cloud.agent.api.storage.ssCommand; import com.cloud.host.Host; import com.cloud.host.Host.Type; -import com.cloud.resource.ServerResource; import com.cloud.resource.ServerResourceBase; import com.cloud.storage.Storage; import com.cloud.storage.Storage.StoragePoolType; @@ -65,7 +64,7 @@ import com.cloud.storage.template.TemplateInfo; import com.cloud.storage.template.UploadManager; import com.cloud.storage.template.UploadManagerImpl; import com.cloud.utils.NumbersUtil; -import com.cloud.utils.component.ComponentLocator; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.utils.net.NfsUtils; @@ -81,10 +80,10 @@ import com.cloud.utils.script.Script; public class CifsSecondaryStorageResource extends ServerResourceBase implements SecondaryStorageResource { private static final Logger s_logger = Logger.getLogger(CifsSecondaryStorageResource.class); int _timeout; - + String _instance; String _parent; - + String _dc; String _pod; String _guid; @@ -94,27 +93,27 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements StorageLayer _storage; boolean _inSystemVM = false; boolean _sslCopy = false; - + Random _rand = new Random(System.currentTimeMillis()); - + DownloadManager _dlMgr; UploadManager _upldMgr; - private String _configSslScr; - private String _configAuthScr; - private String _configIpFirewallScr; - private String _publicIp; - private String _hostname; - private String _localgw; - private String _eth1mask; - private String _eth1ip; - + private String _configSslScr; + private String _configAuthScr; + private String _configIpFirewallScr; + private String _publicIp; + private String _hostname; + private String _localgw; + private String _eth1mask; + private String _eth1ip; + @Override public void disconnected() { if (_parent != null && !_inSystemVM) { Script script = new Script(!_inSystemVM, "umount", _timeout, s_logger); script.add(_parent); script.execute(); - + File file = new File(_parent); file.delete(); } @@ -133,104 +132,104 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements } else if(cmd instanceof DeleteEntityDownloadURLCommand){ return _upldMgr.handleDeleteEntityDownloadURLCommand((DeleteEntityDownloadURLCommand)cmd); } else if (cmd instanceof GetStorageStatsCommand) { - return execute((GetStorageStatsCommand)cmd); + return execute((GetStorageStatsCommand)cmd); } else if (cmd instanceof CheckHealthCommand) { return new CheckHealthAnswer((CheckHealthCommand)cmd, true); } else if 
(cmd instanceof DeleteTemplateCommand) { - return execute((DeleteTemplateCommand) cmd); + return execute((DeleteTemplateCommand) cmd); } else if (cmd instanceof ReadyCommand) { return new ReadyAnswer((ReadyCommand)cmd); } else if (cmd instanceof SecStorageFirewallCfgCommand){ - return execute((SecStorageFirewallCfgCommand)cmd); + return execute((SecStorageFirewallCfgCommand)cmd); } else if (cmd instanceof SecStorageVMSetupCommand){ - return execute((SecStorageVMSetupCommand)cmd); + return execute((SecStorageVMSetupCommand)cmd); } else if (cmd instanceof SecStorageSetupCommand){ return new Answer(cmd, true, "success"); } else { return Answer.createUnsupportedCommandAnswer(cmd); } } - + private Answer execute(SecStorageVMSetupCommand cmd) { - if (!_inSystemVM){ - return new Answer(cmd, true, null); - } - boolean success = true; - StringBuilder result = new StringBuilder(); - for (String cidr: cmd.getAllowedInternalSites()) { - String tmpresult = allowOutgoingOnPrivate(cidr); - if (tmpresult != null) { - result.append(", ").append(tmpresult); - success = false; - } - } - if (success) { - if (cmd.getCopyPassword() != null && cmd.getCopyUserName() != null) { - String tmpresult = configureAuth(cmd.getCopyUserName(), cmd.getCopyPassword()); - if (tmpresult != null) { - result.append("Failed to configure auth for copy ").append(tmpresult); - success = false; - } - } - } - return new Answer(cmd, success, result.toString()); + if (!_inSystemVM){ + return new Answer(cmd, true, null); + } + boolean success = true; + StringBuilder result = new StringBuilder(); + for (String cidr: cmd.getAllowedInternalSites()) { + String tmpresult = allowOutgoingOnPrivate(cidr); + if (tmpresult != null) { + result.append(", ").append(tmpresult); + success = false; + } + } + if (success) { + if (cmd.getCopyPassword() != null && cmd.getCopyUserName() != null) { + String tmpresult = configureAuth(cmd.getCopyUserName(), cmd.getCopyPassword()); + if (tmpresult != null) { + result.append("Failed to configure auth for copy ").append(tmpresult); + success = false; + } + } + } + return new Answer(cmd, success, result.toString()); + + } - } - private String allowOutgoingOnPrivate(String destCidr) { - - Script command = new Script("/bin/bash", s_logger); - String intf = "eth1"; - command.add("-c"); - command.add("iptables -I OUTPUT -o " + intf + " -d " + destCidr + " -p tcp -m state --state NEW -m tcp -j ACCEPT"); - String result = command.execute(); - if (result != null) { - s_logger.warn("Error in allowing outgoing to " + destCidr + ", err=" + result ); - return "Error in allowing outgoing to " + destCidr + ", err=" + result; - } - addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, destCidr); - return null; - } - - + Script command = new Script("/bin/bash", s_logger); + String intf = "eth1"; + command.add("-c"); + command.add("iptables -I OUTPUT -o " + intf + " -d " + destCidr + " -p tcp -m state --state NEW -m tcp -j ACCEPT"); - private Answer execute(SecStorageFirewallCfgCommand cmd) { - if (!_inSystemVM){ - return new Answer(cmd, true, null); - } + String result = command.execute(); + if (result != null) { + s_logger.warn("Error in allowing outgoing to " + destCidr + ", err=" + result ); + return "Error in allowing outgoing to " + destCidr + ", err=" + result; + } + addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, destCidr); + return null; + } - List ipList = new ArrayList(); - - for (PortConfig pCfg:cmd.getPortConfigs()){ - if (pCfg.isAdd()) { - ipList.add(pCfg.getSourceIp()); - } - } - boolean success = true; 
- String result; - result = configureIpFirewall(ipList); - if (result !=null) - success = false; - return new Answer(cmd, success, result); - } - protected GetStorageStatsAnswer execute(final GetStorageStatsCommand cmd) { + private Answer execute(SecStorageFirewallCfgCommand cmd) { + if (!_inSystemVM){ + return new Answer(cmd, true, null); + } + + List ipList = new ArrayList(); + + for (PortConfig pCfg:cmd.getPortConfigs()){ + if (pCfg.isAdd()) { + ipList.add(pCfg.getSourceIp()); + } + } + boolean success = true; + String result; + result = configureIpFirewall(ipList); + if (result !=null) + success = false; + + return new Answer(cmd, success, result); + } + + protected GetStorageStatsAnswer execute(final GetStorageStatsCommand cmd) { final long usedSize = getUsedSize(); final long totalSize = getTotalSize(); if (usedSize == -1 || totalSize == -1) { - return new GetStorageStatsAnswer(cmd, "Unable to get storage stats"); + return new GetStorageStatsAnswer(cmd, "Unable to get storage stats"); } else { - return new GetStorageStatsAnswer(cmd, totalSize, usedSize) ; + return new GetStorageStatsAnswer(cmd, totalSize, usedSize) ; } } - + @Override public String getRootDir(ssCommand cmd){ return null; } - + protected Answer execute(final DeleteTemplateCommand cmd) { String relativeTemplatePath = cmd.getTemplatePath(); String parent = _parent; @@ -278,15 +277,15 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements } return new Answer(cmd, true, null); } - + protected long getUsedSize() { - return _storage.getUsedSpace(_parent); + return _storage.getUsedSpace(_parent); } - + protected long getTotalSize() { - return _storage.getTotalSpace(_parent); + return _storage.getTotalSpace(_parent); } - + protected long convertFilesystemSize(final String size) { if (size == null || size.isEmpty()) { return -1; @@ -305,25 +304,25 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements return (long)(Double.parseDouble(size.substring(0, size.length() - 1)) * multiplier); } - + @Override public Type getType() { return Host.Type.SecondaryStorage; } - + @Override public PingCommand getCurrentStatus(final long id) { return new PingStorageCommand(Host.Type.Storage, id, new HashMap()); } - + @Override public boolean configure(String name, Map params) throws ConfigurationException { - _eth1ip = (String)params.get("eth1ip"); + _eth1ip = (String)params.get("eth1ip"); if (_eth1ip != null) { //can only happen inside service vm - params.put("private.network.device", "eth1"); + params.put("private.network.device", "eth1"); } else { - s_logger.warn("Wait, what's going on? eth1ip is null!!"); + s_logger.warn("Wait, what's going on? 
eth1ip is null!!"); } String eth2ip = (String) params.get("eth2ip"); if (eth2ip != null) { @@ -331,23 +330,23 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements } _publicIp = (String) params.get("eth2ip"); _hostname = (String) params.get("name"); - + super.configure(name, params); - + _params = params; String value = (String)params.get("scripts.timeout"); _timeout = NumbersUtil.parseInt(value, 1440) * 1000; - + _storage = (StorageLayer)params.get(StorageLayer.InstanceConfigKey); if (_storage == null) { value = (String)params.get(StorageLayer.ClassConfigKey); if (value == null) { value = "com.cloud.storage.JavaStorageLayer"; } - + try { Class clazz = Class.forName(value); - _storage = (StorageLayer)ComponentLocator.inject(clazz); + _storage = (StorageLayer)ComponentContext.inject(clazz); _storage.configure("StorageLayer", params); } catch (ClassNotFoundException e) { throw new ConfigurationException("Unable to find class " + value); @@ -362,30 +361,30 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements if (_configSslScr != null) { s_logger.info("config_auth.sh found in " + _configAuthScr); } - + _configIpFirewallScr = Script.findScript(getDefaultScriptsDir(), "ipfirewall.sh"); if (_configIpFirewallScr != null) { s_logger.info("_configIpFirewallScr found in " + _configIpFirewallScr); } - + _guid = (String)params.get("guid"); if (_guid == null) { throw new ConfigurationException("Unable to find the guid"); } - + _dc = (String)params.get("zone"); if (_dc == null) { throw new ConfigurationException("Unable to find the zone"); } _pod = (String)params.get("pod"); - + _instance = (String)params.get("instance"); _mountParent = (String)params.get("mount.parent"); if (_mountParent == null) { _mountParent = File.separator + "mnt"; } - + if (_instance != null) { _mountParent = _mountParent + File.separator + _instance; } @@ -394,63 +393,63 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements if (_nfsPath == null) { throw new ConfigurationException("Unable to find mount.path"); } - - + + String inSystemVM = (String)params.get("secondary.storage.vm"); if (inSystemVM == null || "true".equalsIgnoreCase(inSystemVM)) { - _inSystemVM = true; + _inSystemVM = true; _localgw = (String)params.get("localgw"); if (_localgw != null) { //can only happen inside service vm - _eth1mask = (String)params.get("eth1mask"); - String internalDns1 = (String)params.get("dns1"); - String internalDns2 = (String)params.get("dns2"); + _eth1mask = (String)params.get("eth1mask"); + String internalDns1 = (String)params.get("dns1"); + String internalDns2 = (String)params.get("dns2"); - if (internalDns1 == null) { - s_logger.warn("No DNS entry found during configuration of NfsSecondaryStorage"); - } else { - addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, internalDns1); - } - - String mgmtHost = (String)params.get("host"); - String nfsHost = NfsUtils.getHostPart(_nfsPath); - if (nfsHost == null) { - s_logger.error("Invalid or corrupt nfs url " + _nfsPath); - throw new CloudRuntimeException("Unable to determine host part of nfs path"); - } - try { - InetAddress nfsHostAddr = InetAddress.getByName(nfsHost); - nfsHost = nfsHostAddr.getHostAddress(); - } catch (UnknownHostException uhe) { - s_logger.error("Unable to resolve nfs host " + nfsHost); - throw new CloudRuntimeException("Unable to resolve nfs host to an ip address " + nfsHost); - } - addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, nfsHost); - 
addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, mgmtHost); - if (internalDns2 != null) { - addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, internalDns2); - } + if (internalDns1 == null) { + s_logger.warn("No DNS entry found during configuration of NfsSecondaryStorage"); + } else { + addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, internalDns1); + } + + String mgmtHost = (String)params.get("host"); + String nfsHost = NfsUtils.getHostPart(_nfsPath); + if (nfsHost == null) { + s_logger.error("Invalid or corrupt nfs url " + _nfsPath); + throw new CloudRuntimeException("Unable to determine host part of nfs path"); + } + try { + InetAddress nfsHostAddr = InetAddress.getByName(nfsHost); + nfsHost = nfsHostAddr.getHostAddress(); + } catch (UnknownHostException uhe) { + s_logger.error("Unable to resolve nfs host " + nfsHost); + throw new CloudRuntimeException("Unable to resolve nfs host to an ip address " + nfsHost); + } + addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, nfsHost); + addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, mgmtHost); + if (internalDns2 != null) { + addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, internalDns2); + } } String useSsl = (String)params.get("sslcopy"); if (useSsl != null) { - _sslCopy = Boolean.parseBoolean(useSsl); - if (_sslCopy) { - configureSSL(); - } + _sslCopy = Boolean.parseBoolean(useSsl); + if (_sslCopy) { + configureSSL(); + } } - startAdditionalServices(); - _params.put("install.numthreads", "50"); - _params.put("secondary.storage.vm", "true"); + startAdditionalServices(); + _params.put("install.numthreads", "50"); + _params.put("secondary.storage.vm", "true"); } _parent = mount(_nfsPath, _mountParent); if (_parent == null) { throw new ConfigurationException("Unable to create mount point"); } - - + + s_logger.info("Mount point established at " + _parent); - + try { _params.put("template.parent", _parent); _params.put(StorageLayer.InstanceConfigKey, _storage); @@ -464,98 +463,98 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements } return true; } - + private void startAdditionalServices() { - Script command = new Script("/bin/bash", s_logger); - command.add("-c"); - command.add("if [ -f /etc/init.d/ssh ]; then service ssh restart; else service sshd restart; fi "); - String result = command.execute(); - if (result != null) { - s_logger.warn("Error in starting sshd service err=" + result ); - } - command = new Script("/bin/bash", s_logger); - command.add("-c"); - command.add("iptables -I INPUT -i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT"); - result = command.execute(); - if (result != null) { - s_logger.warn("Error in opening up ssh port err=" + result ); - } - } - - private void addRouteToInternalIpOrCidr(String localgw, String eth1ip, String eth1mask, String destIpOrCidr) { - s_logger.debug("addRouteToInternalIp: localgw=" + localgw + ", eth1ip=" + eth1ip + ", eth1mask=" + eth1mask + ",destIp=" + destIpOrCidr); - if (destIpOrCidr == null) { - s_logger.debug("addRouteToInternalIp: destIp is null"); - return; - } - if (!NetUtils.isValidIp(destIpOrCidr) && !NetUtils.isValidCIDR(destIpOrCidr)){ - s_logger.warn(" destIp is not a valid ip address or cidr destIp=" + destIpOrCidr); - return; - } - boolean inSameSubnet = false; - if (NetUtils.isValidIp(destIpOrCidr)) { - if (eth1ip != null && eth1mask != null) { - inSameSubnet = NetUtils.sameSubnet(eth1ip, destIpOrCidr, eth1mask); - } else { - s_logger.warn("addRouteToInternalIp: unable to determine same 
subnet: _eth1ip=" + eth1ip + ", dest ip=" + destIpOrCidr + ", _eth1mask=" + eth1mask); - } - } else { - inSameSubnet = NetUtils.isNetworkAWithinNetworkB(destIpOrCidr, NetUtils.ipAndNetMaskToCidr(eth1ip, eth1mask)); - } - if (inSameSubnet) { - s_logger.debug("addRouteToInternalIp: dest ip " + destIpOrCidr + " is in the same subnet as eth1 ip " + eth1ip); - return; - } - Script command = new Script("/bin/bash", s_logger); - command.add("-c"); - command.add("ip route delete " + destIpOrCidr); - command.execute(); - command = new Script("/bin/bash", s_logger); - command.add("-c"); - command.add("ip route add " + destIpOrCidr + " via " + localgw); - String result = command.execute(); - if (result != null) { - s_logger.warn("Error in configuring route to internal ip err=" + result ); - } else { - s_logger.debug("addRouteToInternalIp: added route to internal ip=" + destIpOrCidr + " via " + localgw); - } + Script command = new Script("/bin/bash", s_logger); + command.add("-c"); + command.add("if [ -f /etc/init.d/ssh ]; then service ssh restart; else service sshd restart; fi "); + String result = command.execute(); + if (result != null) { + s_logger.warn("Error in starting sshd service err=" + result ); + } + command = new Script("/bin/bash", s_logger); + command.add("-c"); + command.add("iptables -I INPUT -i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT"); + result = command.execute(); + if (result != null) { + s_logger.warn("Error in opening up ssh port err=" + result ); + } } - private void configureSSL() { - Script command = new Script(_configSslScr); - command.add(_publicIp); - command.add(_hostname); - String result = command.execute(); - if (result != null) { - s_logger.warn("Unable to configure httpd to use ssl"); - } - } - - private String configureAuth(String user, String passwd) { - Script command = new Script(_configAuthScr); - command.add(user); - command.add(passwd); - String result = command.execute(); - if (result != null) { - s_logger.warn("Unable to configure httpd to use auth"); - } - return result; - } - - private String configureIpFirewall(List ipList){ - Script command = new Script(_configIpFirewallScr); - for (String ip : ipList){ - command.add(ip); - } - - String result = command.execute(); - if (result != null) { - s_logger.warn("Unable to configure firewall for command : " +command); - } - return result; - } - - protected String mount(String path, String parent) { + private void addRouteToInternalIpOrCidr(String localgw, String eth1ip, String eth1mask, String destIpOrCidr) { + s_logger.debug("addRouteToInternalIp: localgw=" + localgw + ", eth1ip=" + eth1ip + ", eth1mask=" + eth1mask + ",destIp=" + destIpOrCidr); + if (destIpOrCidr == null) { + s_logger.debug("addRouteToInternalIp: destIp is null"); + return; + } + if (!NetUtils.isValidIp(destIpOrCidr) && !NetUtils.isValidCIDR(destIpOrCidr)){ + s_logger.warn(" destIp is not a valid ip address or cidr destIp=" + destIpOrCidr); + return; + } + boolean inSameSubnet = false; + if (NetUtils.isValidIp(destIpOrCidr)) { + if (eth1ip != null && eth1mask != null) { + inSameSubnet = NetUtils.sameSubnet(eth1ip, destIpOrCidr, eth1mask); + } else { + s_logger.warn("addRouteToInternalIp: unable to determine same subnet: _eth1ip=" + eth1ip + ", dest ip=" + destIpOrCidr + ", _eth1mask=" + eth1mask); + } + } else { + inSameSubnet = NetUtils.isNetworkAWithinNetworkB(destIpOrCidr, NetUtils.ipAndNetMaskToCidr(eth1ip, eth1mask)); + } + if (inSameSubnet) { + s_logger.debug("addRouteToInternalIp: dest ip " + destIpOrCidr + " is in 
the same subnet as eth1 ip " + eth1ip); + return; + } + Script command = new Script("/bin/bash", s_logger); + command.add("-c"); + command.add("ip route delete " + destIpOrCidr); + command.execute(); + command = new Script("/bin/bash", s_logger); + command.add("-c"); + command.add("ip route add " + destIpOrCidr + " via " + localgw); + String result = command.execute(); + if (result != null) { + s_logger.warn("Error in configuring route to internal ip err=" + result ); + } else { + s_logger.debug("addRouteToInternalIp: added route to internal ip=" + destIpOrCidr + " via " + localgw); + } + } + + private void configureSSL() { + Script command = new Script(_configSslScr); + command.add(_publicIp); + command.add(_hostname); + String result = command.execute(); + if (result != null) { + s_logger.warn("Unable to configure httpd to use ssl"); + } + } + + private String configureAuth(String user, String passwd) { + Script command = new Script(_configAuthScr); + command.add(user); + command.add(passwd); + String result = command.execute(); + if (result != null) { + s_logger.warn("Unable to configure httpd to use auth"); + } + return result; + } + + private String configureIpFirewall(List ipList){ + Script command = new Script(_configIpFirewallScr); + for (String ip : ipList){ + command.add(ip); + } + + String result = command.execute(); + if (result != null) { + s_logger.warn("Unable to configure firewall for command : " +command); + } + return result; + } + + protected String mount(String path, String parent) { String mountPoint = null; for (int i = 0; i < 10; i++) { String mntPt = parent + File.separator + Integer.toHexString(_rand.nextInt(Integer.MAX_VALUE)); @@ -568,29 +567,29 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements } s_logger.debug("Unable to create mount: " + mntPt); } - + if (mountPoint == null) { s_logger.warn("Unable to create a mount point"); return null; } - + Script script = null; String result = null; script = new Script(!_inSystemVM, "umount", _timeout, s_logger); script.add(path); result = script.execute(); - + if( _parent != null ) { script = new Script("rmdir", _timeout, s_logger); script.add(_parent); result = script.execute(); } - + Script command = new Script(!_inSystemVM, "mount", _timeout, s_logger); command.add("-t", "cifs"); if (_inSystemVM) { - //Fedora Core 12 errors out with any -o option executed from java - //command.add("-o", "soft,timeo=133,retrans=2147483647,tcp,acdirmax=0,acdirmin=0"); + //Fedora Core 12 errors out with any -o option executed from java + //command.add("-o", "soft,timeo=133,retrans=2147483647,tcp,acdirmax=0,acdirmin=0"); } String tok[] = path.split(":"); //command.add(path); @@ -601,25 +600,25 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements s_logger.warn("Unable to mount " + path + " due to " + result); File file = new File(mountPoint); if (file.exists()) - file.delete(); + file.delete(); return null; } - - - + + + // XXX: Adding the check for creation of snapshots dir here. Might have to move it somewhere more logical later. 
         if (!checkForSnapshotsDir(mountPoint)) {
-        	return null;
+            return null;
         }
-        
+
         // Create the volumes dir
         if (!checkForVolumesDir(mountPoint)) {
-        	return null;
+            return null;
         }
-        
+
         return mountPoint;
     }
-    
+
     @Override
     public boolean start() {
         return true;
@@ -633,14 +632,14 @@
     @Override
     public StartupCommand[] initialize() {
         /*disconnected();
-        
+
         _parent = mount(_nfsPath, _mountParent);
-        
+
         if( _parent == null ) {
             s_logger.warn("Unable to mount the nfs server");
             return null;
         }
-        
+
         try {
             _params.put("template.parent", _parent);
             _params.put(StorageLayer.InstanceConfigKey, _storage);
@@ -650,12 +649,12 @@
             s_logger.warn("Caught problem while configuring folders", e);
             return null;
         }*/
-        
+
         final StartupStorageCommand cmd = new StartupStorageCommand(_parent, StoragePoolType.NetworkFilesystem, getTotalSize(), new HashMap());
-        
+
         cmd.setResourceType(Storage.StorageResourceType.SECONDARY_STORAGE);
         cmd.setIqn(null);
-        
+
         fillNetworkInformation(cmd);
         cmd.setDataCenter(_dc);
         cmd.setPod(_pod);
@@ -687,38 +686,38 @@
         String snapshotsDirLocation = mountPoint + File.separator + "snapshots";
         return createDir("snapshots", snapshotsDirLocation, mountPoint);
     }
-    
-    protected boolean checkForVolumesDir(String mountPoint) {
-    	String volumesDirLocation = mountPoint + "/" + "volumes";
-    	return createDir("volumes", volumesDirLocation, mountPoint);
-    }
-    
-    protected boolean createDir(String dirName, String dirLocation, String mountPoint) {
-    	boolean dirExists = false;
-    	
-    	File dir = new File(dirLocation);
-    	if (dir.exists()) {
-    		if (dir.isDirectory()) {
-    			s_logger.debug(dirName + " already exists on secondary storage, and is mounted at " + mountPoint);
-    			dirExists = true;
-    		} else {
-    			if (dir.delete() && _storage.mkdir(dirLocation)) {
-    				dirExists = true;
-    			}
-    		}
-    	} else if (_storage.mkdir(dirLocation)) {
-    		dirExists = true;
-    	}
-    	if (dirExists) {
-    		s_logger.info(dirName + " directory created/exists on Secondary Storage.");
-    	} else {
-    		s_logger.info(dirName + " directory does not exist on Secondary Storage.");
-    	}
-    	
-    	return dirExists;
+    protected boolean checkForVolumesDir(String mountPoint) {
+        String volumesDirLocation = mountPoint + "/" + "volumes";
+        return createDir("volumes", volumesDirLocation, mountPoint);
     }
-    
+
+    protected boolean createDir(String dirName, String dirLocation, String mountPoint) {
+        boolean dirExists = false;
+
+        File dir = new File(dirLocation);
+        if (dir.exists()) {
+            if (dir.isDirectory()) {
+                s_logger.debug(dirName + " already exists on secondary storage, and is mounted at " + mountPoint);
+                dirExists = true;
+            } else {
+                if (dir.delete() && _storage.mkdir(dirLocation)) {
+                    dirExists = true;
+                }
+            }
+        } else if (_storage.mkdir(dirLocation)) {
+            dirExists = true;
+        }
+
+        if (dirExists) {
+            s_logger.info(dirName + " directory created/exists on Secondary Storage.");
+        } else {
+            s_logger.info(dirName + " directory does not exist on Secondary Storage.");
+        }
+
+        return dirExists;
+    }
+
     @Override
     protected String getDefaultScriptsDir() {
         return "./scripts/storage/secondary";
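The CIFS resource above and the local/NFS resources below all bootstrap their StorageLayer the same way; condensed from the configure() bodies in this patch (an illustrative sketch of the shared pattern, not new code in the patch):

    // Sketch of the StorageLayer bootstrap shared by the secondary storage
    // resources: prefer an instance handed in via params, else reflectively
    // load the configured class (defaulting to JavaStorageLayer) and inject
    // it through ComponentContext, which replaces the old ComponentLocator.
    StorageLayer storage = (StorageLayer) params.get(StorageLayer.InstanceConfigKey);
    if (storage == null) {
        String className = (String) params.get(StorageLayer.ClassConfigKey);
        if (className == null) {
            className = "com.cloud.storage.JavaStorageLayer";
        }
        try {
            Class<?> clazz = Class.forName(className);
            storage = (StorageLayer) ComponentContext.inject(clazz);
            storage.configure("StorageLayer", params);
        } catch (ClassNotFoundException e) {
            throw new ConfigurationException("Unable to find class " + className);
        }
    }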
diff --git a/core/src/com/cloud/storage/resource/LocalSecondaryStorageResource.java b/core/src/com/cloud/storage/resource/LocalSecondaryStorageResource.java
index d9c69f8b151..b86fe6c6c2d 100644
--- a/core/src/com/cloud/storage/resource/LocalSecondaryStorageResource.java
+++ b/core/src/com/cloud/storage/resource/LocalSecondaryStorageResource.java
@@ -19,7 +19,6 @@ package com.cloud.storage.resource;
 import java.util.HashMap;
 import java.util.Map;
-
 import javax.naming.ConfigurationException;
 import org.apache.log4j.Logger;
@@ -36,11 +35,11 @@ import com.cloud.agent.api.ReadyCommand;
 import com.cloud.agent.api.SecStorageSetupCommand;
 import com.cloud.agent.api.StartupCommand;
 import com.cloud.agent.api.StartupStorageCommand;
+import com.cloud.agent.api.storage.DownloadCommand;
+import com.cloud.agent.api.storage.DownloadProgressCommand;
 import com.cloud.agent.api.storage.ListTemplateAnswer;
 import com.cloud.agent.api.storage.ListTemplateCommand;
 import com.cloud.agent.api.storage.ssCommand;
-import com.cloud.agent.api.storage.DownloadCommand;
-import com.cloud.agent.api.storage.DownloadProgressCommand;
 import com.cloud.host.Host;
 import com.cloud.host.Host.Type;
 import com.cloud.resource.ServerResourceBase;
@@ -50,39 +49,38 @@ import com.cloud.storage.StorageLayer;
 import com.cloud.storage.template.DownloadManager;
 import com.cloud.storage.template.DownloadManagerImpl;
 import com.cloud.storage.template.TemplateInfo;
-import com.cloud.utils.component.ComponentLocator;
-import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.component.ComponentContext;

 public class LocalSecondaryStorageResource extends ServerResourceBase implements SecondaryStorageResource {
     private static final Logger s_logger = Logger.getLogger(LocalSecondaryStorageResource.class);
     int _timeout;
-    
+
     String _instance;
     String _parent;
-    
+
     String _dc;
     String _pod;
     String _guid;
-    
+
     StorageLayer _storage;
-    
+
     DownloadManager _dlMgr;
-    
+
     @Override
     public void disconnected() {
     }
-    
+
     @Override
     public String getRootDir(ssCommand cmd){
         return getRootDir();
-        
+
     }
-    
+
     public String getRootDir() {
         return _parent;
     }
-    
+
     @Override
     public Answer executeRequest(Command cmd) {
         if (cmd instanceof DownloadProgressCommand) {
@@ -103,7 +101,7 @@
             return Answer.createUnsupportedCommandAnswer(cmd);
         }
     }
-    
+
     private Answer execute(ComputeChecksumCommand cmd) {
         return new Answer(cmd, false, null);
     }
@@ -119,13 +117,13 @@
     public Type getType() {
         return Host.Type.LocalSecondaryStorage;
     }
-    
+
     @Override
     public PingCommand getCurrentStatus(final long id) {
         return new PingStorageCommand(Host.Type.Storage, id, new HashMap());
     }
-    
-    
+
+
     @Override
     @SuppressWarnings("unchecked")
     public boolean configure(String name, Map params) throws ConfigurationException {
@@ -135,30 +133,30 @@
         if (_guid == null) {
             throw new ConfigurationException("Unable to find the guid");
         }
-        
+
         _dc = (String)params.get("zone");
         if (_dc == null) {
             throw new ConfigurationException("Unable to find the zone");
         }
         _pod = (String)params.get("pod");
-        
+
         _instance = (String)params.get("instance");
         _parent = (String)params.get("mount.path");
         if (_parent == null) {
             throw new ConfigurationException("No directory specified.");
         }
-        
+
         _storage = (StorageLayer)params.get(StorageLayer.InstanceConfigKey);
         if (_storage == null) {
             String value = (String)params.get(StorageLayer.ClassConfigKey);
             if (value == null) {
                 value = "com.cloud.storage.JavaStorageLayer";
             }
-            
+
             try {
                 Class clazz = (Class)Class.forName(value);
-                _storage = ComponentLocator.inject(clazz);
+                _storage = ComponentContext.inject(clazz);
             } catch (ClassNotFoundException e) {
                 throw new ConfigurationException("Unable to find class " + value);
             }
@@ -168,15 +166,15 @@
             s_logger.warn("Unable to create the directory " + _parent);
             throw new ConfigurationException("Unable to create the directory " + _parent);
         }
-        
+
         s_logger.info("Mount point established at " + _parent);

         params.put("template.parent", _parent);
         params.put(StorageLayer.InstanceConfigKey, _storage);
-        
+
         _dlMgr = new DownloadManagerImpl();
         _dlMgr.configure("DownloadManager", params);
-        
+
         return true;
     }
@@ -192,7 +190,7 @@
     @Override
     public StartupCommand[] initialize() {
-        
+
         final StartupStorageCommand cmd = new StartupStorageCommand(_parent, StoragePoolType.Filesystem, 1024l*1024l*1024l*1024l, _dlMgr.gatherTemplateInfo(_parent));
         cmd.setResourceType(Storage.StorageResourceType.LOCAL_SECONDARY_STORAGE);
         cmd.setIqn("local://");
@@ -202,10 +200,10 @@
         cmd.setGuid(_guid);
         cmd.setName(_guid);
         cmd.setVersion(LocalSecondaryStorageResource.class.getPackage().getImplementationVersion());
-        
+
         return new StartupCommand [] {cmd};
     }
-    
+
     @Override
     protected String getDefaultScriptsDir() {
         return "scripts/storage/secondary";
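All of the resources touched by this patch answer agent commands through the same instanceof dispatch seen in executeRequest() above. A self-contained sketch of that contract (a hypothetical minimal resource; only calls that already appear in this patch are used):

    // Hypothetical minimal executeRequest() illustrating the dispatch contract:
    // switch on the concrete Command type, always return an Answer, and fall
    // back to Answer.createUnsupportedCommandAnswer() for unhandled commands.
    @Override
    public Answer executeRequest(Command cmd) {
        if (cmd instanceof ReadyCommand) {
            return new ReadyAnswer((ReadyCommand) cmd);
        } else if (cmd instanceof CheckHealthCommand) {
            return new CheckHealthAnswer((CheckHealthCommand) cmd, true);
        }
        return Answer.createUnsupportedCommandAnswer(cmd);
    }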
diff --git a/core/src/com/cloud/storage/resource/NfsSecondaryStorageResource.java b/core/src/com/cloud/storage/resource/NfsSecondaryStorageResource.java
index a4bea9df2b4..7c105489d72 100755
--- a/core/src/com/cloud/storage/resource/NfsSecondaryStorageResource.java
+++ b/core/src/com/cloud/storage/resource/NfsSecondaryStorageResource.java
@@ -71,7 +71,6 @@ import com.cloud.agent.api.SecStorageFirewallCfgCommand.PortConfig;
 import com.cloud.agent.api.SecStorageSetupAnswer;
 import com.cloud.agent.api.SecStorageSetupCommand;
 import com.cloud.agent.api.SecStorageSetupCommand.Certificates;
-import com.cloud.agent.api.StartupSecondaryStorageCommand;
 import com.cloud.agent.api.SecStorageVMSetupCommand;
 import com.cloud.agent.api.StartupCommand;
 import com.cloud.agent.api.StartupSecondaryStorageCommand;
@@ -109,7 +108,7 @@ import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.S3Utils;
 import com.cloud.utils.S3Utils.FileNamingStrategy;
 import com.cloud.utils.S3Utils.ObjectNamingStrategy;
-import com.cloud.utils.component.ComponentLocator;
+import com.cloud.utils.component.ComponentContext;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.net.NetUtils;
 import com.cloud.utils.script.OutputInterpreter;
@@ -117,7 +116,7 @@ import com.cloud.utils.script.Script;
 import com.cloud.vm.SecondaryStorageVm;

 public class NfsSecondaryStorageResource extends ServerResourceBase implements
-    SecondaryStorageResource {
+SecondaryStorageResource {

     private static final Logger s_logger = Logger
             .getLogger(NfsSecondaryStorageResource.class);
@@ -126,7 +125,7 @@
     private static final String SNAPSHOT_ROOT_DIR = "snapshots";

     int _timeout;
-    
+
     String _instance;
     String _dc;
     String _pod;
@@ -136,23 +135,23 @@
     StorageLayer _storage;
     boolean _inSystemVM = false;
     boolean _sslCopy = false;
-    
+
     DownloadManager _dlMgr;
     UploadManager _upldMgr;
-	private String _configSslScr;
-	private String _configAuthScr;
-	private String _configIpFirewallScr;
-	private String _publicIp;
-	private String 
_hostname; - private String _localgw; - private String _eth1mask; - private String _eth1ip; - private String _storageIp; - private String _storageNetmask; - private String _storageGateway; - private List nfsIps = new ArrayList(); - final private String _parent = "/mnt/SecStorage"; - final private String _tmpltDir = "/var/cloudstack/template"; + private String _configSslScr; + private String _configAuthScr; + private String _configIpFirewallScr; + private String _publicIp; + private String _hostname; + private String _localgw; + private String _eth1mask; + private String _eth1ip; + private String _storageIp; + private String _storageNetmask; + private String _storageGateway; + private final List nfsIps = new ArrayList(); + final private String _parent = "/mnt/SecStorage"; + final private String _tmpltDir = "/var/cloudstack/template"; final private String _tmpltpp = "template.properties"; @Override public void disconnected() { @@ -171,19 +170,19 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements } else if(cmd instanceof DeleteEntityDownloadURLCommand){ return _upldMgr.handleDeleteEntityDownloadURLCommand((DeleteEntityDownloadURLCommand)cmd); } else if (cmd instanceof GetStorageStatsCommand) { - return execute((GetStorageStatsCommand)cmd); + return execute((GetStorageStatsCommand)cmd); } else if (cmd instanceof CheckHealthCommand) { return new CheckHealthAnswer((CheckHealthCommand)cmd, true); } else if (cmd instanceof DeleteTemplateCommand) { - return execute((DeleteTemplateCommand) cmd); + return execute((DeleteTemplateCommand) cmd); } else if (cmd instanceof DeleteVolumeCommand) { - return execute((DeleteVolumeCommand) cmd); + return execute((DeleteVolumeCommand) cmd); }else if (cmd instanceof ReadyCommand) { return new ReadyAnswer((ReadyCommand)cmd); } else if (cmd instanceof SecStorageFirewallCfgCommand){ - return execute((SecStorageFirewallCfgCommand)cmd); + return execute((SecStorageFirewallCfgCommand)cmd); } else if (cmd instanceof SecStorageVMSetupCommand){ - return execute((SecStorageVMSetupCommand)cmd); + return execute((SecStorageVMSetupCommand)cmd); } else if (cmd instanceof SecStorageSetupCommand){ return execute((SecStorageSetupCommand)cmd); } else if (cmd instanceof ComputeChecksumCommand){ @@ -218,7 +217,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements return Answer.createUnsupportedCommandAnswer(cmd); } } - + @SuppressWarnings("unchecked") private String determineS3TemplateDirectory(final Long accountId, final Long templateId) { @@ -254,7 +253,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements "Unable to create directory " + "download directory %1$s for download of template id " + "%2$s from S3.", downloadDirectory.getName(), - templateId); + templateId); s_logger.error(errMsg); return new Answer(cmd, false, errMsg); } @@ -262,11 +261,11 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements getDirectory(s3, s3.getBucketName(), determineS3TemplateDirectory(accountId, templateId), downloadDirectory, new FileNamingStrategy() { - @Override - public String determineFileName(final String key) { - return substringAfterLast(key, S3Utils.SEPARATOR); - } - }); + @Override + public String determineFileName(final String key) { + return substringAfterLast(key, S3Utils.SEPARATOR); + } + }); return new Answer(cmd, true, format("Successfully downloaded " + "template id %1$s from S3 to directory %2$s", templateId, @@ -395,23 +394,23 @@ public class 
NfsSecondaryStorageResource extends ServerResourceBase implements final String bucket = s3.getBucketName(); putDirectory(s3, bucket, _storage.getFile(templatePath), new FilenameFilter() { - @Override - public boolean accept(final File directory, - final String fileName) { - return !fileName.startsWith("."); - } - }, new ObjectNamingStrategy() { - @Override - public String determineKey(final File file) { - s_logger.debug(String - .format("Determining key using account id %1$s and template id %2$s", - accountId, templateId)); - return join( - asList(determineS3TemplateDirectory( - accountId, templateId), file - .getName()), S3Utils.SEPARATOR); - } - }); + @Override + public boolean accept(final File directory, + final String fileName) { + return !fileName.startsWith("."); + } + }, new ObjectNamingStrategy() { + @Override + public String determineKey(final File file) { + s_logger.debug(String + .format("Determining key using account id %1$s and template id %2$s", + accountId, templateId)); + return join( + asList(determineS3TemplateDirectory( + accountId, templateId), file + .getName()), S3Utils.SEPARATOR); + } + }); return new Answer( cmd, @@ -623,7 +622,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements command.add("-c"); command.add("/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":" + swift.getUserName() + " -K " + swift.getKey() - + " delete " + container + " " + object); + + " delete " + container + " " + object); OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser(); String result = command.execute(parser); if (result != null) { @@ -678,61 +677,61 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements executeWithNoWaitLock(determineSnapshotLockId(accountId, volumeId), new Callable() { + @Override + public Void call() throws Exception { + + final String directoryName = determineSnapshotLocalDirectory( + secondaryStorageUrl, accountId, volumeId); + + String result = createLocalDir(directoryName); + if (result != null) { + throw new InternalErrorException( + format("Failed to create directory %1$s during S3 snapshot download.", + directoryName)); + } + + final String snapshotFileName = determineSnapshotBackupFilename(cmd + .getSnapshotUuid()); + final String key = determineSnapshotS3Key( + accountId, volumeId, snapshotFileName); + final File targetFile = S3Utils.getFile(s3, + s3.getBucketName(), key, + _storage.getFile(directoryName), + new FileNamingStrategy() { + @Override - public Void call() throws Exception { - - final String directoryName = determineSnapshotLocalDirectory( - secondaryStorageUrl, accountId, volumeId); - - String result = createLocalDir(directoryName); - if (result != null) { - throw new InternalErrorException( - format("Failed to create directory %1$s during S3 snapshot download.", - directoryName)); - } - - final String snapshotFileName = determineSnapshotBackupFilename(cmd - .getSnapshotUuid()); - final String key = determineSnapshotS3Key( - accountId, volumeId, snapshotFileName); - final File targetFile = S3Utils.getFile(s3, - s3.getBucketName(), key, - _storage.getFile(directoryName), - new FileNamingStrategy() { - - @Override - public String determineFileName( - String key) { - return snapshotFileName; - } - - }); - - if (cmd.getParent() != null) { - - final String parentPath = join( - File.pathSeparator, directoryName, - determineSnapshotBackupFilename(cmd - .getParent())); - result = 
setVhdParent( - targetFile.getAbsolutePath(), - parentPath); - if (result != null) { - throw new InternalErrorException( - format("Failed to set the parent for backup %1$s to %2$s due to %3$s.", - targetFile - .getAbsolutePath(), - parentPath, result)); - } - - } - - return null; - + public String determineFileName( + String key) { + return snapshotFileName; } }); + if (cmd.getParent() != null) { + + final String parentPath = join( + File.pathSeparator, directoryName, + determineSnapshotBackupFilename(cmd + .getParent())); + result = setVhdParent( + targetFile.getAbsolutePath(), + parentPath); + if (result != null) { + throw new InternalErrorException( + format("Failed to set the parent for backup %1$s to %2$s due to %3$s.", + targetFile + .getAbsolutePath(), + parentPath, result)); + } + + } + + return null; + + } + + }); + return new Answer( cmd, true, @@ -820,7 +819,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements } private Answer execute(ComputeChecksumCommand cmd) { - + String relativeTemplatePath = cmd.getTemplatePath(); String parent = getRootDir(cmd); @@ -841,8 +840,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements if(s_logger.isDebugEnabled()){ s_logger.debug("parent path " +parent+ " relative template path " +relativeTemplatePath ); } - - + + try { digest = MessageDigest.getInstance("MD5"); is = new FileInputStream(f); @@ -855,7 +854,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements if(s_logger.isDebugEnabled()){ s_logger.debug("Successfully calculated checksum for file " +absoluteTemplatePath+ " - " +checksum ); } - + }catch(IOException e) { String logMsg = "Unable to process file for MD5 - " + absoluteTemplatePath; s_logger.error(logMsg); @@ -865,11 +864,11 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements } finally { try { - if(is != null) - is.close(); + if(is != null) + is.close(); } catch (IOException e) { if(s_logger.isDebugEnabled()){ - s_logger.debug("Could not close the file " +absoluteTemplatePath); + s_logger.debug("Could not close the file " +absoluteTemplatePath); } return new Answer(cmd, false, checksum); } @@ -879,38 +878,38 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements } private void configCerts(Certificates certs) { - if (certs == null) { - configureSSL(); - } else { - String prvKey = certs.getPrivKey(); - String pubCert = certs.getPrivCert(); - String certChain = certs.getCertChain(); - - try { - File prvKeyFile = File.createTempFile("prvkey", null); - String prvkeyPath = prvKeyFile.getAbsolutePath(); - BufferedWriter out = new BufferedWriter(new FileWriter(prvKeyFile)); - out.write(prvKey); - out.close(); - - File pubCertFile = File.createTempFile("pubcert", null); - String pubCertFilePath = pubCertFile.getAbsolutePath(); - - out = new BufferedWriter(new FileWriter(pubCertFile)); - out.write(pubCert); - out.close(); - - configureSSL(prvkeyPath, pubCertFilePath, null); - - prvKeyFile.delete(); - pubCertFile.delete(); - - } catch (IOException e) { - s_logger.debug("Failed to config ssl: " + e.toString()); - } - } + if (certs == null) { + configureSSL(); + } else { + String prvKey = certs.getPrivKey(); + String pubCert = certs.getPrivCert(); + String certChain = certs.getCertChain(); + + try { + File prvKeyFile = File.createTempFile("prvkey", null); + String prvkeyPath = prvKeyFile.getAbsolutePath(); + BufferedWriter out = new BufferedWriter(new FileWriter(prvKeyFile)); + out.write(prvKey); + 
out.close(); + + File pubCertFile = File.createTempFile("pubcert", null); + String pubCertFilePath = pubCertFile.getAbsolutePath(); + + out = new BufferedWriter(new FileWriter(pubCertFile)); + out.write(pubCert); + out.close(); + + configureSSL(prvkeyPath, pubCertFilePath, null); + + prvKeyFile.delete(); + pubCertFile.delete(); + + } catch (IOException e) { + s_logger.debug("Failed to config ssl: " + e.toString()); + } + } } - + private Answer execute(SecStorageSetupCommand cmd) { if (!_inSystemVM){ return new Answer(cmd, true, null); @@ -930,7 +929,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements mount(root, nfsPath); configCerts(cmd.getCerts()); - + nfsIps.add(nfsHostIp); return new SecStorageSetupAnswer(dir); } catch (Exception e) { @@ -940,7 +939,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements } } - + private String deleteSnapshotBackupFromLocalFileSystem( final String secondaryStorageUrl, final Long accountId, final Long volumeId, final String name, final Boolean deleteAllFlag) { @@ -1072,7 +1071,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements return new Answer(cmd, false, errMsg); } } - + Map swiftListTemplate(SwiftTO swift) { String[] containers = swiftList(swift, "", ""); if (containers == null) { @@ -1103,9 +1102,9 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements } } return tmpltInfos; - + } - + private Answer execute(ListTemplateCommand cmd) { if (!_inSystemVM){ return new Answer(cmd, true, null); @@ -1119,50 +1118,50 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements return new ListTemplateAnswer(cmd.getSecUrl(), templateInfos); } } - + private Answer execute(ListVolumeCommand cmd) { if (!_inSystemVM){ return new Answer(cmd, true, null); } - + String root = getRootDir(cmd.getSecUrl()); Map templateInfos = _dlMgr.gatherVolumeInfo(root); return new ListVolumeAnswer(cmd.getSecUrl(), templateInfos); - - } - - private Answer execute(SecStorageVMSetupCommand cmd) { - if (!_inSystemVM){ - return new Answer(cmd, true, null); - } - boolean success = true; - StringBuilder result = new StringBuilder(); - for (String cidr: cmd.getAllowedInternalSites()) { - if (nfsIps.contains(cidr)) { - /* - * if the internal download ip is the same with secondary storage ip, adding internal sites will flush - * ip route to nfs through storage ip. - */ - continue; - } - String tmpresult = allowOutgoingOnPrivate(cidr); - if (tmpresult != null) { - result.append(", ").append(tmpresult); - success = false; - } - } - if (success) { - if (cmd.getCopyPassword() != null && cmd.getCopyUserName() != null) { - String tmpresult = configureAuth(cmd.getCopyUserName(), cmd.getCopyPassword()); - if (tmpresult != null) { - result.append("Failed to configure auth for copy ").append(tmpresult); - success = false; - } - } - } - return new Answer(cmd, success, result.toString()); - } + } + + private Answer execute(SecStorageVMSetupCommand cmd) { + if (!_inSystemVM){ + return new Answer(cmd, true, null); + } + boolean success = true; + StringBuilder result = new StringBuilder(); + for (String cidr: cmd.getAllowedInternalSites()) { + if (nfsIps.contains(cidr)) { + /* + * if the internal download ip is the same with secondary storage ip, adding internal sites will flush + * ip route to nfs through storage ip. 
+ */ + continue; + } + String tmpresult = allowOutgoingOnPrivate(cidr); + if (tmpresult != null) { + result.append(", ").append(tmpresult); + success = false; + } + } + if (success) { + if (cmd.getCopyPassword() != null && cmd.getCopyUserName() != null) { + String tmpresult = configureAuth(cmd.getCopyUserName(), cmd.getCopyPassword()); + if (tmpresult != null) { + result.append("Failed to configure auth for copy ").append(tmpresult); + success = false; + } + } + } + return new Answer(cmd, success, result.toString()); + + } private String setVhdParent(String lFullPath, String pFullPath) { Script command = new Script("/bin/bash", s_logger); @@ -1217,55 +1216,55 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements } public String allowOutgoingOnPrivate(String destCidr) { - - Script command = new Script("/bin/bash", s_logger); - String intf = "eth1"; - command.add("-c"); - command.add("iptables -I OUTPUT -o " + intf + " -d " + destCidr + " -p tcp -m state --state NEW -m tcp -j ACCEPT"); - String result = command.execute(); - if (result != null) { - s_logger.warn("Error in allowing outgoing to " + destCidr + ", err=" + result ); - return "Error in allowing outgoing to " + destCidr + ", err=" + result; - } - - addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, destCidr); - - return null; - } - - private Answer execute(SecStorageFirewallCfgCommand cmd) { - if (!_inSystemVM){ - return new Answer(cmd, true, null); - } + Script command = new Script("/bin/bash", s_logger); + String intf = "eth1"; + command.add("-c"); + command.add("iptables -I OUTPUT -o " + intf + " -d " + destCidr + " -p tcp -m state --state NEW -m tcp -j ACCEPT"); - List ipList = new ArrayList(); - - for (PortConfig pCfg:cmd.getPortConfigs()){ - if (pCfg.isAdd()) { - ipList.add(pCfg.getSourceIp()); - } - } - boolean success = true; - String result; - result = configureIpFirewall(ipList, cmd.getIsAppendAIp()); - if (result !=null) - success = false; + String result = command.execute(); + if (result != null) { + s_logger.warn("Error in allowing outgoing to " + destCidr + ", err=" + result ); + return "Error in allowing outgoing to " + destCidr + ", err=" + result; + } - return new Answer(cmd, success, result); - } + addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, destCidr); - protected GetStorageStatsAnswer execute(final GetStorageStatsCommand cmd) { - String rootDir = getRootDir(cmd.getSecUrl()); + return null; + } + + private Answer execute(SecStorageFirewallCfgCommand cmd) { + if (!_inSystemVM){ + return new Answer(cmd, true, null); + } + + List ipList = new ArrayList(); + + for (PortConfig pCfg:cmd.getPortConfigs()){ + if (pCfg.isAdd()) { + ipList.add(pCfg.getSourceIp()); + } + } + boolean success = true; + String result; + result = configureIpFirewall(ipList, cmd.getIsAppendAIp()); + if (result !=null) + success = false; + + return new Answer(cmd, success, result); + } + + protected GetStorageStatsAnswer execute(final GetStorageStatsCommand cmd) { + String rootDir = getRootDir(cmd.getSecUrl()); final long usedSize = getUsedSize(rootDir); final long totalSize = getTotalSize(rootDir); if (usedSize == -1 || totalSize == -1) { - return new GetStorageStatsAnswer(cmd, "Unable to get storage stats"); + return new GetStorageStatsAnswer(cmd, "Unable to get storage stats"); } else { - return new GetStorageStatsAnswer(cmd, totalSize, usedSize) ; + return new GetStorageStatsAnswer(cmd, totalSize, usedSize) ; } } - + protected Answer execute(final DeleteTemplateCommand cmd) { String 
relativeTemplatePath = cmd.getTemplatePath(); String parent = getRootDir(cmd); @@ -1313,7 +1312,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements } return new Answer(cmd, true, null); } - + protected Answer execute(final DeleteVolumeCommand cmd) { String relativeVolumePath = cmd.getVolumePath(); String parent = getRootDir(cmd); @@ -1361,7 +1360,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements } return new Answer(cmd, true, null); } - + Answer execute(CleanupSnapshotBackupCommand cmd) { String parent = getRootDir(cmd.getSecondaryStoragePoolURL()); if (!parent.endsWith(File.separator)) { @@ -1409,22 +1408,22 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements throw new CloudRuntimeException(msg); } } - - + + @Override public String getRootDir(ssCommand cmd){ return getRootDir(cmd.getSecUrl()); - + } - + protected long getUsedSize(String rootDir) { return _storage.getUsedSpace(rootDir); } - + protected long getTotalSize(String rootDir) { - return _storage.getTotalSpace(rootDir); + return _storage.getTotalSpace(rootDir); } - + protected long convertFilesystemSize(final String size) { if (size == null || size.isEmpty()) { return -1; @@ -1443,29 +1442,29 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements return (long)(Double.parseDouble(size.substring(0, size.length() - 1)) * multiplier); } - + @Override public Type getType() { - if(SecondaryStorageVm.Role.templateProcessor.toString().equals(_role)) - return Host.Type.SecondaryStorage; - - return Host.Type.SecondaryStorageCmdExecutor; + if(SecondaryStorageVm.Role.templateProcessor.toString().equals(_role)) + return Host.Type.SecondaryStorage; + + return Host.Type.SecondaryStorageCmdExecutor; } - + @Override public PingCommand getCurrentStatus(final long id) { return new PingStorageCommand(Host.Type.Storage, id, new HashMap()); } - + @Override public boolean configure(String name, Map params) throws ConfigurationException { - _eth1ip = (String)params.get("eth1ip"); + _eth1ip = (String)params.get("eth1ip"); _eth1mask = (String)params.get("eth1mask"); if (_eth1ip != null) { //can only happen inside service vm - params.put("private.network.device", "eth1"); + params.put("private.network.device", "eth1"); } else { - s_logger.warn("Wait, what's going on? eth1ip is null!!"); + s_logger.warn("Wait, what's going on? 
eth1ip is null!!"); } String eth2ip = (String) params.get("eth2ip"); if (eth2ip != null) { @@ -1473,29 +1472,29 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements } _publicIp = (String) params.get("eth2ip"); _hostname = (String) params.get("name"); - + _storageIp = (String) params.get("storageip"); if (_storageIp == null) { - s_logger.warn("Wait, there is no storageip in /proc/cmdline, something wrong!"); + s_logger.warn("Wait, there is no storageip in /proc/cmdline, something wrong!"); } _storageNetmask = (String) params.get("storagenetmask"); _storageGateway = (String) params.get("storagegateway"); super.configure(name, params); - + _params = params; String value = (String)params.get("scripts.timeout"); _timeout = NumbersUtil.parseInt(value, 1440) * 1000; - + _storage = (StorageLayer)params.get(StorageLayer.InstanceConfigKey); if (_storage == null) { value = (String)params.get(StorageLayer.ClassConfigKey); if (value == null) { value = "com.cloud.storage.JavaStorageLayer"; } - + try { Class clazz = Class.forName(value); - _storage = (StorageLayer)ComponentLocator.inject(clazz); + _storage = (StorageLayer)ComponentContext.inject(clazz); _storage.configure("StorageLayer", params); } catch (ClassNotFoundException e) { throw new ConfigurationException("Unable to find class " + value); @@ -1511,34 +1510,34 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements if (_configSslScr != null) { s_logger.info("config_auth.sh found in " + _configAuthScr); } - + _configIpFirewallScr = Script.findScript(getDefaultScriptsDir(), "ipfirewall.sh"); if (_configIpFirewallScr != null) { s_logger.info("_configIpFirewallScr found in " + _configIpFirewallScr); } - + _role = (String)params.get("role"); if(_role == null) - _role = SecondaryStorageVm.Role.templateProcessor.toString(); + _role = SecondaryStorageVm.Role.templateProcessor.toString(); s_logger.info("Secondary storage runs in role " + _role); - + _guid = (String)params.get("guid"); if (_guid == null) { throw new ConfigurationException("Unable to find the guid"); } - + _dc = (String)params.get("zone"); if (_dc == null) { throw new ConfigurationException("Unable to find the zone"); } _pod = (String)params.get("pod"); - + _instance = (String)params.get("instance"); - - + + String inSystemVM = (String)params.get("secondary.storage.vm"); if (inSystemVM == null || "true".equalsIgnoreCase(inSystemVM)) { - _inSystemVM = true; + _inSystemVM = true; _localgw = (String)params.get("localgw"); if (_localgw != null) { // can only happen inside service vm String mgmtHost = (String) params.get("host"); @@ -1557,12 +1556,12 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements } } - - startAdditionalServices(); - _params.put("install.numthreads", "50"); - _params.put("secondary.storage.vm", "true"); + + startAdditionalServices(); + _params.put("install.numthreads", "50"); + _params.put("secondary.storage.vm", "true"); } - + try { _params.put(StorageLayer.InstanceConfigKey, _storage); _dlMgr = new DownloadManagerImpl(); @@ -1575,114 +1574,114 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements } return true; } - + private void startAdditionalServices() { - Script command = new Script("/bin/bash", s_logger); - command.add("-c"); - command.add("if [ -f /etc/init.d/ssh ]; then service ssh restart; else service sshd restart; fi "); - String result = command.execute(); - if (result != null) { - s_logger.warn("Error in starting sshd service err=" + result ); - } - 
command = new Script("/bin/bash", s_logger); - command.add("-c"); - command.add("iptables -I INPUT -i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT"); - result = command.execute(); - if (result != null) { - s_logger.warn("Error in opening up ssh port err=" + result ); - } - } - - private void addRouteToInternalIpOrCidr(String localgw, String eth1ip, String eth1mask, String destIpOrCidr) { - s_logger.debug("addRouteToInternalIp: localgw=" + localgw + ", eth1ip=" + eth1ip + ", eth1mask=" + eth1mask + ",destIp=" + destIpOrCidr); - if (destIpOrCidr == null) { - s_logger.debug("addRouteToInternalIp: destIp is null"); - return; - } - if (!NetUtils.isValidIp(destIpOrCidr) && !NetUtils.isValidCIDR(destIpOrCidr)){ - s_logger.warn(" destIp is not a valid ip address or cidr destIp=" + destIpOrCidr); - return; - } - boolean inSameSubnet = false; - if (NetUtils.isValidIp(destIpOrCidr)) { - if (eth1ip != null && eth1mask != null) { - inSameSubnet = NetUtils.sameSubnet(eth1ip, destIpOrCidr, eth1mask); - } else { - s_logger.warn("addRouteToInternalIp: unable to determine same subnet: _eth1ip=" + eth1ip + ", dest ip=" + destIpOrCidr + ", _eth1mask=" + eth1mask); - } - } else { - inSameSubnet = NetUtils.isNetworkAWithinNetworkB(destIpOrCidr, NetUtils.ipAndNetMaskToCidr(eth1ip, eth1mask)); - } - if (inSameSubnet) { - s_logger.debug("addRouteToInternalIp: dest ip " + destIpOrCidr + " is in the same subnet as eth1 ip " + eth1ip); - return; - } - Script command = new Script("/bin/bash", s_logger); - command.add("-c"); - command.add("ip route delete " + destIpOrCidr); - command.execute(); - command = new Script("/bin/bash", s_logger); - command.add("-c"); - command.add("ip route add " + destIpOrCidr + " via " + localgw); - String result = command.execute(); - if (result != null) { - s_logger.warn("Error in configuring route to internal ip err=" + result ); - } else { - s_logger.debug("addRouteToInternalIp: added route to internal ip=" + destIpOrCidr + " via " + localgw); - } + Script command = new Script("/bin/bash", s_logger); + command.add("-c"); + command.add("if [ -f /etc/init.d/ssh ]; then service ssh restart; else service sshd restart; fi "); + String result = command.execute(); + if (result != null) { + s_logger.warn("Error in starting sshd service err=" + result ); + } + command = new Script("/bin/bash", s_logger); + command.add("-c"); + command.add("iptables -I INPUT -i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT"); + result = command.execute(); + if (result != null) { + s_logger.warn("Error in opening up ssh port err=" + result ); + } } - private void configureSSL() { - Script command = new Script(_configSslScr); - command.add("-i", _publicIp); - command.add("-h", _hostname); - String result = command.execute(); - if (result != null) { - s_logger.warn("Unable to configure httpd to use ssl"); - } - } - - private void configureSSL(String prvkeyPath, String prvCertPath, String certChainPath) { - Script command = new Script(_configSslScr); - command.add("-i", _publicIp); - command.add("-h", _hostname); - command.add("-k", prvkeyPath); - command.add("-p", prvCertPath); - if (certChainPath != null) { - command.add("-t", certChainPath); - } - String result = command.execute(); - if (result != null) { - s_logger.warn("Unable to configure httpd to use ssl"); - } - } - - private String configureAuth(String user, String passwd) { - Script command = new Script(_configAuthScr); - command.add(user); - command.add(passwd); - String result = command.execute(); - if (result != null) 
{ - s_logger.warn("Unable to configure httpd to use auth"); - } - return result; - } - - private String configureIpFirewall(List ipList, boolean isAppend){ - Script command = new Script(_configIpFirewallScr); - command.add(String.valueOf(isAppend)); - for (String ip : ipList){ - command.add(ip); - } - - String result = command.execute(); - if (result != null) { - s_logger.warn("Unable to configure firewall for command : " +command); - } - return result; - } - - protected String mount(String root, String nfsPath) { + private void addRouteToInternalIpOrCidr(String localgw, String eth1ip, String eth1mask, String destIpOrCidr) { + s_logger.debug("addRouteToInternalIp: localgw=" + localgw + ", eth1ip=" + eth1ip + ", eth1mask=" + eth1mask + ",destIp=" + destIpOrCidr); + if (destIpOrCidr == null) { + s_logger.debug("addRouteToInternalIp: destIp is null"); + return; + } + if (!NetUtils.isValidIp(destIpOrCidr) && !NetUtils.isValidCIDR(destIpOrCidr)){ + s_logger.warn(" destIp is not a valid ip address or cidr destIp=" + destIpOrCidr); + return; + } + boolean inSameSubnet = false; + if (NetUtils.isValidIp(destIpOrCidr)) { + if (eth1ip != null && eth1mask != null) { + inSameSubnet = NetUtils.sameSubnet(eth1ip, destIpOrCidr, eth1mask); + } else { + s_logger.warn("addRouteToInternalIp: unable to determine same subnet: _eth1ip=" + eth1ip + ", dest ip=" + destIpOrCidr + ", _eth1mask=" + eth1mask); + } + } else { + inSameSubnet = NetUtils.isNetworkAWithinNetworkB(destIpOrCidr, NetUtils.ipAndNetMaskToCidr(eth1ip, eth1mask)); + } + if (inSameSubnet) { + s_logger.debug("addRouteToInternalIp: dest ip " + destIpOrCidr + " is in the same subnet as eth1 ip " + eth1ip); + return; + } + Script command = new Script("/bin/bash", s_logger); + command.add("-c"); + command.add("ip route delete " + destIpOrCidr); + command.execute(); + command = new Script("/bin/bash", s_logger); + command.add("-c"); + command.add("ip route add " + destIpOrCidr + " via " + localgw); + String result = command.execute(); + if (result != null) { + s_logger.warn("Error in configuring route to internal ip err=" + result ); + } else { + s_logger.debug("addRouteToInternalIp: added route to internal ip=" + destIpOrCidr + " via " + localgw); + } + } + + private void configureSSL() { + Script command = new Script(_configSslScr); + command.add("-i", _publicIp); + command.add("-h", _hostname); + String result = command.execute(); + if (result != null) { + s_logger.warn("Unable to configure httpd to use ssl"); + } + } + + private void configureSSL(String prvkeyPath, String prvCertPath, String certChainPath) { + Script command = new Script(_configSslScr); + command.add("-i", _publicIp); + command.add("-h", _hostname); + command.add("-k", prvkeyPath); + command.add("-p", prvCertPath); + if (certChainPath != null) { + command.add("-t", certChainPath); + } + String result = command.execute(); + if (result != null) { + s_logger.warn("Unable to configure httpd to use ssl"); + } + } + + private String configureAuth(String user, String passwd) { + Script command = new Script(_configAuthScr); + command.add(user); + command.add(passwd); + String result = command.execute(); + if (result != null) { + s_logger.warn("Unable to configure httpd to use auth"); + } + return result; + } + + private String configureIpFirewall(List ipList, boolean isAppend){ + Script command = new Script(_configIpFirewallScr); + command.add(String.valueOf(isAppend)); + for (String ip : ipList){ + command.add(ip); + } + + String result = command.execute(); + if (result != null) { + 
s_logger.warn("Unable to configure firewall for command : " +command); + } + return result; + } + + protected String mount(String root, String nfsPath) { File file = new File(root); if (!file.exists()) { if (_storage.mkdir(root)) { @@ -1691,8 +1690,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements s_logger.debug("Unable to create mount point: " + root); return null; } - } - + } + Script script = null; String result = null; script = new Script(!_inSystemVM, "mount", _timeout, s_logger); @@ -1705,12 +1704,12 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements return root; } } - + Script command = new Script(!_inSystemVM, "mount", _timeout, s_logger); command.add("-t", "nfs"); if (_inSystemVM) { - //Fedora Core 12 errors out with any -o option executed from java - command.add("-o", "soft,timeo=133,retrans=2147483647,tcp,acdirmax=0,acdirmin=0"); + //Fedora Core 12 errors out with any -o option executed from java + command.add("-o", "soft,timeo=133,retrans=2147483647,tcp,acdirmax=0,acdirmin=0"); } command.add(nfsPath); command.add(root); @@ -1719,23 +1718,23 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements s_logger.warn("Unable to mount " + nfsPath + " due to " + result); file = new File(root); if (file.exists()) - file.delete(); + file.delete(); return null; } - + // XXX: Adding the check for creation of snapshots dir here. Might have to move it somewhere more logical later. if (!checkForSnapshotsDir(root)) { - return null; + return null; } - + // Create the volumes dir if (!checkForVolumesDir(root)) { - return null; + return null; } - + return root; } - + @Override public boolean start() { return true; @@ -1748,12 +1747,12 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements @Override public StartupCommand[] initialize() { - + final StartupSecondaryStorageCommand cmd = new StartupSecondaryStorageCommand(); fillNetworkInformation(cmd); if(_publicIp != null) cmd.setPublicIpAddress(_publicIp); - + Script command = new Script("/bin/bash", s_logger); command.add("-c"); command.add("ln -sf " + _parent + " /var/www/html/copy"); @@ -1769,38 +1768,38 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements String snapshotsDirLocation = mountPoint + File.separator + "snapshots"; return createDir("snapshots", snapshotsDirLocation, mountPoint); } - - protected boolean checkForVolumesDir(String mountPoint) { - String volumesDirLocation = mountPoint + "/" + "volumes"; - return createDir("volumes", volumesDirLocation, mountPoint); - } - - protected boolean createDir(String dirName, String dirLocation, String mountPoint) { - boolean dirExists = false; - - File dir = new File(dirLocation); - if (dir.exists()) { - if (dir.isDirectory()) { - s_logger.debug(dirName + " already exists on secondary storage, and is mounted at " + mountPoint); - dirExists = true; - } else { - if (dir.delete() && _storage.mkdir(dirLocation)) { - dirExists = true; - } - } - } else if (_storage.mkdir(dirLocation)) { - dirExists = true; - } - if (dirExists) { - s_logger.info(dirName + " directory created/exists on Secondary Storage."); - } else { - s_logger.info(dirName + " directory does not exist on Secondary Storage."); - } - - return dirExists; + protected boolean checkForVolumesDir(String mountPoint) { + String volumesDirLocation = mountPoint + "/" + "volumes"; + return createDir("volumes", volumesDirLocation, mountPoint); } - + + protected boolean createDir(String dirName, String 
dirLocation, String mountPoint) { + boolean dirExists = false; + + File dir = new File(dirLocation); + if (dir.exists()) { + if (dir.isDirectory()) { + s_logger.debug(dirName + " already exists on secondary storage, and is mounted at " + mountPoint); + dirExists = true; + } else { + if (dir.delete() && _storage.mkdir(dirLocation)) { + dirExists = true; + } + } + } else if (_storage.mkdir(dirLocation)) { + dirExists = true; + } + + if (dirExists) { + s_logger.info(dirName + " directory created/exists on Secondary Storage."); + } else { + s_logger.info(dirName + " directory does not exist on Secondary Storage."); + } + + return dirExists; + } + @Override protected String getDefaultScriptsDir() { return "./scripts/storage/secondary"; diff --git a/core/src/com/cloud/storage/template/DownloadManagerImpl.java b/core/src/com/cloud/storage/template/DownloadManagerImpl.java index f8b075d4a06..d5ed71e8459 100755 --- a/core/src/com/cloud/storage/template/DownloadManagerImpl.java +++ b/core/src/com/cloud/storage/template/DownloadManagerImpl.java @@ -29,8 +29,8 @@ import java.security.NoSuchAlgorithmException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Date; -import java.util.Enumeration; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.UUID; @@ -43,7 +43,6 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; -import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.DownloadAnswer; import com.cloud.agent.api.storage.DownloadCommand; import com.cloud.agent.api.storage.DownloadCommand.Proxy; @@ -60,10 +59,6 @@ import com.cloud.storage.template.Processor.FormatInfo; import com.cloud.storage.template.TemplateDownloader.DownloadCompleteCallback; import com.cloud.storage.template.TemplateDownloader.Status; import com.cloud.utils.NumbersUtil; -import com.cloud.utils.component.Adapter; -import com.cloud.utils.component.Adapters; -import com.cloud.utils.component.ComponentLocator; -import com.cloud.utils.component.LegacyComponentLocator.ComponentInfo; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; @@ -72,7 +67,7 @@ import com.cloud.utils.script.Script; public class DownloadManagerImpl implements DownloadManager { private String _name; StorageLayer _storage; - Adapters _processors; + Map _processors; public class Completion implements DownloadCompleteCallback { private final String jobId; @@ -94,14 +89,14 @@ public class DownloadManagerImpl implements DownloadManager { private final boolean hvm; private final ImageFormat format; private String tmpltPath; - private String description; + private final String description; private String checksum; - private Long accountId; - private String installPathPrefix; + private final Long accountId; + private final String installPathPrefix; private long templatesize; private long templatePhysicalSize; - private long id; - private ResourceType resourceType; + private final long id; + private final ResourceType resourceType; public DownloadJob(TemplateDownloader td, String jobId, long id, String tmpltName, ImageFormat format, boolean hvm, Long accountId, String descr, String cksum, String installPathPrefix, ResourceType resourceType) { super(); @@ -160,10 +155,10 @@ public class DownloadManagerImpl implements DownloadManager { } public ResourceType getResourceType() { - return resourceType; - } + return resourceType; + } - public void 
setTmpltPath(String tmpltPath) { + public void setTmpltPath(String tmpltPath) { this.tmpltPath = tmpltPath; } @@ -205,9 +200,9 @@ public class DownloadManagerImpl implements DownloadManager { public long getTemplatePhysicalSize() { return templatePhysicalSize; } - + public void setCheckSum(String checksum) { - this.checksum = checksum; + this.checksum = checksum; } } @@ -216,7 +211,7 @@ public class DownloadManagerImpl implements DownloadManager { private String _volumeDir; private String createTmpltScr; private String createVolScr; - private Adapters processors; + private List processors; private ExecutorService threadPool; @@ -278,9 +273,9 @@ public class DownloadManagerImpl implements DownloadManager { break; } } - + private String computeCheckSum(File f) { - byte[] buffer = new byte[8192]; + byte[] buffer = new byte[8192]; int read = 0; MessageDigest digest; String checksum = null; @@ -296,16 +291,16 @@ public class DownloadManagerImpl implements DownloadManager { checksum = String.format("%032x",bigInt); return checksum; }catch(IOException e) { - return null; + return null; }catch (NoSuchAlgorithmException e) { - return null; + return null; } finally { try { - if(is != null) - is.close(); + if(is != null) + is.close(); } catch (IOException e) { - return null; + return null; } } } @@ -320,17 +315,17 @@ public class DownloadManagerImpl implements DownloadManager { TemplateDownloader td = dnld.getTemplateDownloader(); String resourcePath = null; ResourceType resourceType = dnld.getResourceType(); - + // once template path is set, remove the parent dir so that the template is installed with a relative path String finalResourcePath = ""; if (resourceType == ResourceType.TEMPLATE){ - finalResourcePath += _templateDir + File.separator + dnld.getAccountId() + File.separator + dnld.getId() + File.separator; - resourcePath = dnld.getInstallPathPrefix() + dnld.getAccountId() + File.separator + dnld.getId() + File.separator;// dnld.getTmpltName(); + finalResourcePath += _templateDir + File.separator + dnld.getAccountId() + File.separator + dnld.getId() + File.separator; + resourcePath = dnld.getInstallPathPrefix() + dnld.getAccountId() + File.separator + dnld.getId() + File.separator;// dnld.getTmpltName(); }else { - finalResourcePath += _volumeDir + File.separator + dnld.getId() + File.separator; - resourcePath = dnld.getInstallPathPrefix() + dnld.getId() + File.separator;// dnld.getTmpltName(); + finalResourcePath += _volumeDir + File.separator + dnld.getId() + File.separator; + resourcePath = dnld.getInstallPathPrefix() + dnld.getId() + File.separator;// dnld.getTmpltName(); } - + _storage.mkdirs(resourcePath); dnld.setTmpltPath(finalResourcePath); @@ -389,9 +384,9 @@ public class DownloadManagerImpl implements DownloadManager { // Set permissions for template/volume.properties String propertiesFile = resourcePath; if (resourceType == ResourceType.TEMPLATE){ - propertiesFile += "/template.properties"; + propertiesFile += "/template.properties"; }else{ - propertiesFile += "/volume.properties"; + propertiesFile += "/volume.properties"; } File templateProperties = new File(propertiesFile); _storage.setWorldReadableAndWriteable(templateProperties); @@ -405,9 +400,9 @@ public class DownloadManagerImpl implements DownloadManager { return "Unable to download due to " + e.getMessage(); } - Enumeration en = _processors.enumeration(); - while (en.hasMoreElements()) { - Processor processor = en.nextElement(); + Iterator en = _processors.values().iterator(); + while (en.hasNext()) { + Processor processor 
= en.next(); FormatInfo info = null; try { @@ -423,7 +418,7 @@ public class DownloadManagerImpl implements DownloadManager { break; } } - + if (!loc.save()) { s_logger.warn("Cleaning up because we're unable to save the formats"); loc.purge(); @@ -450,9 +445,9 @@ public class DownloadManagerImpl implements DownloadManager { String jobId = uuid.toString(); String tmpDir = ""; if(resourceType == ResourceType.TEMPLATE){ - tmpDir = installPathPrefix + File.separator + accountId + File.separator + id; + tmpDir = installPathPrefix + File.separator + accountId + File.separator + id; }else { - tmpDir = installPathPrefix + File.separator + id; + tmpDir = installPathPrefix + File.separator + id; } try { @@ -463,7 +458,7 @@ public class DownloadManagerImpl implements DownloadManager { } // TO DO - define constant for volume properties. File file = ResourceType.TEMPLATE == resourceType ? _storage.getFile(tmpDir + File.separator + TemplateLocation.Filename) : - _storage.getFile(tmpDir + File.separator + "volume.properties"); + _storage.getFile(tmpDir + File.separator + "volume.properties"); if ( file.exists() ) { file.delete(); } @@ -524,9 +519,9 @@ public class DownloadManagerImpl implements DownloadManager { } return 0; } - + public String getDownloadCheckSum(String jobId) { - DownloadJob dj = jobs.get(jobId); + DownloadJob dj = jobs.get(jobId); if (dj != null) { return dj.getChecksum(); } @@ -589,7 +584,7 @@ public class DownloadManagerImpl implements DownloadManager { @Override public DownloadAnswer handleDownloadCommand(SecondaryStorageResource resource, DownloadCommand cmd) { - ResourceType resourceType = cmd.getResourceType(); + ResourceType resourceType = cmd.getResourceType(); if (cmd instanceof DownloadProgressCommand) { return handleDownloadProgressCmd( resource, (DownloadProgressCommand) cmd); } @@ -604,9 +599,9 @@ public class DownloadManagerImpl implements DownloadManager { String installPathPrefix = null; if (ResourceType.TEMPLATE == resourceType){ - installPathPrefix = resource.getRootDir(cmd) + File.separator + _templateDir; + installPathPrefix = resource.getRootDir(cmd) + File.separator + _templateDir; }else { - installPathPrefix = resource.getRootDir(cmd) + File.separator + _volumeDir; + installPathPrefix = resource.getRootDir(cmd) + File.separator + _volumeDir; } String user = null; @@ -693,10 +688,10 @@ public class DownloadManagerImpl implements DownloadManager { } - + private List listVolumes(String rootdir) { List result = new ArrayList(); - + Script script = new Script(listVolScr, s_logger); script.add("-r", rootdir); ZfsPathParser zpp = new ZfsPathParser(rootdir); @@ -705,12 +700,12 @@ public class DownloadManagerImpl implements DownloadManager { s_logger.info("found " + zpp.getPaths().size() + " volumes" + zpp.getPaths()); return result; } - - - + + + private List listTemplates(String rootdir) { List result = new ArrayList(); - + Script script = new Script(listTmpltScr, s_logger); script.add("-r", rootdir); ZfsPathParser zpp = new ZfsPathParser(rootdir); @@ -724,11 +719,11 @@ public class DownloadManagerImpl implements DownloadManager { public Map gatherTemplateInfo(String rootDir) { Map result = new HashMap(); String templateDir = rootDir + File.separator + _templateDir; - + if (! 
_storage.exists(templateDir)) { _storage.mkdirs(templateDir); } - + List publicTmplts = listTemplates(templateDir); for (String tmplt : publicTmplts) { String path = tmplt.substring(0, tmplt.lastIndexOf(File.separator)); @@ -746,18 +741,18 @@ public class DownloadManagerImpl implements DownloadManager { } TemplateInfo tInfo = loc.getTemplateInfo(); - + if ((tInfo.size == tInfo.physicalSize) && (tInfo.installPath.endsWith(ImageFormat.OVA.getFileExtension()))) { - try { - Processor processor = _processors.get("VMDK Processor"); - VmdkProcessor vmdkProcessor = (VmdkProcessor)processor; - long vSize = vmdkProcessor.getTemplateVirtualSize(path, tInfo.installPath.substring(tInfo.installPath.lastIndexOf(File.separator) + 1)); - tInfo.size = vSize; - loc.updateVirtualSize(vSize); - loc.save(); - } catch (Exception e) { - s_logger.error("Unable to get the virtual size of the template: " + tInfo.installPath + " due to " + e.getMessage()); - } + try { + Processor processor = _processors.get("VMDK Processor"); + VmdkProcessor vmdkProcessor = (VmdkProcessor)processor; + long vSize = vmdkProcessor.getTemplateVirtualSize(path, tInfo.installPath.substring(tInfo.installPath.lastIndexOf(File.separator) + 1)); + tInfo.size = vSize; + loc.updateVirtualSize(vSize); + loc.save(); + } catch (Exception e) { + s_logger.error("Unable to get the virtual size of the template: " + tInfo.installPath + " due to " + e.getMessage()); + } } result.put(tInfo.templateName, tInfo); @@ -777,52 +772,52 @@ public class DownloadManagerImpl implements DownloadManager { return result; } - @Override - public Map gatherVolumeInfo(String rootDir) { - Map result = new HashMap(); - String volumeDir = rootDir + File.separator + _volumeDir; - - if (! _storage.exists(volumeDir)) { - _storage.mkdirs(volumeDir); - } - - List vols = listVolumes(volumeDir); - for (String vol : vols) { - String path = vol.substring(0, vol.lastIndexOf(File.separator)); - TemplateLocation loc = new TemplateLocation(_storage, path); - try { - if (!loc.load()) { - s_logger.warn("Post download installation was not completed for " + path); - //loc.purge(); - _storage.cleanup(path, volumeDir); - continue; - } - } catch (IOException e) { - s_logger.warn("Unable to load volume location " + path, e); - continue; - } + @Override + public Map gatherVolumeInfo(String rootDir) { + Map result = new HashMap(); + String volumeDir = rootDir + File.separator + _volumeDir; - TemplateInfo vInfo = loc.getTemplateInfo(); - - if ((vInfo.size == vInfo.physicalSize) && (vInfo.installPath.endsWith(ImageFormat.OVA.getFileExtension()))) { - try { - Processor processor = _processors.get("VMDK Processor"); - VmdkProcessor vmdkProcessor = (VmdkProcessor)processor; - long vSize = vmdkProcessor.getTemplateVirtualSize(path, vInfo.installPath.substring(vInfo.installPath.lastIndexOf(File.separator) + 1)); - vInfo.size = vSize; - loc.updateVirtualSize(vSize); - loc.save(); - } catch (Exception e) { - s_logger.error("Unable to get the virtual size of the volume: " + vInfo.installPath + " due to " + e.getMessage()); - } - } + if (! 
_storage.exists(volumeDir)) { + _storage.mkdirs(volumeDir); + } + + List vols = listVolumes(volumeDir); + for (String vol : vols) { + String path = vol.substring(0, vol.lastIndexOf(File.separator)); + TemplateLocation loc = new TemplateLocation(_storage, path); + try { + if (!loc.load()) { + s_logger.warn("Post download installation was not completed for " + path); + //loc.purge(); + _storage.cleanup(path, volumeDir); + continue; + } + } catch (IOException e) { + s_logger.warn("Unable to load volume location " + path, e); + continue; + } + + TemplateInfo vInfo = loc.getTemplateInfo(); + + if ((vInfo.size == vInfo.physicalSize) && (vInfo.installPath.endsWith(ImageFormat.OVA.getFileExtension()))) { + try { + Processor processor = _processors.get("VMDK Processor"); + VmdkProcessor vmdkProcessor = (VmdkProcessor)processor; + long vSize = vmdkProcessor.getTemplateVirtualSize(path, vInfo.installPath.substring(vInfo.installPath.lastIndexOf(File.separator) + 1)); + vInfo.size = vSize; + loc.updateVirtualSize(vSize); + loc.save(); + } catch (Exception e) { + s_logger.error("Unable to get the virtual size of the volume: " + vInfo.installPath + " due to " + e.getMessage()); + } + } + + result.put(vInfo.getId(), vInfo); + s_logger.debug("Added volume name: " + vInfo.templateName + ", path: " + vol); + } + return result; + } - result.put(vInfo.getId(), vInfo); - s_logger.debug("Added volume name: " + vInfo.templateName + ", path: " + vol); - } - return result; - } - private int deleteDownloadDirectories(File downloadPath, int deleted) { try { if (downloadPath.exists()) { @@ -881,7 +876,7 @@ public class DownloadManagerImpl implements DownloadManager { String value = null; - _storage = (StorageLayer) params.get(StorageLayer.InstanceConfigKey); + _storage = (StorageLayer)params.get(StorageLayer.InstanceConfigKey); if (_storage == null) { value = (String) params.get(StorageLayer.ClassConfigKey); if (value == null) { @@ -891,10 +886,14 @@ public class DownloadManagerImpl implements DownloadManager { Class clazz; try { clazz = (Class) Class.forName(value); + _storage = clazz.newInstance(); } catch (ClassNotFoundException e) { throw new ConfigurationException("Unable to instantiate " + value); + } catch (InstantiationException e) { + throw new ConfigurationException("Unable to instantiate " + value); + } catch (IllegalAccessException e) { + throw new ConfigurationException("Unable to instantiate " + value); } - _storage = ComponentLocator.inject(clazz); } String useSsl = (String)params.get("sslcopy"); if (useSsl != null) { @@ -943,29 +942,27 @@ public class DownloadManagerImpl implements DownloadManager { } s_logger.info("createvolume.sh found in " + createVolScr); - List> processors = new ArrayList>(); + _processors = new HashMap(); Processor processor = new VhdProcessor(); processor.configure("VHD Processor", params); - processors.add(new ComponentInfo("VHD Processor", VhdProcessor.class, processor)); + _processors.put("VHD Processor", processor); processor = new IsoProcessor(); processor.configure("ISO Processor", params); - processors.add(new ComponentInfo("ISO Processor", IsoProcessor.class, processor)); + _processors.put("ISO Processor", processor); processor = new QCOW2Processor(); processor.configure("QCOW2 Processor", params); - processors.add(new ComponentInfo("QCOW2 Processor", QCOW2Processor.class, processor)); + _processors.put("QCOW2 Processor", processor); processor = new VmdkProcessor(); processor.configure("VMDK Processor", params); - processors.add(new ComponentInfo("VMDK Processor", 
VmdkProcessor.class, processor)); + _processors.put("VMDK Processor", processor); processor = new RawImageProcessor(); processor.configure("Raw Image Processor", params); - processors.add(new ComponentInfo("Raw Image Processor", RawImageProcessor.class, processor)); - - _processors = new Adapters("processors", processors); + _processors.put("Raw Image Processor", processor); _templateDir = (String) params.get("public.templates.root.dir"); if (_templateDir == null) { @@ -1047,5 +1044,5 @@ public class DownloadManagerImpl implements DownloadManager { return; } } - + } diff --git a/core/src/com/cloud/storage/template/UploadManagerImpl.java b/core/src/com/cloud/storage/template/UploadManagerImpl.java index 2dd1751aeaa..0de1c6ccf67 100755 --- a/core/src/com/cloud/storage/template/UploadManagerImpl.java +++ b/core/src/com/cloud/storage/template/UploadManagerImpl.java @@ -21,6 +21,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.text.SimpleDateFormat; import java.util.Date; +import java.util.List; import java.util.Map; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; @@ -46,15 +47,13 @@ import com.cloud.storage.resource.SecondaryStorageResource; import com.cloud.storage.template.TemplateUploader.Status; import com.cloud.storage.template.TemplateUploader.UploadCompleteCallback; import com.cloud.utils.NumbersUtil; -import com.cloud.utils.component.Adapters; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; public class UploadManagerImpl implements UploadManager { - public class Completion implements UploadCompleteCallback { + public class Completion implements UploadCompleteCallback { private final String jobId; public Completion(String jobId) { @@ -66,180 +65,180 @@ public class UploadManagerImpl implements UploadManager { setUploadStatus(jobId, status); } } - - private static class UploadJob { - private final TemplateUploader tu; - private final String jobId; - private final String name; - private final ImageFormat format; - private String tmpltPath; - private String description; - private String checksum; - private Long accountId; - private String installPathPrefix; - private long templatesize; - private long id; - public UploadJob(TemplateUploader tu, String jobId, long id, String name, ImageFormat format, boolean hvm, Long accountId, String descr, String cksum, String installPathPrefix) { - super(); - this.tu = tu; - this.jobId = jobId; - this.name = name; - this.format = format; - this.accountId = accountId; - this.description = descr; - this.checksum = cksum; - this.installPathPrefix = installPathPrefix; - this.templatesize = 0; - this.id = id; - } + private static class UploadJob { + private final TemplateUploader tu; + private final String jobId; + private final String name; + private final ImageFormat format; + private String tmpltPath; + private String description; + private String checksum; + private Long accountId; + private String installPathPrefix; + private long templatesize; + private long id; - public TemplateUploader getTd() { - return tu; - } + public UploadJob(TemplateUploader tu, String jobId, long id, String name, ImageFormat format, boolean hvm, Long accountId, String descr, String cksum, String installPathPrefix) { + super(); + this.tu = tu; + this.jobId = jobId; + this.name = name; + this.format = format; + this.accountId = accountId; + this.description = descr; + this.checksum = cksum; + this.installPathPrefix = installPathPrefix; + 
this.templatesize = 0; + this.id = id; + } - public String getDescription() { - return description; - } + public TemplateUploader getTd() { + return tu; + } - public String getChecksum() { - return checksum; - } + public String getDescription() { + return description; + } - public UploadJob(TemplateUploader td, String jobId, UploadCommand cmd) { - this.tu = td; - this.jobId = jobId; - this.name = cmd.getName(); - this.format = cmd.getFormat(); - } + public String getChecksum() { + return checksum; + } - public TemplateUploader getTemplateUploader() { - return tu; - } + public UploadJob(TemplateUploader td, String jobId, UploadCommand cmd) { + this.tu = td; + this.jobId = jobId; + this.name = cmd.getName(); + this.format = cmd.getFormat(); + } - public String getJobId() { - return jobId; - } + public TemplateUploader getTemplateUploader() { + return tu; + } - public String getTmpltName() { - return name; - } + public String getJobId() { + return jobId; + } - public ImageFormat getFormat() { - return format; - } + public String getTmpltName() { + return name; + } - public Long getAccountId() { - return accountId; - } + public ImageFormat getFormat() { + return format; + } - public long getId() { - return id; - } + public Long getAccountId() { + return accountId; + } - public void setTmpltPath(String tmpltPath) { - this.tmpltPath = tmpltPath; - } + public long getId() { + return id; + } - public String getTmpltPath() { - return tmpltPath; - } + public void setTmpltPath(String tmpltPath) { + this.tmpltPath = tmpltPath; + } - public String getInstallPathPrefix() { - return installPathPrefix; - } + public String getTmpltPath() { + return tmpltPath; + } - public void cleanup() { - if (tu != null) { - String upldPath = tu.getUploadLocalPath(); - if (upldPath != null) { - File f = new File(upldPath); - f.delete(); - } - } - } + public String getInstallPathPrefix() { + return installPathPrefix; + } - public void setTemplatesize(long templatesize) { - this.templatesize = templatesize; - } + public void cleanup() { + if (tu != null) { + String upldPath = tu.getUploadLocalPath(); + if (upldPath != null) { + File f = new File(upldPath); + f.delete(); + } + } + } + + public void setTemplatesize(long templatesize) { + this.templatesize = templatesize; + } + + public long getTemplatesize() { + return templatesize; + } + } + public static final Logger s_logger = Logger.getLogger(UploadManagerImpl.class); + private ExecutorService threadPool; + private final Map jobs = new ConcurrentHashMap(); + private String parentDir; + private List _processors; + private String publicTemplateRepo; + private final String extractMountPoint = "/mnt/SecStorage/extractmnt"; + private StorageLayer _storage; + private int installTimeoutPerGig; + private boolean _sslCopy; + private String _name; + private boolean hvm; + + + @Override + public String uploadPublicTemplate(long id, String url, String name, + ImageFormat format, Long accountId, String descr, + String cksum, String installPathPrefix, String userName, + String passwd, long templateSizeInBytes) { - public long getTemplatesize() { - return templatesize; - } - } - public static final Logger s_logger = Logger.getLogger(UploadManagerImpl.class); - private ExecutorService threadPool; - private final Map jobs = new ConcurrentHashMap(); - private String parentDir; - private Adapters _processors; - private String publicTemplateRepo; - private String extractMountPoint = "/mnt/SecStorage/extractmnt"; - private StorageLayer _storage; - private int installTimeoutPerGig; - private 
boolean _sslCopy; - private String _name; - private boolean hvm; - - - @Override - public String uploadPublicTemplate(long id, String url, String name, - ImageFormat format, Long accountId, String descr, - String cksum, String installPathPrefix, String userName, - String passwd, long templateSizeInBytes) { - UUID uuid = UUID.randomUUID(); String jobId = uuid.toString(); String completePath = parentDir + File.separator + installPathPrefix; s_logger.debug("Starting upload from " + completePath); - + URI uri; - try { - uri = new URI(url); - } catch (URISyntaxException e) { - s_logger.error("URI is incorrect: " + url); - throw new CloudRuntimeException("URI is incorrect: " + url); - } - TemplateUploader tu; - if ((uri != null) && (uri.getScheme() != null)) { - if (uri.getScheme().equalsIgnoreCase("ftp")) { - tu = new FtpTemplateUploader(completePath, url, new Completion(jobId), templateSizeInBytes); - } else { - s_logger.error("Scheme is not supported " + url); - throw new CloudRuntimeException("Scheme is not supported " + url); - } - } else { - s_logger.error("Unable to download from URL: " + url); - throw new CloudRuntimeException("Unable to download from URL: " + url); - } - UploadJob uj = new UploadJob(tu, jobId, id, name, format, hvm, accountId, descr, cksum, installPathPrefix); - jobs.put(jobId, uj); - threadPool.execute(tu); + try { + uri = new URI(url); + } catch (URISyntaxException e) { + s_logger.error("URI is incorrect: " + url); + throw new CloudRuntimeException("URI is incorrect: " + url); + } + TemplateUploader tu; + if ((uri != null) && (uri.getScheme() != null)) { + if (uri.getScheme().equalsIgnoreCase("ftp")) { + tu = new FtpTemplateUploader(completePath, url, new Completion(jobId), templateSizeInBytes); + } else { + s_logger.error("Scheme is not supported " + url); + throw new CloudRuntimeException("Scheme is not supported " + url); + } + } else { + s_logger.error("Unable to download from URL: " + url); + throw new CloudRuntimeException("Unable to download from URL: " + url); + } + UploadJob uj = new UploadJob(tu, jobId, id, name, format, hvm, accountId, descr, cksum, installPathPrefix); + jobs.put(jobId, uj); + threadPool.execute(tu); - return jobId; - - } + return jobId; - @Override - public String getUploadError(String jobId) { + } + + @Override + public String getUploadError(String jobId) { UploadJob uj = jobs.get(jobId); if (uj != null) { return uj.getTemplateUploader().getUploadError(); } return null; - } + } - @Override - public int getUploadPct(String jobId) { - UploadJob uj = jobs.get(jobId); + @Override + public int getUploadPct(String jobId) { + UploadJob uj = jobs.get(jobId); if (uj != null) { return uj.getTemplateUploader().getUploadPercent(); } return 0; - } + } - @Override - public Status getUploadStatus(String jobId) { + @Override + public Status getUploadStatus(String jobId) { UploadJob job = jobs.get(jobId); if (job != null) { TemplateUploader tu = job.getTemplateUploader(); @@ -248,8 +247,8 @@ public class UploadManagerImpl implements UploadManager { } } return Status.UNKNOWN; - } - + } + public static UploadVO.Status convertStatus(Status tds) { switch (tds) { case ABORTED: @@ -277,11 +276,11 @@ public class UploadManagerImpl implements UploadManager { public com.cloud.storage.UploadVO.Status getUploadStatus2(String jobId) { return convertStatus(getUploadStatus(jobId)); } - @Override - public String getPublicTemplateRepo() { - // TODO Auto-generated method stub - return null; - } + @Override + public String getPublicTemplateRepo() { + // TODO Auto-generated 
method stub + return null; + } private UploadAnswer handleUploadProgressCmd(UploadProgressCommand cmd) { String jobId = cmd.getJobId(); @@ -290,7 +289,7 @@ public class UploadManagerImpl implements UploadManager { if (jobId != null) uj = jobs.get(jobId); if (uj == null) { - return new UploadAnswer(null, 0, "Cannot find job", com.cloud.storage.UploadVO.Status.UNKNOWN, "", "", 0); + return new UploadAnswer(null, 0, "Cannot find job", com.cloud.storage.UploadVO.Status.UNKNOWN, "", "", 0); } TemplateUploader td = uj.getTemplateUploader(); switch (cmd.getRequest()) { @@ -300,7 +299,7 @@ public class UploadManagerImpl implements UploadManager { td.stopUpload(); sleep(); break; - /*case RESTART: + /*case RESTART: td.stopUpload(); sleep(); threadPool.execute(td); @@ -316,10 +315,10 @@ public class UploadManagerImpl implements UploadManager { return new UploadAnswer(jobId, getUploadPct(jobId), getUploadError(jobId), getUploadStatus2(jobId), getUploadLocalPath(jobId), getInstallPath(jobId), getUploadTemplateSize(jobId)); } - + @Override public UploadAnswer handleUploadCommand(SecondaryStorageResource resource, UploadCommand cmd) { - s_logger.warn("Handling the upload " +cmd.getInstallPath() + " " + cmd.getId()); + s_logger.warn("Handling the upload " +cmd.getInstallPath() + " " + cmd.getId()); if (cmd instanceof UploadProgressCommand) { return handleUploadProgressCmd((UploadProgressCommand) cmd); } @@ -327,9 +326,9 @@ public class UploadManagerImpl implements UploadManager { String user = null; String password = null; String jobId = uploadPublicTemplate(cmd.getId(), cmd.getUrl(), cmd.getName(), - cmd.getFormat(), cmd.getAccountId(), cmd.getDescription(), - cmd.getChecksum(), cmd.getInstallPath(), user, password, - cmd.getTemplateSizeInBytes()); + cmd.getFormat(), cmd.getAccountId(), cmd.getDescription(), + cmd.getChecksum(), cmd.getInstallPath(), user, password, + cmd.getTemplateSizeInBytes()); sleep(); if (jobId == null) { return new UploadAnswer(null, 0, "Internal Error", com.cloud.storage.UploadVO.Status.UPLOAD_ERROR, "", "", 0); @@ -337,18 +336,18 @@ public class UploadManagerImpl implements UploadManager { return new UploadAnswer(jobId, getUploadPct(jobId), getUploadError(jobId), getUploadStatus2(jobId), getUploadLocalPath(jobId), getInstallPath(jobId), getUploadTemplateSize(jobId)); } - + @Override public CreateEntityDownloadURLAnswer handleCreateEntityURLCommand(CreateEntityDownloadURLCommand cmd){ - - boolean isApacheUp = checkAndStartApache(); - if (!isApacheUp){ - String errorString = "Error in starting Apache server "; + + boolean isApacheUp = checkAndStartApache(); + if (!isApacheUp){ + String errorString = "Error in starting Apache server "; s_logger.error(errorString); return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE); - } + } // Create the directory structure so that its visible under apache server root - String extractDir = "/var/www/html/userdata/"; + String extractDir = "/var/www/html/userdata/"; Script command = new Script("mkdir", s_logger); command.add("-p"); command.add(extractDir); @@ -358,19 +357,19 @@ public class UploadManagerImpl implements UploadManager { s_logger.error(errorString); return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE); } - + // Create a random file under the directory for security reasons. 
String uuid = cmd.getExtractLinkUUID(); - command = new Script("touch", s_logger); - command.add(extractDir + uuid); - result = command.execute(); - if (result != null) { - String errorString = "Error in creating file " +uuid+ " ,error: " + result; - s_logger.warn(errorString); - return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE); - } + command = new Script("touch", s_logger); + command.add(extractDir + uuid); + result = command.execute(); + if (result != null) { + String errorString = "Error in creating file " +uuid+ " ,error: " + result; + s_logger.warn(errorString); + return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE); + } + - // Create a symbolic link from the actual directory to the template location. The entity would be directly visible under /var/www/html/userdata/cmd.getInstallPath(); command = new Script("/bin/bash", s_logger); command.add("-c"); @@ -381,11 +380,11 @@ public class UploadManagerImpl implements UploadManager { s_logger.error(errorString); return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE); } - + return new CreateEntityDownloadURLAnswer("", CreateEntityDownloadURLAnswer.RESULT_SUCCESS); - + } - + @Override public DeleteEntityDownloadURLAnswer handleDeleteEntityDownloadURLCommand(DeleteEntityDownloadURLCommand cmd){ @@ -394,8 +393,8 @@ public class UploadManagerImpl implements UploadManager { String path = cmd.getPath(); Script command = new Script("/bin/bash", s_logger); command.add("-c"); - - //We just need to remove the UUID.vhd + + //We just need to remove the UUID.vhd String extractUrl = cmd.getExtractUrl(); command.add("unlink /var/www/html/userdata/" +extractUrl.substring(extractUrl.lastIndexOf(File.separator) + 1)); String result = command.execute(); @@ -404,7 +403,7 @@ public class UploadManagerImpl implements UploadManager { s_logger.warn(errorString); return new DeleteEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE); } - + // If its a volume also delete the Hard link since it was created only for the purpose of download. 
if(cmd.getType() == Upload.Type.VOLUME){ command = new Script("/bin/bash", s_logger); @@ -418,31 +417,31 @@ public class UploadManagerImpl implements UploadManager { return new DeleteEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE); } } - + return new DeleteEntityDownloadURLAnswer("", CreateEntityDownloadURLAnswer.RESULT_SUCCESS); } - private String getInstallPath(String jobId) { - // TODO Auto-generated method stub - return null; - } + private String getInstallPath(String jobId) { + // TODO Auto-generated method stub + return null; + } - private String getUploadLocalPath(String jobId) { - // TODO Auto-generated method stub - return null; - } + private String getUploadLocalPath(String jobId) { + // TODO Auto-generated method stub + return null; + } - private long getUploadTemplateSize(String jobId){ - UploadJob uj = jobs.get(jobId); + private long getUploadTemplateSize(String jobId){ + UploadJob uj = jobs.get(jobId); if (uj != null) { return uj.getTemplatesize(); } return 0; - } + } - @Override - public boolean configure(String name, Map params) - throws ConfigurationException { + @Override + public boolean configure(String name, Map params) + throws ConfigurationException { _name = name; String value = null; @@ -457,21 +456,25 @@ public class UploadManagerImpl implements UploadManager { Class clazz; try { clazz = (Class) Class.forName(value); + _storage = clazz.newInstance(); } catch (ClassNotFoundException e) { throw new ConfigurationException("Unable to instantiate " + value); + } catch (InstantiationException e) { + throw new ConfigurationException("Unable to instantiate " + value); + } catch (IllegalAccessException e) { + throw new ConfigurationException("Unable to instantiate " + value); } - _storage = ComponentLocator.inject(clazz); } String useSsl = (String)params.get("sslcopy"); if (useSsl != null) { - _sslCopy = Boolean.parseBoolean(useSsl); - + _sslCopy = Boolean.parseBoolean(useSsl); + } String inSystemVM = (String)params.get("secondary.storage.vm"); if (inSystemVM != null && "true".equalsIgnoreCase(inSystemVM)) { - s_logger.info("UploadManager: starting additional services since we are inside system vm"); - startAdditionalServices(); - //blockOutgoingOnPrivate(); + s_logger.info("UploadManager: starting additional services since we are inside system vm"); + startAdditionalServices(); + //blockOutgoingOnPrivate(); } value = (String) params.get("install.timeout.pergig"); @@ -489,53 +492,53 @@ public class UploadManagerImpl implements UploadManager { threadPool = Executors.newFixedThreadPool(numInstallThreads); return true; - } - - private void startAdditionalServices() { - - - Script command = new Script("rm", s_logger); - command.add("-rf"); - command.add(extractMountPoint); - String result = command.execute(); - if (result != null) { - s_logger.warn("Error in creating file " +extractMountPoint+ " ,error: " + result ); - return; - } - - command = new Script("touch", s_logger); - command.add(extractMountPoint); - result = command.execute(); - if (result != null) { - s_logger.warn("Error in creating file " +extractMountPoint+ " ,error: " + result ); - return; - } - - command = new Script("/bin/bash", s_logger); - command.add("-c"); - command.add("ln -sf " + parentDir + " " +extractMountPoint); - result = command.execute(); - if (result != null) { - s_logger.warn("Error in linking err=" + result ); - return; - } - - } + } - @Override - public String getName() { - return _name; - } + private void startAdditionalServices() { - @Override - public 
boolean start() { - return true; - } - @Override - public boolean stop() { - return true; - } + Script command = new Script("rm", s_logger); + command.add("-rf"); + command.add(extractMountPoint); + String result = command.execute(); + if (result != null) { + s_logger.warn("Error in creating file " +extractMountPoint+ " ,error: " + result ); + return; + } + + command = new Script("touch", s_logger); + command.add(extractMountPoint); + result = command.execute(); + if (result != null) { + s_logger.warn("Error in creating file " +extractMountPoint+ " ,error: " + result ); + return; + } + + command = new Script("/bin/bash", s_logger); + command.add("-c"); + command.add("ln -sf " + parentDir + " " +extractMountPoint); + result = command.execute(); + if (result != null) { + s_logger.warn("Error in linking err=" + result ); + return; + } + + } + + @Override + public String getName() { + return _name; + } + + @Override + public boolean start() { + return true; + } + + @Override + public boolean stop() { + return true; + } /** * Get notified of change of job status. Executed in context of uploader thread @@ -582,7 +585,7 @@ public class UploadManagerImpl implements UploadManager { tu.setStatus(Status.UNRECOVERABLE_ERROR); tu.setUploadError("Failed post upload script: " + result); } else { - s_logger.warn("Upload completed successfully at " + new SimpleDateFormat().format(new Date())); + s_logger.warn("Upload completed successfully at " + new SimpleDateFormat().format(new Date())); tu.setStatus(Status.POST_UPLOAD_FINISHED); tu.setUploadError("Upload completed successfully at " + new SimpleDateFormat().format(new Date())); } @@ -596,9 +599,9 @@ public class UploadManagerImpl implements UploadManager { } } - private String postUpload(String jobId) { - return null; - } + private String postUpload(String jobId) { + return null; + } private void sleep() { try { @@ -608,21 +611,21 @@ public class UploadManagerImpl implements UploadManager { } } - private boolean checkAndStartApache() { - - //Check whether the Apache server is running - Script command = new Script("/bin/bash", s_logger); - command.add("-c"); - command.add("if [ -d /etc/apache2 ] ; then service apache2 status | grep pid; else service httpd status | grep pid; fi "); - String result = command.execute(); - - //Apache Server is not running. Try to start it. - if (result != null) { - - /*s_logger.warn("Apache server not running, trying to start it"); + private boolean checkAndStartApache() { + + //Check whether the Apache server is running + Script command = new Script("/bin/bash", s_logger); + command.add("-c"); + command.add("if [ -d /etc/apache2 ] ; then service apache2 status | grep pid; else service httpd status | grep pid; fi "); + String result = command.execute(); + + //Apache Server is not running. Try to start it. 
+ if (result != null) { + + /*s_logger.warn("Apache server not running, trying to start it"); String port = Integer.toString(TemplateConstants.DEFAULT_TMPLT_COPY_PORT); String intf = TemplateConstants.DEFAULT_TMPLT_COPY_INTF; - + command = new Script("/bin/bash", s_logger); command.add("-c"); command.add("iptables -D INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + port + " -j DROP;" + @@ -636,23 +639,23 @@ public class UploadManagerImpl implements UploadManager { "iptables -I INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j DROP;" + "iptables -I INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + port + " -j HTTP;" + "iptables -I INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j HTTP;"); - + result = command.execute(); if (result != null) { s_logger.warn("Error in opening up httpd port err=" + result ); return false; }*/ - - command = new Script("/bin/bash", s_logger); - command.add("-c"); - command.add("if [ -d /etc/apache2 ] ; then service apache2 start; else service httpd start; fi "); - result = command.execute(); - if (result != null) { - s_logger.warn("Error in starting httpd service err=" + result ); - return false; - } - } - - return true; - } + + command = new Script("/bin/bash", s_logger); + command.add("-c"); + command.add("if [ -d /etc/apache2 ] ; then service apache2 start; else service httpd start; fi "); + result = command.execute(); + if (result != null) { + s_logger.warn("Error in starting httpd service err=" + result ); + return false; + } + } + + return true; + } } diff --git a/docs/en-US/accessing-vms.xml b/docs/en-US/accessing-vms.xml index 7053996e3c3..ce780cff080 100644 --- a/docs/en-US/accessing-vms.xml +++ b/docs/en-US/accessing-vms.xml @@ -22,26 +22,19 @@ under the License. -->
- Accessing VMs - Any user can access their own virtual machines. The administrator can access all VMs running in the cloud. - To access a VM through the &PRODUCT; UI: - - Log in to the &PRODUCT; UI as a user or admin. - Click Instances, then click the name of a running VM. - Click the View Console - - - - - consoleicon.png: button to view the console. - - - - To access a VM directly over the network: - - The VM must have some port open to incoming traffic. For example, in a basic zone, a new VM might be assigned to a security group which allows incoming traffic. This depends on what security group you picked when creating the VM. In other cases, you can open a port by setting up a port forwarding policy. See IP Forwarding and Firewalling. - If a port is open but you can not access the VM using ssh, it’s possible that ssh is not already enabled on the VM. This will depend on whether ssh is enabled in the template you picked when creating the VM. Access the VM through the &PRODUCT; UI and enable ssh on the machine using the commands for the VM’s operating system. - If the network has an external firewall device, you will need to create a firewall rule to allow access. See IP Forwarding and Firewalling. - + Accessing VMs + Any user can access their own virtual machines. The administrator can access all VMs running in the cloud. + To access a VM through the &PRODUCT; UI: + + Log in to the &PRODUCT; UI as a user or admin. + Click Instances, then click the name of a running VM. + Click the View Console button . + + To access a VM directly over the network: + + The VM must have some port open to incoming traffic. For example, in a basic zone, a new VM might be assigned to a security group which allows incoming traffic. This depends on what security group you picked when creating the VM. In other cases, you can open a port by setting up a port forwarding policy. See IP Forwarding and Firewalling. + If a port is open but you can not access the VM using ssh, it’s possible that ssh is not already enabled on the VM. This will depend on whether ssh is enabled in the template you picked when creating the VM. Access the VM through the &PRODUCT; UI and enable ssh on the machine using the commands for the VM’s operating system. + If the network has an external firewall device, you will need to create a firewall rule to allow access. See IP Forwarding and Firewalling. +
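A note on the DownloadManagerImpl change above: it drops the Adapters/ComponentLocator registry in favor of plain collections. Processors are registered in a Map keyed by display name, iterated via values().iterator(), and looked up directly with get("VMDK Processor"). The following is a minimal, self-contained sketch of that pattern; the Processor interface here is a simplified stand-in for com.cloud.storage.template.Processor, which also carries configure() and the format-processing methods.

    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;

    public class ProcessorRegistryDemo {
        /** Simplified stand-in for com.cloud.storage.template.Processor. */
        interface Processor {
            String getName();
        }

        static class VhdProcessor implements Processor {
            @Override public String getName() { return "VHD Processor"; }
        }

        static class VmdkProcessor implements Processor {
            @Override public String getName() { return "VMDK Processor"; }
        }

        public static void main(String[] args) {
            // Registration keyed by display name, as in the refactored configure():
            Map<String, Processor> processors = new HashMap<String, Processor>();
            processors.put("VHD Processor", new VhdProcessor());
            processors.put("VMDK Processor", new VmdkProcessor());

            // Iterating over values() replaces the old Adapters.enumeration():
            Iterator<Processor> it = processors.values().iterator();
            while (it.hasNext()) {
                System.out.println("trying " + it.next().getName());
            }

            // Direct lookup replaces the old name-based Adapters access:
            Processor vmdk = processors.get("VMDK Processor");
            System.out.println("selected " + vmdk.getName());
        }
    }

One behavioral note: HashMap does not preserve registration order, whereas the removed Adapters wrapper enumerated its backing list in insertion order; if the probing order of processors matters, a LinkedHashMap would keep the refactor behavior-identical.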
diff --git a/docs/en-US/autoscale.xml b/docs/en-US/autoscale.xml new file mode 100644 index 00000000000..d63281f9e7e --- /dev/null +++ b/docs/en-US/autoscale.xml @@ -0,0 +1,284 @@ +<?xml version='1.0' encoding='utf-8' ?> +<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [ +<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent"> +%BOOK_ENTITIES; +]> + +
+ Configuring AutoScale + AutoScaling allows you to scale your back-end services or application VMs up or down + seamlessly and automatically according to the conditions you define. With AutoScaling enabled, + you can ensure that the number of VMs you are using seamlessly scales up when demand increases, + and automatically decreases when demand subsides. Thus it helps you save compute costs by + terminating underused VMs automatically and launching new VMs when you need them, without the + need for manual intervention. + NetScaler AutoScaling is designed to seamlessly launch or terminate VMs based on + user-defined conditions. Conditions for triggering a scaleup or scaledown action can vary from a + simple use case like monitoring the CPU usage of a server to a complex use case of monitoring a + combination of a server's responsiveness and its CPU usage. For example, you can configure + AutoScaling to launch an additional VM whenever CPU usage exceeds 80 percent for 15 minutes, or + to remove a VM whenever CPU usage is less than 20 percent for 30 minutes. + &PRODUCT; uses the NetScaler load balancer to monitor all aspects of a system's health and + work in unison with &PRODUCT; to initiate scale-up or scale-down actions. The supported + NetScaler version is 10.0. + + Prerequisites + Before you configure an AutoScale rule, consider the following: + + + + Ensure that the necessary template is prepared before configuring AutoScale. When a VM + is deployed by using a template and when it comes up, the application should be up and + running. + + If the application is not running, the NetScaler device considers the VM as + ineffective and continues provisioning the VMs unconditionally until the resource limit is + exhausted. + + + + Deploy the templates you prepared. Ensure that the applications come up on the first + boot and are ready to take the traffic. Observe the time required to deploy the template. + Consider this time when you specify the quiet time while configuring AutoScale. + + + The AutoScale feature supports the SNMP counters that can be used to define conditions + for taking scale up or scale down actions. To monitor the SNMP-based counter, ensure that + the SNMP agent is installed in the template used for creating the AutoScale VMs, and that the + SNMP operations work with the configured SNMP community and port by using standard SNMP + managers (a verification sketch follows this list). For example, see to configure SNMP on a RHEL + machine. + + + Ensure that the endpointe.url parameter present in the Global Settings is set to the + Management Server API URL. For example, http://10.102.102.22:8080/client/api. In a + multi-node Management Server deployment, use the virtual IP address configured in the load + balancer for the management server’s cluster. Additionally, ensure that the NetScaler device + has access to this IP address to provide AutoScale support. + If you update the endpointe.url, disable the AutoScale functionality of the load + balancer rules in the system, then enable them back to reflect the changes. For more + information see + + + If the API Key and Secret Key are regenerated for an AutoScale user, ensure that the + AutoScale functionality of the load balancers that the user participates in is disabled and + then enabled to reflect the configuration changes in the NetScaler. + + + In an advanced Zone, ensure that at least one VM is present before configuring a + load balancer rule with AutoScale. Having one VM in the network ensures that the network is + in the implemented state for configuring AutoScale. 
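The SNMP prerequisite above can be verified by querying a counter the same way the NetScaler will. Below is a minimal sketch using the open-source SNMP4J library; the library choice is an assumption for illustration (SNMP4J is not part of &PRODUCT;, and a command-line snmpget works just as well). The target address is hypothetical, and the OID is UCD-SNMP-MIB ssCpuUser (commonly 1.3.6.1.4.1.2021.11.9.0), which corresponds to the Linux User CPU counter.

    import java.io.IOException;
    import org.snmp4j.CommunityTarget;
    import org.snmp4j.PDU;
    import org.snmp4j.Snmp;
    import org.snmp4j.event.ResponseEvent;
    import org.snmp4j.mp.SnmpConstants;
    import org.snmp4j.smi.GenericAddress;
    import org.snmp4j.smi.OID;
    import org.snmp4j.smi.OctetString;
    import org.snmp4j.smi.VariableBinding;
    import org.snmp4j.transport.DefaultUdpTransportMapping;

    public class SnmpCounterCheck {
        public static void main(String[] args) throws IOException {
            Snmp snmp = new Snmp(new DefaultUdpTransportMapping());
            snmp.listen();

            CommunityTarget target = new CommunityTarget();
            target.setCommunity(new OctetString("public"));              // the configured SNMP community
            target.setAddress(GenericAddress.parse("udp:10.1.1.10/161")); // hypothetical VM IP / SNMP port
            target.setVersion(SnmpConstants.version2c);
            target.setRetries(1);
            target.setTimeout(2000);

            PDU pdu = new PDU();
            pdu.setType(PDU.GET);
            pdu.add(new VariableBinding(new OID("1.3.6.1.4.1.2021.11.9.0"))); // ssCpuUser ("Linux User CPU")

            ResponseEvent resp = snmp.get(pdu, target);
            if (resp.getResponse() == null) {
                System.out.println("No SNMP response: agent down, wrong community/port, or filtered");
            } else {
                System.out.println("Counter value: " + resp.getResponse().get(0).getVariable());
            }
            snmp.close();
        }
    }

If this query times out from the NetScaler's network, the device will be unable to evaluate SNMP-based AutoScale conditions against the provisioned VMs.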
+
+
+
+ Configuration
+ Specify the following:
+
+
+
+
+
+
+ autoscaleateconfig.png: Configuring AutoScale
+
+
+
+
+ Template: A template consists of a base OS image and
application. A template is used to provision the new instance of an application on a scaleup
action. When a VM is deployed from a template, the VM can start taking the traffic from the
load balancer without any admin intervention. For example, if the VM is deployed for a Web
service, it should have the Web server running, the database connected, and so on.
+
+
+ Compute offering: A predefined set of virtual hardware
attributes, including CPU speed, number of CPUs, and RAM size, that the user can select when
creating a new virtual machine instance. Choose one of the compute offerings to be used
while provisioning a VM instance as part of a scaleup action.
+
+
+ Min Instance: The minimum number of active VM instances
that is assigned to a load balancing rule. The active VM instances are the application
instances that are up and serving the traffic, and are being load balanced. This parameter
ensures that a load balancing rule has at least the configured number of active VM instances
available to serve the traffic.
+
+ If an application, such as SAP, running on a VM instance is down for some reason, the
VM is then not counted as part of the Min Instance parameter, and the AutoScale feature
initiates a scaleup action if the number of active VM instances is below the configured
value. Similarly, when an application instance comes up from its earlier down state, this
application instance is counted as part of the active instance count and the AutoScale
process initiates a scaledown action when the active instance count breaches the Max
Instance value.
+
+
+
+ Max Instance: The maximum number of active VM instances
that should be assigned to a load balancing rule. This
parameter defines the upper limit of active VM instances that can be assigned to a load
balancing rule.
+ Specifying a large value for the maximum instance parameter might result in provisioning
a large number of VM instances, which in turn leads to a single load balancing rule exhausting
the VM instance limit specified at the account or domain level.
+
+ If an application, such as SAP, running on a VM instance is down for some reason, the
VM is not counted as part of the Max Instance parameter. So there may be scenarios where the
number of VMs provisioned for a scaleup action might be more than the configured Max
Instance value. Once the application instances in the VMs are up from an earlier down
state, the AutoScale feature starts aligning to the configured Max Instance value.
+
+
+
+ Specify the following scale-up and scale-down policies:
+
+
+ Duration: The duration, in seconds, for which the
conditions you specify must be true to trigger a scaleup action. The conditions defined
should hold true for the entire duration you specify for an AutoScale action to be invoked.
+
+
+
+ Counter: The performance counters expose the state of
the monitored instances. By default, &PRODUCT; offers four performance counters: three SNMP
counters and one NetScaler counter. The SNMP counters are Linux User CPU, Linux System CPU,
and Linux CPU Idle. The NetScaler counter is ResponseTime. The root administrator can add
additional counters into &PRODUCT; by using the &PRODUCT; API.
+
+
+ Operator: The following five relational operators are
supported in the AutoScale feature: Greater than, Less than, Less than or equal to, Greater than
or equal to, and Equal to.
+
+
+ Threshold: Threshold value to be used for the counter.
Once the counter defined above breaches the threshold value, the AutoScale feature initiates
a scaleup or scaledown action.
+
+
+ Add: Click Add to add the condition.
+
+
+ Additionally, if you want to configure the advanced settings, click Show advanced settings,
and specify the following:
+
+
+ Polling interval: Frequency at which the conditions, a
combination of counter, operator, and threshold, are evaluated before taking a scale up
or down action. The default polling interval is 30 seconds.
+
+
+ Quiet Time: This is the cool-down period after an
AutoScale action is initiated. The time includes the time taken to complete provisioning a
VM instance from its template and the time taken by an application to be ready to serve
traffic. This quiet time allows the fleet to come up to a stable state before any action can
take place. The default is 300 seconds.
+
+
+ Destroy VM Grace Period: The duration in seconds, after
a scaledown action is initiated, to wait before the VM is destroyed as part of the scaledown
action. This ensures a graceful close of any pending sessions or transactions being
served by the VM marked for destroy. The default is 120 seconds.
+
+
+ Security Groups: Security groups provide a way to
isolate traffic to the VM instances. A security group is a group of VMs that filter their
incoming and outgoing traffic according to a set of rules, called ingress and egress rules.
These rules filter network traffic according to the IP address that is attempting to
communicate with the VM.
+
+
+ Disk Offerings: A predefined set of disk sizes for
primary data storage.
+
+
+ SNMP Community: The SNMP community string to be used by
the NetScaler device to query the configured counter value from the provisioned VM
instances. Default is public.
+
+
+ SNMP Port: The port number on which the SNMP agent that
runs on the provisioned VMs is listening. Default port is 161.
+
+
+ User: This is the user that the NetScaler device uses to
invoke scaleup and scaledown API calls to the cloud. If no option is specified, the user who
configures AutoScaling is applied. Specify another user name to override.
+
+
+ Apply: Click Apply to create the AutoScale
configuration.
+
+
+
+ Disabling and Enabling an AutoScale Configuration
+ If you want to perform any maintenance operation on the AutoScale VM instances, disable
the AutoScale configuration. When the AutoScale configuration is disabled, no scaleup or
scaledown action is performed. You can use this downtime for the maintenance activities. To
disable the AutoScale configuration, click the Disable AutoScale
+
+
+
+
+ EnableDisable.png: button to enable or disable AutoScale.
+
+ button.
+
+ The button toggles between enable and disable, depending on whether AutoScale is currently
enabled or not. After the maintenance operations are done, you can re-enable the AutoScale
configuration. To enable, open the AutoScale configuration page again, then click the
Enable AutoScale
+
+
+
+
+ EnableDisable.png: button to enable or disable AutoScale.
+
+ button.
+
+ Updating an AutoScale Configuration
+ You can update the various parameters and add or delete the conditions in a scaleup or
scaledown rule.
Before you update an AutoScale configuration, ensure that you disable the
AutoScale load balancer rule by clicking the Disable AutoScale button.
+
+ After you modify the required AutoScale parameters, click Apply. To apply the new AutoScale
policies, open the AutoScale configuration page again, then click the Enable AutoScale
button.
+
+ Runtime Considerations
+
+
+
+
+ An administrator should not assign a VM to a load balancing rule that is configured for
AutoScale.
+
+
+ If the NetScaler is shut down or restarted before a VM provisioning is completed, the
provisioned VM cannot become a part of the load balancing rule even though the intent was to
assign it to one. As a workaround, rename the AutoScale-provisioned VMs based on
the rule name or ID so that, at any point in time, the VMs can be reconciled with their load
balancing rule.
+
+
+ Making API calls outside the context of AutoScale, such as destroyVM, on an autoscaled
VM leaves the load balancing configuration in an inconsistent state. Though the VM is destroyed
and removed from the load balancer rule, NetScaler continues to show the VM as a service
assigned to a rule.
+
+
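The disable/update/enable cycle described above can also be scripted against the
&PRODUCT; API. The following is a minimal sketch, not part of the original patch: it
signs requests with the standard &PRODUCT; API signature scheme and calls the
disableAutoScaleVmGroup and enableAutoScaleVmGroup commands. The endpoint, keys, and
AutoScale VM group id are placeholders for your own deployment.

#!/usr/bin/env python
# Hedged sketch: disable an AutoScale VM group before maintenance or an
# update, then enable it again. Endpoint, keys, and group id below are
# placeholders, not values from the original document.

import base64
import hashlib
import hmac
import urllib
import urllib2

endpoint = 'http://localhost:8080/client/api'
apikey = 'YOUR_API_KEY'
secretkey = 'YOUR_SECRET_KEY'

def request(command, params):
    '''Sign and issue a CloudStack API call, returning the raw response.'''
    args = dict(params)
    args['command'] = command
    args['apikey'] = apikey
    args['response'] = 'json'
    # Signature: sort the parameters, URL-encode the values, lowercase the
    # whole query string, HMAC-SHA1 it with the secret key, then base64-encode.
    query = '&'.join('%s=%s' % (k, urllib.quote(str(args[k]), safe=''))
                     for k in sorted(args))
    digest = hmac.new(secretkey, query.lower(), hashlib.sha1).digest()
    signature = urllib.quote(base64.b64encode(digest), safe='')
    return urllib2.urlopen(endpoint + '?' + query +
                           '&signature=' + signature).read()

# Disable the AutoScale VM group, do the maintenance or parameter updates,
# then enable the group again so the new policies take effect.
print request('disableAutoScaleVmGroup', {'id': 'AUTOSCALE_GROUP_ID'})
# ... perform maintenance / update the AutoScale configuration here ...
print request('enableAutoScaleVmGroup', {'id': 'AUTOSCALE_GROUP_ID'})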
diff --git a/docs/en-US/aws-api-examples.xml b/docs/en-US/aws-api-examples.xml new file mode 100644 index 00000000000..ee3b44a5bde --- /dev/null +++ b/docs/en-US/aws-api-examples.xml @@ -0,0 +1,145 @@ + + +%BOOK_ENTITIES; +]> + + + +
+ Examples
+ There are many tools available to interface with an AWS-compatible API. In this section we provide
+ a few examples that users of &PRODUCT; can build upon.
+
+ Boto Examples
+ Boto is one of them. It is a Python package available at https://github.com/boto/boto.
+ In this section we provide two examples of Python scripts that use Boto and have been tested with the
+ &PRODUCT; AWS API Interface.
+ First is an EC2 example. Replace the Access and Secret Keys with your own and
+ update the endpoint.
+
+
+ An EC2 Boto example
+ #!/usr/bin/env python
+
+import sys
+import os
+import boto
+import boto.ec2
+
+region = boto.ec2.regioninfo.RegionInfo(name="ROOT",endpoint="localhost")
+apikey='GwNnpUPrO6KgIdZu01z_ZhhZnKjtSdRwuYd4DvpzvFpyxGMvrzno2q05MB0ViBoFYtdqKd'
+secretkey='t4eXLEYWw7chBhDlaKf38adCMSHx_wlds6JfSx3z9fSpSOm0AbP9Moj0oGIzy2LSC8iw'
+
+def main():
+ '''Establish connection to EC2 cloud'''
+ conn = boto.connect_ec2(aws_access_key_id=apikey,
+ aws_secret_access_key=secretkey,
+ is_secure=False,
+ region=region,
+ port=7080,
+ path="/awsapi",
+ api_version="2010-11-15")
+
+ '''Get list of images that I own'''
+ images = conn.get_all_images()
+ print images
+ myimage = images[0]
+ '''Pick an instance type'''
+ vm_type='m1.small'
+ reservation = myimage.run(instance_type=vm_type,security_groups=['default'])
+
+if __name__ == '__main__':
+ main()
+
+
+
+ Second is an S3 example. Replace the Access and Secret keys with your own,
+ as well as the endpoint of the service. Be sure to also update the file paths to something
+ that exists on your machine.
+
+
+ An S3 Boto Example
+ #!/usr/bin/env python
+
+import sys
+import os
+from boto.s3.key import Key
+from boto.s3.connection import S3Connection
+from boto.s3.connection import OrdinaryCallingFormat
+
+apikey='ChOw-pwdcCFy6fpeyv6kUaR0NnhzmG3tE7HLN2z3OB_s-ogF5HjZtN4rnzKnq2UjtnHeg_yLA5gOw'
+secretkey='IMY8R7CJQiSGFk4cHwfXXN3DUFXz07cCiU80eM3MCmfLs7kusgyOfm0g9qzXRXhoAPCH-IRxXc3w'
+
+cf=OrdinaryCallingFormat()
+
+def main():
+ '''Establish connection to S3 service'''
+ conn = S3Connection(aws_access_key_id=apikey,aws_secret_access_key=secretkey, \
+ is_secure=False, \
+ host='localhost', \
+ port=7080, \
+ calling_format=cf, \
+ path="/awsapi/rest/AmazonS3")
+
+ try:
+ bucket=conn.create_bucket('cloudstack')
+ k = Key(bucket)
+ k.key = 'test'
+ try:
+ k.set_contents_from_filename('/Users/runseb/Desktop/s3cs.py')
+ except:
+ print 'could not write file'
+ pass
+ except:
+ bucket = conn.get_bucket('cloudstack')
+ k = Key(bucket)
+ k.key = 'test'
+ try:
+ k.get_contents_to_filename('/Users/runseb/Desktop/foobar')
+ except:
+ print 'Could not get file'
+ pass
+
+ try:
+ bucket1=conn.create_bucket('teststring')
+ k=Key(bucket1)
+ k.key = 'foobar'
+ k.set_contents_from_string('This is my silly test')
+ except:
+ bucket1=conn.get_bucket('teststring')
+ k = Key(bucket1)
+ k.key='foobar'
+ k.get_contents_as_string()
+
+if __name__ == '__main__':
+ main()
+
+
+
+
+ +
+ JClouds Examples + +
+ +
diff --git a/docs/en-US/aws-ec2-configuration.xml b/docs/en-US/aws-ec2-configuration.xml index d6c4066d1d8..7d26027ba35 100644 --- a/docs/en-US/aws-ec2-configuration.xml +++ b/docs/en-US/aws-ec2-configuration.xml @@ -23,26 +23,88 @@ -->
- Enabling the AWS API Compatible Interface
-
- The software that provides AWS API compatibility is installed along with &PRODUCT;. However, you must enable the feature and perform some setup steps.
-
-
- Set the global configuration parameter enable.ec2.api to true. See .
- Create a set of &PRODUCT; service offerings with names that match the Amazon service offerings.
- You can do this through the &PRODUCT; UI as described in the Administration Guide.
- Be sure you have included the Amazon default service offering, m1.small.
- If you did not already do so when you set the configuration parameter in step 1, restart the Management Server.
- # service cloud-management restart
- (Optional) The AWS API listens for requests on port 7080. If you prefer AWS API to listen on another port, you can change it as follows:
-
- Edit the files /etc/cloud/management/server.xml, /etc/cloud/management/server-nonssl.xml, and /etc/cloud/management/server-ssl.xml.
- In each file, find the tag <Service name="Catalina7080">. Under this tag, locate <Connector executor="tomcatThreadPool-internal" port= ....<.
- Change the port to whatever port you want to use, then save the files.
- Restart the Management Server.
- If you re-install CloudStack, you will have to make these changes again.
+ Enabling the EC2 and S3 Compatible Interface
+
+ The software that provides AWS API compatibility is installed along with &PRODUCT;. You must enable the services and perform some setup steps prior to using them.
+
+
+ Set the global configuration parameters for each service to true.
+ See .
+ Create a set of &PRODUCT; service offerings with names that match the Amazon service offerings.
+ You can do this through the &PRODUCT; UI as described in the Administration Guide.
+ Be sure you have included the Amazon default service offering, m1.small, as well as any EC2 instance types that you will use.
-
-
-
+ If you did not already do so when you set the configuration parameter in step 1,
+ restart the Management Server.
+ # service cloud-management restart
+
+
+ The following sections provide details on how to perform these steps.
+
+ Enabling the Services
+ To enable the EC2 and S3 compatible services you need to set the configuration variables enable.ec2.api
+ and enable.s3.api to true. You do not have to enable both at the same time. Enable the ones you need.
+ This can be done via the &PRODUCT; GUI by going to Global Settings or via the API.
+ The screenshot below shows you how to use the GUI to enable these services.
+
+
+
+
+
+
+
+ Use the GUI to set the configuration variable to true
+
+
+
+
+ Using the &PRODUCT; API, the easiest way is to use the so-called integration port on which you can make
+ unauthenticated calls. In Global Settings set the port to 8096 and subsequently call the updateConfiguration method.
+ The following URLs show you how:
+
+
+
+ http://localhost:8096/client/api?command=updateConfiguration&name=enable.ec2.api&value=true
+ http://localhost:8096/client/api?command=updateConfiguration&name=enable.s3.api&value=true
+
+
+
+ Once you have enabled the services, restart the server.
+
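If you prefer to script these two calls rather than paste the URLs in a browser, the
following minimal sketch (not part of the original patch) issues them through the
unauthenticated integration port. It assumes the port was set to 8096 in Global Settings
and that the script runs on the management server itself.

#!/usr/bin/env python
# Enable the EC2 and S3 compatible services through the integration port.
# The port (8096) and host (localhost) are deployment-specific assumptions.

import urllib
import urllib2

base = 'http://localhost:8096/client/api'

for name in ('enable.ec2.api', 'enable.s3.api'):
    query = urllib.urlencode({'command': 'updateConfiguration',
                              'name': name,
                              'value': 'true'})
    print urllib2.urlopen(base + '?' + query).read()

# Remember to restart the management server afterwards:
#   service cloud-management restart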
+ +
+ Creating EC2 Compatible Service Offerings
+ You will also need to define compute service offerings with names compatible with the
+ Amazon EC2 instance type API names (e.g., m1.small, m1.large). This can be done via the &PRODUCT; GUI.
+ Go to Service Offerings, select Compute offering, and either create
+ a new compute offering or modify an existing one, ensuring that the name matches an EC2 instance type API name. The screenshot below shows you how:
+
+
+
+
+
+
+ Use the GUI to set the name of a compute service offering to an EC2 instance
+ type API name.
+
+
+
+
+
+ Modifying the AWS API Port
+
+ (Optional) The AWS API listens for requests on port 7080. If you prefer the AWS API to listen on another port, you can change it as follows:
+
+ Edit the files /etc/cloud/management/server.xml, /etc/cloud/management/server-nonssl.xml,
+ and /etc/cloud/management/server-ssl.xml.
+ In each file, find the tag <Service name="Catalina7080">. Under this tag,
+ locate <Connector executor="tomcatThreadPool-internal" port= ....<.
+ Change the port to whatever port you want to use, then save the files.
+ Restart the Management Server.
+
+ If you re-install &PRODUCT;, you will have to re-enable the services and, if need be, update the port.
+
+
+
diff --git a/docs/en-US/aws-ec2-introduction.xml b/docs/en-US/aws-ec2-introduction.xml index a4df086d465..538c09d5ad1 100644 --- a/docs/en-US/aws-ec2-introduction.xml +++ b/docs/en-US/aws-ec2-introduction.xml @@ -23,16 +23,19 @@ -->
- Amazon Web Services EC2 Compatible Interface
+ Amazon Web Services Compatible Interface
&PRODUCT; can translate Amazon Web Services (AWS) API calls to native &PRODUCT; API calls
so that users can continue using existing AWS-compatible tools. This translation service runs as
a separate web application in the same tomcat server as the management server of &PRODUCT;,
- listening on the same port. This Amazon EC2-compatible API is accessible through a SOAP web
- service.
+ listening on a different port. The Amazon Web Services (AWS) compatible interface provides the
+ EC2 SOAP and Query APIs as well as the S3 REST API.
This service was previously enabled by separate software
called CloudBridge. It is now fully integrated with the &PRODUCT; management
server.
+
+ The compatible interfaces for the EC2 Query API and the S3 API are works in progress. The S3 compatible API offers a way to store data on the management server file system; it is not an implementation of the S3 backend.
+
Limitations
@@ -42,7 +45,9 @@
Available in fresh installations of &PRODUCT;.
Not available through upgrade of previous versions.
- If you need to support features such as elastic IP, set up a Citrix NetScaler to provide this service. The commands such as ec2-associate-address will not work without EIP setup. Users running VMs in this zone will be using the NetScaler-enabled network offering (DefaultSharedNetscalerEIP and ELBNetworkOffering).
+ Features such as Elastic IP (EIP) and Elastic Load Balancing (ELB) are only available in an infrastructure
+ with a Citrix NetScaler device. Users accessing a Zone with a NetScaler device will need to use a
+ NetScaler-enabled network offering (DefaultSharedNetscalerEIP and ELBNetworkOffering).
diff --git a/docs/en-US/aws-ec2-requirements.xml b/docs/en-US/aws-ec2-requirements.xml index 59fb5b6f5ab..62e94b1ac9f 100644 --- a/docs/en-US/aws-ec2-requirements.xml +++ b/docs/en-US/aws-ec2-requirements.xml @@ -23,13 +23,14 @@ -->
- System Requirements
+ Supported API Version
- This interface complies with Amazon's WDSL version dated November 15, 2010, available at
+ The EC2 interface complies with Amazon's WSDL version dated November 15, 2010, available at
http://ec2.amazonaws.com/doc/2010-11-15/.
- Compatible with the EC2 command-line
+ The interface is compatible with the EC2 command-line
tools EC2 tools v. 1.3.6230, which can be downloaded at http://s3.amazonaws.com/ec2-downloads/ec2-api-tools-1.3-62308.zip.
-
\ No newline at end of file
+ Work is underway to support a more recent version of the EC2 API.
+
diff --git a/docs/en-US/aws-ec2-supported-commands.xml b/docs/en-US/aws-ec2-supported-commands.xml index 9494218cd1c..7cdbcad8095 100644 --- a/docs/en-US/aws-ec2-supported-commands.xml +++ b/docs/en-US/aws-ec2-supported-commands.xml @@ -24,7 +24,7 @@
Supported AWS API Calls - The following Amazon EC2 commands are supported by &PRODUCT; when the AWS API compatibility feature is enabled. + The following Amazon EC2 commands are supported by &PRODUCT; when the AWS API compatible interface is enabled. For a few commands, there are differences between the &PRODUCT; and Amazon EC2 versions, and these differences are noted. The underlying SOAP call for each command is also given, for those who have built tools using those calls. diff --git a/docs/en-US/aws-ec2-timeouts.xml b/docs/en-US/aws-ec2-timeouts.xml index c8b3ec6465f..73d0c16c4df 100644 --- a/docs/en-US/aws-ec2-timeouts.xml +++ b/docs/en-US/aws-ec2-timeouts.xml @@ -24,7 +24,7 @@
Using Timeouts to Ensure AWS API Command Completion
- The Amazon EC2 command-line tools have a default connection timeout. When used with &PRODUCT;, a longer timeout might be needed for some commands. If you find that commands are not completing due to timeouts, you can gain more time for commands to finish by overriding the default timeouts on individual commands. You can add the following optional command-line parameters to any &PRODUCT;-supported EC2 command:
+ The Amazon EC2 command-line tools have a default connection timeout. When used with &PRODUCT;, a longer timeout might be needed for some commands. If you find that commands are not completing due to timeouts, you can specify custom timeouts. You can add the following optional command-line parameters to any &PRODUCT;-supported EC2 command:
@@ -47,4 +47,5 @@
Example:
ec2-run-instances 2 –z us-test1 –n 1-3 --connection-timeout 120 --request-timeout 120
-
\ No newline at end of file
+ These optional timeout arguments are not specific to &PRODUCT;.
+
diff --git a/docs/en-US/aws-ec2-user-setup.xml b/docs/en-US/aws-ec2-user-setup.xml index 8607378d88c..edc371ef376 100644 --- a/docs/en-US/aws-ec2-user-setup.xml +++ b/docs/en-US/aws-ec2-user-setup.xml @@ -22,76 +22,84 @@ under the License. -->
- AWS API User Setup Steps + AWS API User Setup In general, users need not be aware that they are using a translation service provided by &PRODUCT;. - They need only send AWS API calls to &PRODUCT;'s endpoint, and it will translate the calls to the native API. - Users of the Amazon EC2 compatible interface will be able to keep their existing EC2 tools + They only need to send AWS API calls to &PRODUCT;'s endpoint, and it will translate the calls to the native &PRODUCT; API. Users of the Amazon EC2 compatible interface will be able to keep their existing EC2 tools and scripts and use them with their &PRODUCT; deployment, by specifying the endpoint of the management server and using the proper user credentials. In order to do this, each user must perform the following configuration steps: - Generate user credentials and register with the service. + Generate user credentials. - Set up the environment variables for the EC2 command-line tools. + Register with the service. - For SOAP access, use the endpoint http://&PRODUCT;-management-server:7080/awsapi. - The &PRODUCT;-management-server can be specified by a fully-qualified domain name or IP address. + For convenience, set up environment variables for the EC2 SOAP command-line tools.
AWS API User Registration - Each user must perform a one-time registration. The user follows these steps: - - - Obtain the following by looking in the &PRODUCT; UI, using the API, or asking the cloud administrator: - - The &PRODUCT; server's publicly available DNS name or IP address - The user account's API key and Secret key - - - - - Generate a private key and a self-signed X.509 certificate. The user substitutes their own desired storage location for /path/to/… below. - - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /path/to/private_key.pem -out /path/to/cert.pem - - - - - Register the mapping from the X.509 certificate to the API/Secret keys. - Download the following script from http://download.cloud.com/releases/3.0.3/cloudstack-aws-api-register and run it. - Substitute the values that were obtained in step 1 in the URL below. - - -$ cloudstack-aws-api-register --apikey=User’s &PRODUCT; API key --secretkey=User’s &PRODUCT; Secret key --cert=/path/to/cert.pem --url=http://&PRODUCT;.server:7080/awsapi - - - + Each user must perform a one-time registration. The user follows these steps: + + + Obtain the following by looking in the &PRODUCT; UI, using the API, or asking the cloud administrator: + + + The &PRODUCT; server's publicly available DNS name or IP address + The user account's Access key and Secret key + + + + Generate a private key and a self-signed X.509 certificate. The user substitutes their own desired storage location for /path/to/… below. + + + $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /path/to/private_key.pem -out /path/to/cert.pem + + + + Register the user X.509 certificate and Access/Secret keys with the AWS compatible service. + If you have the source code of &PRODUCT; go to the awsapi-setup/setup directory and use the Python script + cloudstack-aws-api-register. If you do not have the source then download the script using the following command. + + + wget -O cloudstack-aws-api-register "https://git-wip-us.apache.org/repos/asf?p=incubator-cloudstack.git;a=blob_plain;f=awsapi-setup/setup/cloudstack-aws-api-register;hb=HEAD" + + + Then execute it, using the parameter values that were obtained in step 1. An example is shown below. + + $ cloudstack-aws-api-register --apikey=User’s &PRODUCT; API key --secretkey=User’s &PRODUCT; Secret key --cert=/path/to/cert.pem --url=http://&PRODUCT;.server:7080/awsapi + + + - A user with an existing AWS certificate could choose to use the same certificate with &PRODUCT;, but the public key would be uploaded to the &PRODUCT; management server database. + A user with an existing AWS certificate could choose to use the same certificate with &PRODUCT;, but note that the certificate would be uploaded to the &PRODUCT; management server database.
- AWS API Command-Line Tools Setup
- To use the EC2 command-line tools, the user must perform these steps:
-
- Be sure you have the right version of EC2 Tools.
- The supported version is available at http://s3.amazonaws.com/ec2-downloads/ec2-api-tools-1.3-62308.zip.
-
-
- Set up the environment variables that will direct the tools to the server. As a best practice, you may wish to place these commands in a script that may be sourced before using the AWS API translation feature.
- $ export EC2_CERT=/path/to/cert.pem
-$ export EC2_PRIVATE_KEY=/path/to/private_key.pem
-$ export EC2_URL=http://&PRODUCT;.server:7080/awsapi
-$ export EC2_HOME=/path/to/EC2_tools_directory
-
-
+ AWS API Command-Line Tools Setup
+ To use the EC2 command-line tools, the user must perform these steps:
+
+
+ Be sure you have the right version of EC2 Tools.
+ The supported version is available at http://s3.amazonaws.com/ec2-downloads/ec2-api-tools-1.3-62308.zip.
+
+
+
+ Set up the EC2 environment variables. This can be done every time you use the service, or you can set them up in the proper shell profile. Replace the endpoint (i.e., EC2_URL) with the proper address of your &PRODUCT; management server and port. In a bash shell do the following.
+
+
+ $ export EC2_CERT=/path/to/cert.pem
+ $ export EC2_PRIVATE_KEY=/path/to/private_key.pem
+ $ export EC2_URL=http://localhost:7080/awsapi
+ $ export EC2_HOME=/path/to/EC2_tools_directory
+
+
-
\ No newline at end of file + diff --git a/docs/en-US/aws-interface-compatibility.xml b/docs/en-US/aws-interface-compatibility.xml index a03d447b50d..2c85c24b36a 100644 --- a/docs/en-US/aws-interface-compatibility.xml +++ b/docs/en-US/aws-interface-compatibility.xml @@ -23,11 +23,12 @@ --> - Amazon Web Service Interface Compatibility + Amazon Web Services Compatible Interface + diff --git a/docs/en-US/building-marvin.xml b/docs/en-US/building-marvin.xml new file mode 100644 index 00000000000..3dac9d65d60 --- /dev/null +++ b/docs/en-US/building-marvin.xml @@ -0,0 +1,46 @@ + + +%BOOK_ENTITIES; +]> + + + +
+ Building and Installing Marvin
+ Marvin is built with Maven and depends on APIdoc. To build it, do the following in the root tree of &PRODUCT;:
+ mvn -P developer -pl :cloud-apidoc
+ mvn -P developer -pl :cloud-marvin
+ If successful, the build will have created the cloudstackAPI Python package under tools/marvin/marvin/cloudstackAPI as well as a gzipped Marvin package under tools/marvin/dist. To install the Python Marvin module do the following in tools/marvin:
+ sudo python ./setup.py install
+ The dependencies will be downloaded, the Python module installed, and you should be able to use Marvin in Python. Check that you can import the module before starting to use it.
+ $ python
+Python 2.7.3 (default, Nov 17 2012, 19:54:34)
+[GCC 4.2.1 Compatible Apple Clang 4.1 ((tags/Apple/clang-421.11.66))] on darwin
+Type "help", "copyright", "credits" or "license" for more information.
+>>> import marvin
+>>> from marvin.cloudstackAPI import *
+>>>
+
+ You could also install it using pip or easy_install using the local distribution package in tools/marvin/dist:
+ pip install tools/marvin/dist/Marvin-0.1.0.tar.gz
+ Or:
+ easy_install tools/marvin/dist/Marvin-0.1.0.tar.gz
+
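Once the module imports cleanly, each API command is exposed as a generated module
containing a matching Cmd class. The sketch below is illustrative only and not part of
the original document: the cloudConnection constructor arguments and the marvin_request
method are assumptions based on Marvin's layout at the time and may differ in your
checkout, so verify them against the code in tools/marvin/marvin before relying on it.

# Hedged sketch of driving the generated bindings; the connection class and
# method names are assumptions -- check tools/marvin/marvin in your tree.
from marvin.cloudstackConnection import cloudConnection
from marvin.cloudstackAPI import listZones

# Unauthenticated connection through the integration port (8096).
conn = cloudConnection('localhost', port=8096)

cmd = listZones.listZonesCmd()    # one generated module and Cmd class per API call
zones = conn.marvin_request(cmd)  # returns deserialized response objects
for zone in zones:
    print zone.name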
diff --git a/docs/en-US/configure-snmp-rhel.xml b/docs/en-US/configure-snmp-rhel.xml new file mode 100644 index 00000000000..bd227ff8ed5 --- /dev/null +++ b/docs/en-US/configure-snmp-rhel.xml @@ -0,0 +1,86 @@ + + +%BOOK_ENTITIES; +]> + +
+ Configuring SNMP Community String on a RHEL Server
+ The SNMP Community string is similar to a user id or password that provides access to a
network device, such as a router. This string is sent along with all SNMP requests. If the
community string is correct, the device responds with the requested information. If the
community string is incorrect, the device discards the request and does not respond.
+ The NetScaler device uses SNMP to communicate with the VMs. You must install SNMP and
configure the SNMP community string for secure communication between the NetScaler device and the
RHEL machine.
+
+
+ Ensure that SNMP is installed on the RHEL machine. If not, run the following command:
+ yum install net-snmp-utils
+
+
+ Edit the /etc/snmp/snmpd.conf file to allow SNMP polling from the NetScaler
device.
+
+
+ Map the community name into a security name (local and mynetwork, depending on where
the request is coming from):
+
+ Use a strong password instead of public when you edit the following table.
+
+ # sec.name source community
+com2sec local localhost public
+com2sec mynetwork 0.0.0.0 public
+
+ Setting the source to 0.0.0.0 allows any IP address, including the NetScaler device, to poll the server.
+
+
+
+ Map the security names into group names:
+ # group.name sec.model sec.name
+group MyRWGroup v1 local
+group MyRWGroup v2c local
+group MyROGroup v1 mynetwork
+group MyROGroup v2c mynetwork
+
+
+ Create a view that the groups can access:
+ incl/excl subtree mask view all included .1
+
+
+ Grant the two groups access to the view you created, with different write permissions:
+ # context sec.model sec.level prefix read write notif
+ access MyROGroup "" any noauth exact all none none
+ access MyRWGroup "" any noauth exact all all all
+
+
+
+ Unblock SNMP in iptables.
+ iptables -A INPUT -p udp --dport 161 -j ACCEPT
+
+
+ Start the SNMP service:
+ service snmpd start
+
+
+ Ensure that the SNMP service is started automatically during the system startup:
+ chkconfig snmpd on
+
+
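After snmpd is running and port 161 is open, it is worth verifying from a remote
machine that the counters AutoScale relies on are reachable. The following sketch is
not part of the original document; it shells out to snmpget (from net-snmp-utils) and
queries ssCpuUser.0, OID .1.3.6.1.4.1.2021.11.9.0 from UCD-SNMP-MIB, which corresponds
to the Linux User CPU counter. The host and community values are placeholders.

#!/usr/bin/env python
# Verify that the RHEL machine answers SNMP queries for a CPU counter.
# The host and community string are placeholders for your environment.

import subprocess

host = '10.102.102.50'   # the RHEL machine configured above
community = 'public'     # use your actual community string

# Equivalent to: snmpget -v2c -c public 10.102.102.50 .1.3.6.1.4.1.2021.11.9.0
out = subprocess.check_output(['snmpget', '-v2c', '-c', community,
                               host, '.1.3.6.1.4.1.2021.11.9.0'])
print out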
diff --git a/docs/en-US/external-firewalls-and-load-balancers.xml b/docs/en-US/external-firewalls-and-load-balancers.xml index 64f5ac3551d..6ca49f0ef03 100644 --- a/docs/en-US/external-firewalls-and-load-balancers.xml +++ b/docs/en-US/external-firewalls-and-load-balancers.xml @@ -3,30 +3,31 @@ %BOOK_ENTITIES; ]> - + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. +-->
- External Firewalls and Load Balancers - &PRODUCT; is capable of replacing its Virtual Router with an external Juniper SRX device and an optional external NetScaler or F5 load balancer for gateway and load balancing services. In this case, the VMs use the SRX as their gateway. - - - - - + External Firewalls and Load Balancers + &PRODUCT; is capable of replacing its Virtual Router with an external Juniper SRX device and + an optional external NetScaler or F5 load balancer for gateway and load balancing services. In + this case, the VMs use the SRX as their gateway. + + + + +
diff --git a/docs/en-US/images/compute-service-offerings.png b/docs/en-US/images/compute-service-offerings.png new file mode 100644 index 00000000000..88eb6f80597 Binary files /dev/null and b/docs/en-US/images/compute-service-offerings.png differ diff --git a/docs/en-US/images/ec2-s3-configuration.png b/docs/en-US/images/ec2-s3-configuration.png new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docs/en-US/images/view-console-button.png b/docs/en-US/images/view-console-button.png new file mode 100644 index 00000000000..b321ceadefe Binary files /dev/null and b/docs/en-US/images/view-console-button.png differ diff --git a/docs/en-US/marvin.xml b/docs/en-US/marvin.xml index 062616ac888..8fd2c96fe3f 100644 --- a/docs/en-US/marvin.xml +++ b/docs/en-US/marvin.xml @@ -29,4 +29,5 @@ Marvin's complete documenation is on the wiki at https://cwiki.apache.org/CLOUDSTACK/testing-with-python.htmlThe source code is located at tools/marvin + diff --git a/docs/en-US/ongoing-configuration-of-external-firewalls-loadbalancer.xml b/docs/en-US/ongoing-configuration-of-external-firewalls-loadbalancer.xml new file mode 100644 index 00000000000..c90c7ada622 --- /dev/null +++ b/docs/en-US/ongoing-configuration-of-external-firewalls-loadbalancer.xml @@ -0,0 +1,46 @@ + + +%BOOK_ENTITIES; +]> + +
+ Ongoing Configuration of External Firewalls and Load Balancers + Additional user actions (e.g. setting a port forward) will cause further programming of the + firewall and load balancer. A user may request additional public IP addresses and forward + traffic received at these IPs to specific VMs. This is accomplished by enabling static NAT for a + public IP address, assigning the IP to a VM, and specifying a set of protocols and port ranges + to open. When a static NAT rule is created, &PRODUCT; programs the zone's external firewall with + the following objects: + + + A static NAT rule that maps the public IP address to the private IP address of a + VM. + + + A security policy that allows traffic within the set of protocols and port ranges that + are specified. + + + A firewall filter counter that measures the number of bytes of incoming traffic to the + public IP. + + + The number of incoming and outgoing bytes through source NAT, static NAT, and load balancing + rules is measured and saved on each external element. This data is collected on a regular basis + and stored in the &PRODUCT; database. +
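For illustration, the user actions that trigger this firewall programming map onto two
&PRODUCT; API calls, enableStaticNat and createFirewallRule. The sketch below is not
part of the original document; it assumes the signed request() helper from the AutoScale
sketch earlier is available, and the ids are placeholders.

# Map a public IP to a VM; &PRODUCT; then pushes the static NAT rule,
# security policy, and traffic counter to the zone's external firewall.
# Assumes the signed request() helper defined in the AutoScale sketch.
print request('enableStaticNat', {'ipaddressid': 'PUBLIC_IP_ID',
                                  'virtualmachineid': 'VM_ID'})

# Open a protocol and port range on that public IP (ingress rule).
print request('createFirewallRule', {'ipaddressid': 'PUBLIC_IP_ID',
                                     'protocol': 'tcp',
                                     'startport': '80',
                                     'endport': '80'})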
diff --git a/docs/en-US/system-service-offerings.xml b/docs/en-US/system-service-offerings.xml index c41aa2e293b..84d5f7ae7b5 100644 --- a/docs/en-US/system-service-offerings.xml +++ b/docs/en-US/system-service-offerings.xml @@ -26,4 +26,5 @@ System Service Offerings System service offerings provide a choice of CPU speed, number of CPUs, tags, and RAM size, just as other service offerings do. But rather than being used for virtual machine instances and exposed to users, system service offerings are used to change the default properties of virtual routers, console proxies, and other system VMs. System service offerings are visible only to the &PRODUCT; root administrator. &PRODUCT; provides default system service offerings. The &PRODUCT; root administrator can create additional custom system service offerings. When &PRODUCT; creates a virtual router for a guest network, it uses default settings which are defined in the system service offering associated with the network offering. You can upgrade the capabilities of the virtual router by applying a new network offering that contains a different system service offering. All virtual routers in that network will begin using the settings from the new service offering. + diff --git a/engine/api/pom.xml b/engine/api/pom.xml index cbb83e46add..99c01510f99 100644 --- a/engine/api/pom.xml +++ b/engine/api/pom.xml @@ -30,11 +30,6 @@ cloud-api ${project.version} - - org.apache.cloudstack - cloud-framework-ipc - ${project.version} - org.apache.cxf cxf-bundle-jaxrs diff --git a/engine/api/src/org/apache/cloudstack/engine/cloud/entity/api/VolumeEntity.java b/engine/api/src/org/apache/cloudstack/engine/cloud/entity/api/VolumeEntity.java index a63c2b47cea..47fb638401b 100755 --- a/engine/api/src/org/apache/cloudstack/engine/cloud/entity/api/VolumeEntity.java +++ b/engine/api/src/org/apache/cloudstack/engine/cloud/entity/api/VolumeEntity.java @@ -20,7 +20,7 @@ package org.apache.cloudstack.engine.cloud.entity.api; import org.apache.cloudstack.engine.datacenter.entity.api.StorageEntity; import org.apache.cloudstack.engine.entity.api.CloudStackEntity; -import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType; import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeType; @@ -76,9 +76,12 @@ public interface VolumeEntity extends CloudStackEntity { long getSize(); - DiskFormat getDiskType(); + VolumeDiskType getDiskType(); VolumeType getType(); StorageEntity getDataStore(); + + boolean createVolumeFromTemplate(long dataStoreId, VolumeDiskType diskType, TemplateEntity template); + boolean createVolume(long dataStoreId, VolumeDiskType diskType); } diff --git a/engine/api/src/org/apache/cloudstack/engine/datacenter/entity/api/HostEntity.java b/engine/api/src/org/apache/cloudstack/engine/datacenter/entity/api/HostEntity.java index 9da196ecd9a..99f3120f93a 100644 --- a/engine/api/src/org/apache/cloudstack/engine/datacenter/entity/api/HostEntity.java +++ b/engine/api/src/org/apache/cloudstack/engine/datacenter/entity/api/HostEntity.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.engine.datacenter.entity.api; import com.cloud.hypervisor.Hypervisor.HypervisorType; diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObject.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObject.java deleted file mode 100644 index 4487330c694..00000000000 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObject.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.engine.subsystem.api.storage; - -import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; - -public interface DataObject { - public long getId(); - public String getUri(); - public DataStore getDataStore(); - public long getSize(); - public DataObjectType getType(); - public DiskFormat getFormat(); -} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObjectType.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObjectType.java deleted file mode 100644 index b4d1a57c88c..00000000000 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObjectType.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.engine.subsystem.api.storage; - -public enum DataObjectType { - VOLUME, - SNAPSHOT, - TEMPLATE -} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStore.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStore.java deleted file mode 100644 index ae1fade5ba3..00000000000 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStore.java +++ /dev/null @@ -1,9 +0,0 @@ -package org.apache.cloudstack.engine.subsystem.api.storage; - -public interface DataStore { - DataStoreDriver getDriver(); - DataStoreRole getRole(); - long getId(); - String getUri(); - Scope getScope(); -} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java deleted file mode 100644 index 4aba9bfdbff..00000000000 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.engine.subsystem.api.storage; - -import java.util.Set; - -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; - -public interface DataStoreDriver { - public String grantAccess(DataObject data, EndPoint ep); - public boolean revokeAccess(DataObject data, EndPoint ep); - public Set listObjects(DataStore store); - public void createAsync(DataObject data, AsyncCompletionCallback callback); - public void deleteAsync(DataObject data, AsyncCompletionCallback callback); - public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback); - public boolean canCopy(DataObject srcData, DataObject destData); -} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreRole.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreRole.java deleted file mode 100644 index a45ca7a6c8e..00000000000 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreRole.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.engine.subsystem.api.storage; - -import com.cloud.utils.exception.CloudRuntimeException; - -public enum DataStoreRole { - Primary("primary"), - Image("image"), - ImageCache("imagecache"), - Backup("backup"); - - public boolean isImageStore() { - return (this.role.equalsIgnoreCase("image") || this.role.equalsIgnoreCase("imagecache")) ? true : false; - } - - private final String role; - DataStoreRole(String type) { - this.role = type; - } - - public static DataStoreRole getRole(String role) { - if (role == null) { - throw new CloudRuntimeException("role can't be empty"); - } - if (role.equalsIgnoreCase("primary")) { - return Primary; - } else if (role.equalsIgnoreCase("image")) { - return Image; - } else if (role.equalsIgnoreCase("imagecache")) { - return ImageCache; - } else if (role.equalsIgnoreCase("backup")) { - return Backup; - } else { - throw new CloudRuntimeException("can't identify the role"); - } - } -} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/EndPoint.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/EndPoint.java deleted file mode 100644 index 414207d5fd0..00000000000 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/EndPoint.java +++ /dev/null @@ -1,11 +0,0 @@ -package org.apache.cloudstack.engine.subsystem.api.storage; - -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; - -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.Command; - -public interface EndPoint { - public Answer sendMessage(Command cmd); - public void sendMessageAsync(Command cmd, AsyncCompletionCallback callback); -} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreInfo.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreInfo.java index ec87cb5aa01..11bc26b3249 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreInfo.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreInfo.java @@ -20,13 +20,17 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity; -import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType; + + + import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.Volume; public interface PrimaryDataStoreInfo { public boolean isHypervisorSupported(HypervisorType hypervisor); public boolean isLocalStorageSupported(); - public boolean isVolumeDiskTypeSupported(DiskFormat diskType); + public boolean isVolumeDiskTypeSupported(VolumeDiskType diskType); public long getCapacity(); public long getAvailableCapacity(); @@ -36,4 +40,6 @@ public interface PrimaryDataStoreInfo { public String getName(); public String getType(); public PrimaryDataStoreLifeCycle getLifeCycle(); + PrimaryDataStoreProvider getProvider(); + } diff --git 
a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java index cf29d9fea09..afdf085e532 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java @@ -18,6 +18,25 @@ */ package org.apache.cloudstack.engine.subsystem.api.storage; +import java.util.Map; -public interface PrimaryDataStoreLifeCycle extends DataStoreLifeCycle { +public interface PrimaryDataStoreLifeCycle { + public boolean initialize(Map dsInfos); + + public boolean attachCluster(ClusterScope scope); + + public boolean dettach(); + + public boolean unmanaged(); + + public boolean maintain(); + + public boolean cancelMaintain(); + + public boolean deleteDataStore(); + + /** + * @param dataStore + */ + void setDataStore(PrimaryDataStoreInfo dataStore); } diff --git a/server/src/com/cloud/cluster/CleanupMaid.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreProvider.java similarity index 56% rename from server/src/com/cloud/cluster/CleanupMaid.java rename to engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreProvider.java index 1ff83f12286..9aafebf41e4 100644 --- a/server/src/com/cloud/cluster/CleanupMaid.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreProvider.java @@ -14,28 +14,21 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -package com.cloud.cluster; +package org.apache.cloudstack.engine.subsystem.api.storage; + +import java.util.Map; + +public interface PrimaryDataStoreProvider { + public PrimaryDataStoreInfo getDataStore(long dataStoreId); + public long getId(); + public String getName(); -/** - * - * task. The state is serialized and stored. When cleanup is required - * CleanupMaid is instantiated from the stored data and cleanup() is called. - * - */ -public interface CleanupMaid { /** - * cleanup according the state that was stored. - * - * @return 0 indicates cleanup was successful. Negative number - * indicates the cleanup was unsuccessful but don't retry. Positive number - * indicates the cleanup was unsuccessful and retry in this many seconds. - */ - int cleanup(CheckPointManager checkPointMgr); - - - /** - * returned here is recorded. 
+ * @param dsInfos * @return */ - String getCleanupProcedure(); + PrimaryDataStoreInfo registerDataStore(Map dsInfos); + + //LifeCycle of provider + public boolean configure(); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StorageOrchestrator.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StorageOrchestrator.java index fdb15c7331c..c1c1e901c66 100755 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StorageOrchestrator.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StorageOrchestrator.java @@ -22,7 +22,7 @@ import java.util.List; import org.apache.cloudstack.engine.cloud.entity.api.TemplateEntity; import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; -import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType; import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeType; import com.cloud.deploy.DeploymentPlan; @@ -62,7 +62,7 @@ public interface StorageOrchestrator { */ void prepareAttachDiskToVM(long diskId, long vmId, String reservationId); - boolean createVolume(VolumeEntity volume, long dataStoreId, DiskFormat diskType); - boolean createVolumeFromTemplate(VolumeEntity volume, long dataStoreId, DiskFormat dis, TemplateEntity template); + boolean createVolume(VolumeEntity volume, long dataStoreId, VolumeDiskType diskType); + boolean createVolumeFromTemplate(VolumeEntity volume, long dataStoreId, VolumeDiskType dis, TemplateEntity template); VolumeEntity allocateVolumeInDb(long size, VolumeType type,String volName, Long templateId); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StorageSubSystem.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StorageSubSystem.java old mode 100755 new mode 100644 index eb78376305e..8043487d46b --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StorageSubSystem.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StorageSubSystem.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
package org.apache.cloudstack.engine.subsystem.api.storage; import java.net.URI; diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java index 7c714eff5e5..4adc1a800f5 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java @@ -19,12 +19,28 @@ package org.apache.cloudstack.engine.subsystem.api.storage; -import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; +import java.util.Date; -public interface VolumeInfo extends DataObject { +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType; +import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeType; + +import com.cloud.storage.Volume; + +public interface VolumeInfo { + public long getSize(); public String getUuid(); - public long getId(); - - public boolean isAttachedVM(); public String getPath(); + public PrimaryDataStoreInfo getDataStore() ; + public String getTemplateUuid(); + public String getTemplatePath(); + public VolumeType getType(); + public VolumeDiskType getDiskType(); + public long getId(); + public Volume.State getCurrentState(); + public Volume.State getDesiredState(); + public Date getCreatedDate(); + public Date getUpdatedDate(); + public String getOwner(); + public String getName(); + public boolean isAttachedVM(); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/DiskFormat.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/DiskFormat.java deleted file mode 100644 index c8371ab3448..00000000000 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/DiskFormat.java +++ /dev/null @@ -1,22 +0,0 @@ -package org.apache.cloudstack.engine.subsystem.api.storage.disktype; - -import com.cloud.utils.exception.CloudRuntimeException; - -public enum DiskFormat { - VMDK, - VHD, - ISO, - QCOW2; - public static DiskFormat getFormat(String format) { - if (VMDK.toString().equalsIgnoreCase(format)) { - return VMDK; - } else if (VHD.toString().equalsIgnoreCase(format)) { - return VHD; - } else if (QCOW2.toString().equalsIgnoreCase(format)) { - return QCOW2; - } else if (ISO.toString().equalsIgnoreCase(format)) { - return ISO; - } - throw new CloudRuntimeException("can't find format match: " + format); - } -} diff --git a/utils/test/com/cloud/utils/testcase/ComponentSetup.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/QCOW2.java similarity index 64% rename from utils/test/com/cloud/utils/testcase/ComponentSetup.java rename to engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/QCOW2.java index ba9b9e74adf..b67a735d8f7 100644 --- a/utils/test/com/cloud/utils/testcase/ComponentSetup.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/QCOW2.java @@ -4,9 +4,9 @@ // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance -// the License. You may obtain a copy of the License at +// with the License. 
You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an @@ -14,15 +14,13 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -package com.cloud.utils.testcase; +package org.apache.cloudstack.engine.subsystem.api.storage.disktype; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; +import org.springframework.stereotype.Component; -@Retention(RetentionPolicy.RUNTIME) -public @interface ComponentSetup { - String managerName(); - String setupXml(); - String log4j() default "log4j-cloud"; +@Component +public class QCOW2 extends VolumeDiskTypeBase { + public QCOW2() { + this.type = "QCOW2"; + } } - diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/Unknown.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/Unknown.java new file mode 100644 index 00000000000..6600405922b --- /dev/null +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/Unknown.java @@ -0,0 +1,23 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.engine.subsystem.api.storage.disktype; + +public class Unknown extends VolumeDiskTypeBase { + public Unknown() { + this.type = "Unknown"; + } +} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/VHD.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/VHD.java new file mode 100644 index 00000000000..4f40e3e9bf8 --- /dev/null +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/VHD.java @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/VolumeDiskType.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/VolumeDiskType.java
new file mode 100644
index 00000000000..e670d6387c8
--- /dev/null
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/VolumeDiskType.java
@@ -0,0 +1,20 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.engine.subsystem.api.storage.disktype;
+
+public interface VolumeDiskType {
+}
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/VolumeDiskTypeBase.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/VolumeDiskTypeBase.java
new file mode 100644
index 00000000000..75f78031d52
--- /dev/null
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/VolumeDiskTypeBase.java
@@ -0,0 +1,50 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.engine.subsystem.api.storage.disktype;
+
+public class VolumeDiskTypeBase implements VolumeDiskType {
+    protected String type = "Unknown";
+
+    @Override
+    public boolean equals(Object that) {
+        if (this == that) {
+            return true;
+        }
+        if (that instanceof String) {
+            if (getType().equalsIgnoreCase((String)that)) {
+                return true;
+            }
+        } else if (that instanceof VolumeDiskTypeBase) {
+            VolumeDiskTypeBase th = (VolumeDiskTypeBase)that;
+            if (this.getType().equalsIgnoreCase(th.getType())) {
+                return true;
+            }
+        } else {
+            return false;
+        }
+        return false;
+    }
+
+    @Override
+    public String toString() {
+        return getType();
+    }
+
+    protected String getType() {
+        return this.type;
+    }
+}
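One caveat on VolumeDiskTypeBase above: equals() accepts both Strings and other disk types, which keeps the helper's name lookup simple but makes equality asymmetric (diskType.equals("QCOW2") is true while "QCOW2".equals(diskType) is false) and leaves hashCode() unimplemented, so these objects are unreliable as HashMap or HashSet keys. A standalone sketch of a symmetric alternative, assuming the case-insensitive type string is the intended identity; the names are illustrative only, and the string comparison moves into an explicit matches() method instead of equals().

// Hedged sketch, not the patch's code: symmetric equals/hashCode plus an
// explicit matches(String) for the name lookup VolumeDiskTypeHelper performs.
import java.util.Locale;
import java.util.Objects;

public class DiskTypeEqualitySketch {

    static class DiskType {
        protected String type = "Unknown";

        DiskType(String type) { this.type = type; }

        // String comparison lives here, not in equals().
        public boolean matches(String name) {
            return type.equalsIgnoreCase(name);
        }

        @Override
        public boolean equals(Object that) {
            if (this == that) {
                return true;
            }
            if (!(that instanceof DiskType)) {
                return false;   // never equal to a String: keeps equals symmetric
            }
            return type.equalsIgnoreCase(((DiskType) that).type);
        }

        @Override
        public int hashCode() {
            // normalise case so objects that compare equal share a hash code
            return Objects.hash(type.toLowerCase(Locale.ROOT));
        }
    }

    public static void main(String[] args) {
        DiskType qcow2 = new DiskType("QCOW2");
        System.out.println(qcow2.matches("qcow2"));              // true
        System.out.println(qcow2.equals("qcow2"));               // false, by design
        System.out.println(qcow2.equals(new DiskType("qcow2"))); // true
    }
}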
diff --git a/server/src/com/cloud/configuration/CloudZonesComponentLibrary.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/VolumeDiskTypeHelper.java
similarity index 50%
rename from server/src/com/cloud/configuration/CloudZonesComponentLibrary.java
rename to engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/VolumeDiskTypeHelper.java
index 13bb16f26b7..a2b5ede3c2b 100644
--- a/server/src/com/cloud/configuration/CloudZonesComponentLibrary.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/disktype/VolumeDiskTypeHelper.java
@@ -14,24 +14,32 @@
 // KIND, either express or implied. See the License for the
 // specific language governing permissions and limitations
 // under the License.
-package com.cloud.configuration;
+package org.apache.cloudstack.engine.subsystem.api.storage.disktype;
 
-import com.cloud.agent.StartupCommandProcessor;
-import com.cloud.agent.manager.authn.impl.BasicAgentAuthManager;
+import java.util.List;
 
-import com.cloud.hypervisor.CloudZonesStartupProcessor;
-import com.cloud.network.element.CloudZonesNetworkElement;
-import com.cloud.network.element.NetworkElement;
+import javax.inject.Inject;
+
+import org.springframework.stereotype.Component;
 
-
-public class CloudZonesComponentLibrary extends PremiumComponentLibrary {
-
-    @Override
-    protected void populateAdapters() {
-        super.populateAdapters();
-        addAdapter(NetworkElement.class, "CloudZones", CloudZonesNetworkElement.class);
-        addAdapter(StartupCommandProcessor.class, "BasicAgentAuthorizer", BasicAgentAuthManager.class);
-        addAdapter(StartupCommandProcessor.class, "CloudZonesStartupProcessor", CloudZonesStartupProcessor.class);
-    }
+@Component
+public class VolumeDiskTypeHelper {
+
+    static private List<VolumeDiskType> diskTypes;
+    static final private VolumeDiskType defaultType = new Unknown();
+
+    @Inject
+    public void setDiskTypes(List<VolumeDiskType> diskTypes) {
+        VolumeDiskTypeHelper.diskTypes = diskTypes;
+    }
+
+    public static VolumeDiskType getDiskType(String type) {
+        for (VolumeDiskType diskType : diskTypes) {
+            if (diskType.equals(type)) {
+                return diskType;
+            }
+        }
+
+        return VolumeDiskTypeHelper.defaultType;
+    }
 }
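VolumeDiskTypeHelper above exposes a static lookup backed by a static list that is populated only when Spring instantiates the bean and invokes the @Inject setter; a call to getDiskType() before the context has been refreshed would dereference a null list. A small usage sketch under that assumption; the class name is illustrative, and the behaviour shown follows from the code in this patch.

// Usage sketch for the helper above. Valid only after the Spring context
// has created VolumeDiskTypeHelper and injected the disk-type beans into
// its static list; the surrounding class is hypothetical.
import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType;
import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskTypeHelper;

public class DiskTypeHelperUsage {
    public static void main(String[] args) {
        // Matching is case-insensitive via VolumeDiskTypeBase.equals(String).
        VolumeDiskType t = VolumeDiskTypeHelper.getDiskType("vhd");
        System.out.println(t); // VHD

        // Unmatched names come back as the Unknown default instead of throwing.
        System.out.println(VolumeDiskTypeHelper.getDiskType("raw")); // Unknown
    }
}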
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/BaseImage.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/BaseImage.java
index 633a6d54cf1..9991cedfa27 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/BaseImage.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/BaseImage.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.engine.subsystem.api.storage.type;
 
 public class BaseImage extends VolumeTypeBase {
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/DataDisk.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/DataDisk.java
index 11b40ce361b..762233e940f 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/DataDisk.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/DataDisk.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.engine.subsystem.api.storage.type;
 
 import org.springframework.stereotype.Component;
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/Iso.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/Iso.java
index 22274d5f744..43611b461b1 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/Iso.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/Iso.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.engine.subsystem.api.storage.type;
 
 import org.springframework.stereotype.Component;
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/RootDisk.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/RootDisk.java
index 96da41684a5..723d59c66cf 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/RootDisk.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/RootDisk.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.engine.subsystem.api.storage.type;
 
 import org.springframework.stereotype.Component;
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/Unknown.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/Unknown.java
index ba9a6ce88f6..6f8904a5af2 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/Unknown.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/Unknown.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.engine.subsystem.api.storage.type;
 
 public class Unknown extends VolumeTypeBase {
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/VolumeTypeBase.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/VolumeTypeBase.java
index c82961d2928..6ffd9d7c9c8 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/VolumeTypeBase.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/VolumeTypeBase.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.engine.subsystem.api.storage.type;
 
 public class VolumeTypeBase implements VolumeType {
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/VolumeTypeHelper.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/VolumeTypeHelper.java
index a02f524a730..f29dd08721f 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/VolumeTypeHelper.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/type/VolumeTypeHelper.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.engine.subsystem.api.storage.type;
 
 import java.util.List;
diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManager.java b/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManager.java
index 06496454f94..37545e8f469 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManager.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManager.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.engine.cloud.entity.api;
 
 import org.apache.cloudstack.engine.cloud.entity.api.db.VMEntityVO;
diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java
index a29fa9f4e92..05c39bc0e48 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.engine.cloud.entity.api;
 
 import org.apache.cloudstack.engine.cloud.entity.api.db.VMEntityVO;
diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VirtualMachineEntityImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VirtualMachineEntityImpl.java
index fba899e04b5..2d915d9abf9 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VirtualMachineEntityImpl.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VirtualMachineEntityImpl.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.engine.cloud.entity.api;
 
 import java.lang.reflect.Method;
diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/ClusterEntityImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/ClusterEntityImpl.java
index 20ffcb198c2..4ce69b529c3 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/ClusterEntityImpl.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/ClusterEntityImpl.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.engine.datacenter.entity.api;
 
 
diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManager.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManager.java
index f01db7e7cc2..953dc9a2a1a 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManager.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManager.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.engine.datacenter.entity.api;
 
 
diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java
index 7a792c2c495..dc5b57f24bf 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.engine.datacenter.entity.api;
 
 import javax.inject.Inject;
diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/HostEntityImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/HostEntityImpl.java
index 658a389a93d..1841889e9ed 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/HostEntityImpl.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/HostEntityImpl.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
package org.apache.cloudstack.engine.datacenter.entity.api; import java.lang.reflect.Method; diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/ClusterDaoImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/ClusterDaoImpl.java index bd4a5ae2939..f33c6730f1a 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/ClusterDaoImpl.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/ClusterDaoImpl.java @@ -32,109 +32,107 @@ import org.apache.cloudstack.engine.datacenter.entity.api.db.HostPodVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; - import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.org.Grouping; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; @Component(value="EngineClusterDao") @Local(value=ClusterDao.class) public class ClusterDaoImpl extends GenericDaoBase implements ClusterDao { - private static final Logger s_logger = Logger.getLogger(ClusterDaoImpl.class); - + private static final Logger s_logger = Logger.getLogger(ClusterDaoImpl.class); + protected final SearchBuilder PodSearch; protected final SearchBuilder HyTypeWithoutGuidSearch; protected final SearchBuilder AvailHyperSearch; protected final SearchBuilder ZoneSearch; protected final SearchBuilder ZoneHyTypeSearch; protected SearchBuilder StateChangeSearch; - protected SearchBuilder UUIDSearch; - + protected SearchBuilder UUIDSearch; + private static final String GET_POD_CLUSTER_MAP_PREFIX = "SELECT pod_id, id FROM cloud.cluster WHERE cluster.id IN( "; private static final String GET_POD_CLUSTER_MAP_SUFFIX = " )"; - + @Inject protected HostPodDao _hostPodDao; - + protected ClusterDaoImpl() { super(); - + HyTypeWithoutGuidSearch = createSearchBuilder(); HyTypeWithoutGuidSearch.and("hypervisorType", HyTypeWithoutGuidSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ); HyTypeWithoutGuidSearch.and("guid", HyTypeWithoutGuidSearch.entity().getGuid(), SearchCriteria.Op.NULL); HyTypeWithoutGuidSearch.done(); - + ZoneHyTypeSearch = createSearchBuilder(); ZoneHyTypeSearch.and("hypervisorType", ZoneHyTypeSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ); ZoneHyTypeSearch.and("dataCenterId", ZoneHyTypeSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); ZoneHyTypeSearch.done(); - + PodSearch = createSearchBuilder(); PodSearch.and("pod", PodSearch.entity().getPodId(), SearchCriteria.Op.EQ); PodSearch.and("name", PodSearch.entity().getName(), SearchCriteria.Op.EQ); PodSearch.done(); - + ZoneSearch = createSearchBuilder(); ZoneSearch.and("dataCenterId", ZoneSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); ZoneSearch.groupBy(ZoneSearch.entity().getHypervisorType()); ZoneSearch.done(); - + AvailHyperSearch = createSearchBuilder(); AvailHyperSearch.and("zoneId", AvailHyperSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); AvailHyperSearch.select(null, Func.DISTINCT, 
AvailHyperSearch.entity().getHypervisorType()); AvailHyperSearch.done(); - - UUIDSearch = createSearchBuilder(); - UUIDSearch.and("uuid", UUIDSearch.entity().getUuid(), SearchCriteria.Op.EQ); - UUIDSearch.done(); - + + UUIDSearch = createSearchBuilder(); + UUIDSearch.and("uuid", UUIDSearch.entity().getUuid(), SearchCriteria.Op.EQ); + UUIDSearch.done(); + StateChangeSearch = createSearchBuilder(); StateChangeSearch.and("id", StateChangeSearch.entity().getId(), SearchCriteria.Op.EQ); StateChangeSearch.and("state", StateChangeSearch.entity().getState(), SearchCriteria.Op.EQ); StateChangeSearch.done(); } - + @Override public List listByZoneId(long zoneId) { SearchCriteria sc = ZoneSearch.create(); sc.setParameters("dataCenterId", zoneId); return listBy(sc); } - + @Override public List listByPodId(long podId) { SearchCriteria sc = PodSearch.create(); sc.setParameters("pod", podId); - + return listBy(sc); } - + @Override public ClusterVO findBy(String name, long podId) { SearchCriteria sc = PodSearch.create(); sc.setParameters("pod", podId); sc.setParameters("name", name); - + return findOneBy(sc); } - + @Override public List listByHyTypeWithoutGuid(String hyType) { SearchCriteria sc = HyTypeWithoutGuidSearch.create(); sc.setParameters("hypervisorType", hyType); - + return listBy(sc); } - + @Override public List listByDcHyType(long dcId, String hyType) { SearchCriteria sc = ZoneHyTypeSearch.create(); @@ -142,7 +140,7 @@ public class ClusterDaoImpl extends GenericDaoBase implements C sc.setParameters("hypervisorType", hyType); return listBy(sc); } - + @Override public List getAvailableHypervisorInZone(Long zoneId) { SearchCriteria sc = AvailHyperSearch.create(); @@ -154,13 +152,13 @@ public class ClusterDaoImpl extends GenericDaoBase implements C for (ClusterVO cluster : clusters) { hypers.add(cluster.getHypervisorType()); } - + return hypers; } - + @Override public Map> getPodClusterIdMap(List clusterIds){ - Transaction txn = Transaction.currentTxn(); + Transaction txn = Transaction.currentTxn(); PreparedStatement pstmt = null; Map> result = new HashMap>(); @@ -173,20 +171,20 @@ public class ClusterDaoImpl extends GenericDaoBase implements C sql.delete(sql.length()-1, sql.length()); sql.append(GET_POD_CLUSTER_MAP_SUFFIX); } - + pstmt = txn.prepareAutoCloseStatement(sql.toString()); ResultSet rs = pstmt.executeQuery(); while (rs.next()) { - Long podId = rs.getLong(1); - Long clusterIdInPod = rs.getLong(2); + Long podId = rs.getLong(1); + Long clusterIdInPod = rs.getLong(2); if(result.containsKey(podId)){ - List clusterList = result.get(podId); - clusterList.add(clusterIdInPod); - result.put(podId, clusterList); + List clusterList = result.get(podId); + clusterList.add(clusterIdInPod); + result.put(podId, clusterList); }else{ - List clusterList = new ArrayList(); - clusterList.add(clusterIdInPod); - result.put(podId, clusterList); + List clusterList = new ArrayList(); + clusterList.add(clusterIdInPod); + result.put(podId, clusterList); } } return result; @@ -196,49 +194,49 @@ public class ClusterDaoImpl extends GenericDaoBase implements C throw new CloudRuntimeException("Caught: " + GET_POD_CLUSTER_MAP_PREFIX, e); } } - + @Override public List listDisabledClusters(long zoneId, Long podId) { - GenericSearchBuilder clusterIdSearch = createSearchBuilder(Long.class); - clusterIdSearch.selectField(clusterIdSearch.entity().getId()); - clusterIdSearch.and("dataCenterId", clusterIdSearch.entity().getDataCenterId(), Op.EQ); - if(podId != null){ - clusterIdSearch.and("podId", 
clusterIdSearch.entity().getPodId(), Op.EQ); - } - clusterIdSearch.and("allocationState", clusterIdSearch.entity().getAllocationState(), Op.EQ); - clusterIdSearch.done(); + GenericSearchBuilder clusterIdSearch = createSearchBuilder(Long.class); + clusterIdSearch.selectField(clusterIdSearch.entity().getId()); + clusterIdSearch.and("dataCenterId", clusterIdSearch.entity().getDataCenterId(), Op.EQ); + if(podId != null){ + clusterIdSearch.and("podId", clusterIdSearch.entity().getPodId(), Op.EQ); + } + clusterIdSearch.and("allocationState", clusterIdSearch.entity().getAllocationState(), Op.EQ); + clusterIdSearch.done(); - - SearchCriteria sc = clusterIdSearch.create(); + + SearchCriteria sc = clusterIdSearch.create(); sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId); if (podId != null) { - sc.addAnd("podId", SearchCriteria.Op.EQ, podId); - } + sc.addAnd("podId", SearchCriteria.Op.EQ, podId); + } sc.addAnd("allocationState", SearchCriteria.Op.EQ, Grouping.AllocationState.Disabled); return customSearch(sc, null); } @Override public List listClustersWithDisabledPods(long zoneId) { - - GenericSearchBuilder disabledPodIdSearch = _hostPodDao.createSearchBuilder(Long.class); - disabledPodIdSearch.selectField(disabledPodIdSearch.entity().getId()); - disabledPodIdSearch.and("dataCenterId", disabledPodIdSearch.entity().getDataCenterId(), Op.EQ); - disabledPodIdSearch.and("allocationState", disabledPodIdSearch.entity().getAllocationState(), Op.EQ); - GenericSearchBuilder clusterIdSearch = createSearchBuilder(Long.class); - clusterIdSearch.selectField(clusterIdSearch.entity().getId()); - clusterIdSearch.join("disabledPodIdSearch", disabledPodIdSearch, clusterIdSearch.entity().getPodId(), disabledPodIdSearch.entity().getId(), JoinBuilder.JoinType.INNER); - clusterIdSearch.done(); + GenericSearchBuilder disabledPodIdSearch = _hostPodDao.createSearchBuilder(Long.class); + disabledPodIdSearch.selectField(disabledPodIdSearch.entity().getId()); + disabledPodIdSearch.and("dataCenterId", disabledPodIdSearch.entity().getDataCenterId(), Op.EQ); + disabledPodIdSearch.and("allocationState", disabledPodIdSearch.entity().getAllocationState(), Op.EQ); - - SearchCriteria sc = clusterIdSearch.create(); + GenericSearchBuilder clusterIdSearch = createSearchBuilder(Long.class); + clusterIdSearch.selectField(clusterIdSearch.entity().getId()); + clusterIdSearch.join("disabledPodIdSearch", disabledPodIdSearch, clusterIdSearch.entity().getPodId(), disabledPodIdSearch.entity().getId(), JoinBuilder.JoinType.INNER); + clusterIdSearch.done(); + + + SearchCriteria sc = clusterIdSearch.create(); sc.setJoinParameters("disabledPodIdSearch", "dataCenterId", zoneId); sc.setJoinParameters("disabledPodIdSearch", "allocationState", Grouping.AllocationState.Disabled); - + return customSearch(sc, null); } - + @Override public boolean remove(Long id) { Transaction txn = Transaction.currentTxn(); @@ -246,30 +244,30 @@ public class ClusterDaoImpl extends GenericDaoBase implements C ClusterVO cluster = createForUpdate(); cluster.setName(null); cluster.setGuid(null); - + update(id, cluster); boolean result = super.remove(id); txn.commit(); return result; } - - - @Override - public ClusterVO findByUUID(String uuid) { - SearchCriteria sc = UUIDSearch.create(); - sc.setParameters("uuid", uuid); + + + @Override + public ClusterVO findByUUID(String uuid) { + SearchCriteria sc = UUIDSearch.create(); + sc.setParameters("uuid", uuid); return findOneBy(sc); - } + } - @Override - public boolean updateState(State currentState, Event event, State 
nextState, DataCenterResourceEntity clusterEntity, Object data) { - - ClusterVO vo = findById(clusterEntity.getId()); - - Date oldUpdatedTime = vo.getLastUpdated(); + @Override + public boolean updateState(State currentState, Event event, State nextState, DataCenterResourceEntity clusterEntity, Object data) { - SearchCriteria sc = StateChangeSearch.create(); + ClusterVO vo = findById(clusterEntity.getId()); + + Date oldUpdatedTime = vo.getLastUpdated(); + + SearchCriteria sc = StateChangeSearch.create(); sc.setParameters("id", vo.getId()); sc.setParameters("state", currentState); @@ -277,14 +275,14 @@ public class ClusterDaoImpl extends GenericDaoBase implements C builder.set(vo, "state", nextState); builder.set(vo, "lastUpdated", new Date()); - int rows = update((ClusterVO) vo, sc); - + int rows = update(vo, sc); + if (rows == 0 && s_logger.isDebugEnabled()) { - ClusterVO dbCluster = findByIdIncludingRemoved(vo.getId()); + ClusterVO dbCluster = findByIdIncludingRemoved(vo.getId()); if (dbCluster != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); str.append(": DB Data={id=").append(dbCluster.getId()).append("; state=").append(dbCluster.getState()).append(";updatedTime=") - .append(dbCluster.getLastUpdated()); + .append(dbCluster.getLastUpdated()); str.append(": New Data={id=").append(vo.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatedTime=").append(vo.getLastUpdated()); str.append(": stale Data={id=").append(vo.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatedTime=").append(oldUpdatedTime); } else { @@ -292,7 +290,7 @@ public class ClusterDaoImpl extends GenericDaoBase implements C } } return rows > 0; - - } - + + } + } diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/DataCenterDaoImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/DataCenterDaoImpl.java index 61a4bb3e4b1..3a0d2c89a0d 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/DataCenterDaoImpl.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/DataCenterDaoImpl.java @@ -25,15 +25,12 @@ import javax.persistence.TableGenerator; import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity; import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity.State; import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity.State.Event; -import org.apache.cloudstack.engine.datacenter.entity.api.ZoneEntity; import org.apache.cloudstack.engine.datacenter.entity.api.db.DataCenterVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.org.Grouping; import com.cloud.utils.NumbersUtil; -import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; @@ -63,47 +60,47 @@ public class DataCenterDaoImpl extends GenericDaoBase implem protected SearchBuilder TokenSearch; protected SearchBuilder StateChangeSearch; protected SearchBuilder UUIDSearch; - + protected long _prefix; protected Random _rand = new Random(System.currentTimeMillis()); protected TableGenerator _tgMacAddress; - + @Inject protected DcDetailsDao _detailsDao; @Override public DataCenterVO findByName(String name) { - SearchCriteria sc = 
NameSearch.create(); - sc.setParameters("name", name); + SearchCriteria sc = NameSearch.create(); + sc.setParameters("name", name); return findOneBy(sc); } @Override public DataCenterVO findByUUID(String uuid) { - SearchCriteria sc = UUIDSearch.create(); - sc.setParameters("uuid", uuid); + SearchCriteria sc = UUIDSearch.create(); + sc.setParameters("uuid", uuid); return findOneBy(sc); } - + @Override public DataCenterVO findByToken(String zoneToken){ - SearchCriteria sc = TokenSearch.create(); - sc.setParameters("zoneToken", zoneToken); + SearchCriteria sc = TokenSearch.create(); + sc.setParameters("zoneToken", zoneToken); return findOneBy(sc); } - + @Override public List findZonesByDomainId(Long domainId){ - SearchCriteria sc = ListZonesByDomainIdSearch.create(); - sc.setParameters("domainId", domainId); + SearchCriteria sc = ListZonesByDomainIdSearch.create(); + sc.setParameters("domainId", domainId); return listBy(sc); } - + @Override public List findZonesByDomainId(Long domainId, String keyword){ - SearchCriteria sc = ListZonesByDomainIdSearch.create(); - sc.setParameters("domainId", domainId); - if (keyword != null) { + SearchCriteria sc = ListZonesByDomainIdSearch.create(); + sc.setParameters("domainId", domainId); + if (keyword != null) { SearchCriteria ssc = createSearchCriteria(); ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%"); ssc.addOr("description", SearchCriteria.Op.LIKE, "%" + keyword + "%"); @@ -111,12 +108,12 @@ public class DataCenterDaoImpl extends GenericDaoBase implem } return listBy(sc); } - + @Override public List findChildZones(Object[] ids, String keyword){ - SearchCriteria sc = ChildZonesSearch.create(); - sc.setParameters("domainid", ids); - if (keyword != null) { + SearchCriteria sc = ChildZonesSearch.create(); + sc.setParameters("domainid", ids); + if (keyword != null) { SearchCriteria ssc = createSearchCriteria(); ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%"); ssc.addOr("description", SearchCriteria.Op.LIKE, "%" + keyword + "%"); @@ -124,28 +121,28 @@ public class DataCenterDaoImpl extends GenericDaoBase implem } return listBy(sc); } - + @Override public List listPublicZones(String keyword){ - SearchCriteria sc = PublicZonesSearch.create(); - if (keyword != null) { + SearchCriteria sc = PublicZonesSearch.create(); + if (keyword != null) { SearchCriteria ssc = createSearchCriteria(); ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%"); ssc.addOr("description", SearchCriteria.Op.LIKE, "%" + keyword + "%"); sc.addAnd("name", SearchCriteria.Op.SC, ssc); } - //sc.setParameters("domainId", domainId); + //sc.setParameters("domainId", domainId); return listBy(sc); } - + @Override public List findByKeyword(String keyword){ - SearchCriteria ssc = createSearchCriteria(); - ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%"); - ssc.addOr("description", SearchCriteria.Op.LIKE, "%" + keyword + "%"); + SearchCriteria ssc = createSearchCriteria(); + ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%"); + ssc.addOr("description", SearchCriteria.Op.LIKE, "%" + keyword + "%"); return listBy(ssc); } - + @Override public String[] getNextAvailableMacAddressPair(long id) { @@ -155,7 +152,7 @@ public class DataCenterDaoImpl extends GenericDaoBase implem @Override public String[] getNextAvailableMacAddressPair(long id, long mask) { SequenceFetcher fetch = SequenceFetcher.getInstance(); - + long seq = fetch.getNextSequence(Long.class, _tgMacAddress, id); seq = seq | _prefix | ((id & 0x7f) << 32); seq |= mask; @@ -172,49 
+169,49 @@ public class DataCenterDaoImpl extends GenericDaoBase implem if (!super.configure(name, params)) { return false; } - + String value = (String)params.get("mac.address.prefix"); _prefix = (long)NumbersUtil.parseInt(value, 06) << 40; return true; } - + protected DataCenterDaoImpl() { super(); NameSearch = createSearchBuilder(); NameSearch.and("name", NameSearch.entity().getName(), SearchCriteria.Op.EQ); NameSearch.done(); - + ListZonesByDomainIdSearch = createSearchBuilder(); ListZonesByDomainIdSearch.and("domainId", ListZonesByDomainIdSearch.entity().getDomainId(), SearchCriteria.Op.EQ); ListZonesByDomainIdSearch.done(); - + PublicZonesSearch = createSearchBuilder(); PublicZonesSearch.and("domainId", PublicZonesSearch.entity().getDomainId(), SearchCriteria.Op.NULL); PublicZonesSearch.done(); - + ChildZonesSearch = createSearchBuilder(); ChildZonesSearch.and("domainid", ChildZonesSearch.entity().getDomainId(), SearchCriteria.Op.IN); ChildZonesSearch.done(); - + DisabledZonesSearch = createSearchBuilder(); DisabledZonesSearch.and("allocationState", DisabledZonesSearch.entity().getAllocationState(), SearchCriteria.Op.EQ); DisabledZonesSearch.done(); - + TokenSearch = createSearchBuilder(); TokenSearch.and("zoneToken", TokenSearch.entity().getZoneToken(), SearchCriteria.Op.EQ); TokenSearch.done(); - + StateChangeSearch = createSearchBuilder(); StateChangeSearch.and("id", StateChangeSearch.entity().getId(), SearchCriteria.Op.EQ); StateChangeSearch.and("state", StateChangeSearch.entity().getState(), SearchCriteria.Op.EQ); StateChangeSearch.done(); - + UUIDSearch = createSearchBuilder(); UUIDSearch.and("uuid", UUIDSearch.entity().getUuid(), SearchCriteria.Op.EQ); UUIDSearch.done(); - + _tgMacAddress = _tgs.get("macAddress"); assert _tgMacAddress != null : "Couldn't get mac address table generator"; } @@ -231,7 +228,7 @@ public class DataCenterDaoImpl extends GenericDaoBase implem txn.commit(); return persisted; } - + @Override public void loadDetails(DataCenterVO zone) { Map details =_detailsDao.findDetails(zone.getId()); @@ -246,25 +243,25 @@ public class DataCenterDaoImpl extends GenericDaoBase implem } _detailsDao.persist(zone.getId(), details); } - + @Override public List listDisabledZones(){ - SearchCriteria sc = DisabledZonesSearch.create(); - sc.setParameters("allocationState", Grouping.AllocationState.Disabled); - - List dcs = listBy(sc); - - return dcs; + SearchCriteria sc = DisabledZonesSearch.create(); + sc.setParameters("allocationState", Grouping.AllocationState.Disabled); + + List dcs = listBy(sc); + + return dcs; } - + @Override public List listEnabledZones(){ - SearchCriteria sc = DisabledZonesSearch.create(); - sc.setParameters("allocationState", Grouping.AllocationState.Enabled); - - List dcs = listBy(sc); - - return dcs; + SearchCriteria sc = DisabledZonesSearch.create(); + sc.setParameters("allocationState", Grouping.AllocationState.Enabled); + + List dcs = listBy(sc); + + return dcs; } @Override @@ -277,36 +274,36 @@ public class DataCenterDaoImpl extends GenericDaoBase implem Long dcId = Long.parseLong(tokenOrIdOrName); return findById(dcId); } catch (NumberFormatException nfe) { - + } } } return result; } - + @Override public boolean remove(Long id) { Transaction txn = Transaction.currentTxn(); txn.start(); DataCenterVO zone = createForUpdate(); zone.setName(null); - + update(id, zone); boolean result = super.remove(id); txn.commit(); return result; } - - @Override - public boolean updateState(State currentState, Event event, State nextState, 
DataCenterResourceEntity zoneEntity, Object data) { - - DataCenterVO vo = findById(zoneEntity.getId()); - - Date oldUpdatedTime = vo.getLastUpdated(); - SearchCriteria sc = StateChangeSearch.create(); + @Override + public boolean updateState(State currentState, Event event, State nextState, DataCenterResourceEntity zoneEntity, Object data) { + + DataCenterVO vo = findById(zoneEntity.getId()); + + Date oldUpdatedTime = vo.getLastUpdated(); + + SearchCriteria sc = StateChangeSearch.create(); sc.setParameters("id", vo.getId()); sc.setParameters("state", currentState); @@ -314,14 +311,14 @@ public class DataCenterDaoImpl extends GenericDaoBase implem builder.set(vo, "state", nextState); builder.set(vo, "lastUpdated", new Date()); - int rows = update((DataCenterVO) vo, sc); - + int rows = update(vo, sc); + if (rows == 0 && s_logger.isDebugEnabled()) { - DataCenterVO dbDC = findByIdIncludingRemoved(vo.getId()); + DataCenterVO dbDC = findByIdIncludingRemoved(vo.getId()); if (dbDC != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); str.append(": DB Data={id=").append(dbDC.getId()).append("; state=").append(dbDC.getState()).append(";updatedTime=") - .append(dbDC.getLastUpdated()); + .append(dbDC.getLastUpdated()); str.append(": New Data={id=").append(vo.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatedTime=").append(vo.getLastUpdated()); str.append(": stale Data={id=").append(vo.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatedTime=").append(oldUpdatedTime); } else { @@ -329,8 +326,8 @@ public class DataCenterDaoImpl extends GenericDaoBase implem } } return rows > 0; - - } - - + + } + + } diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostDaoImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostDaoImpl.java index 41e6785a95a..f33bc21256e 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostDaoImpl.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostDaoImpl.java @@ -31,8 +31,6 @@ import javax.persistence.TableGenerator; import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity; import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity.State; -import org.apache.cloudstack.engine.datacenter.entity.api.db.ClusterVO; -import org.apache.cloudstack.engine.datacenter.entity.api.db.DataCenterVO; import org.apache.cloudstack.engine.datacenter.entity.api.db.HostVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -40,21 +38,17 @@ import org.springframework.stereotype.Component; import com.cloud.host.Host; import com.cloud.host.Host.Type; import com.cloud.host.HostTagVO; - import com.cloud.host.Status; -import com.cloud.host.Status.Event; import com.cloud.info.RunningHostCountInfo; import com.cloud.org.Managed; import com.cloud.resource.ResourceState; import com.cloud.utils.DateUtil; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.Attribute; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.JoinBuilder; -import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import 
com.cloud.utils.db.SearchCriteria.Func; @@ -104,7 +98,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected final SearchBuilder ManagedRoutingServersSearch; protected final SearchBuilder SecondaryStorageVMSearch; protected SearchBuilder StateChangeSearch; - + protected SearchBuilder UUIDSearch; protected final GenericSearchBuilder HostsInStatusSearch; @@ -119,7 +113,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @Inject protected HostDetailsDao _detailsDao; @Inject protected HostTagsDao _hostTagsDao; @Inject protected ClusterDao _clusterDao; - + public HostDaoImpl() { @@ -148,7 +142,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao TypeDcSearch.and("type", TypeDcSearch.entity().getType(), SearchCriteria.Op.EQ); TypeDcSearch.and("dc", TypeDcSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); TypeDcSearch.done(); - + SecondaryStorageVMSearch = createSearchBuilder(); SecondaryStorageVMSearch.and("type", SecondaryStorageVMSearch.entity().getType(), SearchCriteria.Op.EQ); SecondaryStorageVMSearch.and("dc", SecondaryStorageVMSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); @@ -161,14 +155,14 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao TypeDcStatusSearch.and("status", TypeDcStatusSearch.entity().getStatus(), SearchCriteria.Op.EQ); TypeDcStatusSearch.and("resourceState", TypeDcStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ); TypeDcStatusSearch.done(); - + TypeClusterStatusSearch = createSearchBuilder(); TypeClusterStatusSearch.and("type", TypeClusterStatusSearch.entity().getType(), SearchCriteria.Op.EQ); TypeClusterStatusSearch.and("cluster", TypeClusterStatusSearch.entity().getClusterId(), SearchCriteria.Op.EQ); TypeClusterStatusSearch.and("status", TypeClusterStatusSearch.entity().getStatus(), SearchCriteria.Op.EQ); TypeClusterStatusSearch.and("resourceState", TypeClusterStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ); TypeClusterStatusSearch.done(); - + IdStatusSearch = createSearchBuilder(); IdStatusSearch.and("id", IdStatusSearch.entity().getId(), SearchCriteria.Op.EQ); IdStatusSearch.and("states", IdStatusSearch.entity().getStatus(), SearchCriteria.Op.IN); @@ -214,7 +208,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao StatusSearch = createSearchBuilder(); StatusSearch.and("status", StatusSearch.entity().getStatus(), SearchCriteria.Op.IN); StatusSearch.done(); - + ResourceStateSearch = createSearchBuilder(); ResourceStateSearch.and("resourceState", ResourceStateSearch.entity().getResourceState(), SearchCriteria.Op.IN); ResourceStateSearch.done(); @@ -299,7 +293,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao ManagedRoutingServersSearch.and("server", ManagedRoutingServersSearch.entity().getManagementServerId(), SearchCriteria.Op.NNULL); ManagedRoutingServersSearch.and("type", ManagedRoutingServersSearch.entity().getType(), SearchCriteria.Op.EQ); ManagedRoutingServersSearch.done(); - + RoutingSearch = createSearchBuilder(); RoutingSearch.and("type", RoutingSearch.entity().getType(), SearchCriteria.Op.EQ); RoutingSearch.done(); @@ -310,11 +304,11 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao _resourceStateAttr = _allAttributes.get("resourceState"); assert (_statusAttr != null && _msIdAttr != null && _pingTimeAttr != null) : "Couldn't find one of these attributes"; - - UUIDSearch = createSearchBuilder(); - UUIDSearch.and("uuid", UUIDSearch.entity().getUuid(), 
SearchCriteria.Op.EQ); - UUIDSearch.done(); - + + UUIDSearch = createSearchBuilder(); + UUIDSearch.and("uuid", UUIDSearch.entity().getUuid(), SearchCriteria.Op.EQ); + UUIDSearch.done(); + StateChangeSearch = createSearchBuilder(); StateChangeSearch.and("id", StateChangeSearch.entity().getId(), SearchCriteria.Op.EQ); StateChangeSearch.and("state", StateChangeSearch.entity().getState(), SearchCriteria.Op.EQ); @@ -331,52 +325,52 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao List hosts = listBy(sc); return hosts.size(); } - + @Override public HostVO findByGuid(String guid) { SearchCriteria sc = GuidSearch.create("guid", guid); return findOneBy(sc); } - + @Override @DB public List findAndUpdateDirectAgentToLoad(long lastPingSecondsAfter, Long limit, long managementServerId) { Transaction txn = Transaction.currentTxn(); txn.start(); - SearchCriteria sc = UnmanagedDirectConnectSearch.create(); - sc.setParameters("lastPinged", lastPingSecondsAfter); + SearchCriteria sc = UnmanagedDirectConnectSearch.create(); + sc.setParameters("lastPinged", lastPingSecondsAfter); //sc.setParameters("resourceStates", ResourceState.ErrorInMaintenance, ResourceState.Maintenance, ResourceState.PrepareForMaintenance, ResourceState.Disabled); sc.setJoinParameters("ClusterManagedSearch", "managed", Managed.ManagedState.Managed); List hosts = lockRows(sc, new Filter(HostVO.class, "clusterId", true, 0L, limit), true); - + for (HostVO host : hosts) { host.setManagementServerId(managementServerId); update(host.getId(), host); } - + txn.commit(); - + return hosts; } - + @Override @DB public List findAndUpdateApplianceToLoad(long lastPingSecondsAfter, long managementServerId) { - Transaction txn = Transaction.currentTxn(); - - txn.start(); - SearchCriteria sc = UnmanagedApplianceSearch.create(); - sc.setParameters("lastPinged", lastPingSecondsAfter); + Transaction txn = Transaction.currentTxn(); + + txn.start(); + SearchCriteria sc = UnmanagedApplianceSearch.create(); + sc.setParameters("lastPinged", lastPingSecondsAfter); sc.setParameters("types", Type.ExternalDhcp, Type.ExternalFirewall, Type.ExternalLoadBalancer, Type.PxeServer, Type.TrafficMonitor, Type.L2Networking); - List hosts = lockRows(sc, null, true); - - for (HostVO host : hosts) { - host.setManagementServerId(managementServerId); - update(host.getId(), host); - } - - txn.commit(); - - return hosts; + List hosts = lockRows(sc, null, true); + + for (HostVO host : hosts) { + host.setManagementServerId(managementServerId); + update(host.getId(), host); + } + + txn.commit(); + + return hosts; } @Override @@ -402,7 +396,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao ub = getUpdateBuilder(host); update(ub, sc, null); } - + @Override public List listByHostTag(Host.Type type, Long clusterId, Long podId, long dcId, String hostTag) { @@ -435,8 +429,8 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } - - + + @Override public List listAllUpAndEnabledNonHAHosts(Type type, Long clusterId, Long podId, long dcId, String haTag) { SearchBuilder hostTagSearch = null; @@ -446,42 +440,42 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao hostTagSearch.or("tagNull", hostTagSearch.entity().getTag(), SearchCriteria.Op.NULL); hostTagSearch.cp(); } - + SearchBuilder hostSearch = createSearchBuilder(); - + hostSearch.and("type", hostSearch.entity().getType(), SearchCriteria.Op.EQ); hostSearch.and("clusterId", hostSearch.entity().getClusterId(), SearchCriteria.Op.EQ); 
hostSearch.and("podId", hostSearch.entity().getPodId(), SearchCriteria.Op.EQ); hostSearch.and("zoneId", hostSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); hostSearch.and("status", hostSearch.entity().getStatus(), SearchCriteria.Op.EQ); hostSearch.and("resourceState", hostSearch.entity().getResourceState(), SearchCriteria.Op.EQ); - + if (haTag != null && !haTag.isEmpty()) { hostSearch.join("hostTagSearch", hostTagSearch, hostSearch.entity().getId(), hostTagSearch.entity().getHostId(), JoinBuilder.JoinType.LEFTOUTER); } SearchCriteria sc = hostSearch.create(); - + if (haTag != null && !haTag.isEmpty()) { sc.setJoinParameters("hostTagSearch", "tag", haTag); } - + if (type != null) { sc.setParameters("type", type); } - + if (clusterId != null) { sc.setParameters("clusterId", clusterId); } - + if (podId != null) { sc.setParameters("podId", podId); } - + sc.setParameters("zoneId", dcId); sc.setParameters("status", Status.Up); sc.setParameters("resourceState", ResourceState.Enabled); - + return listBy(sc); } @@ -528,7 +522,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao } return result; } - + @Override public void saveDetails(HostVO host) { Map details = host.getDetails(); @@ -650,12 +644,12 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao } - @Override - public boolean updateState(State currentState, DataCenterResourceEntity.State.Event event, State nextState, DataCenterResourceEntity hostEntity, Object data) { - HostVO vo = findById(hostEntity.getId()); - Date oldUpdatedTime = vo.getLastUpdated(); + @Override + public boolean updateState(State currentState, DataCenterResourceEntity.State.Event event, State nextState, DataCenterResourceEntity hostEntity, Object data) { + HostVO vo = findById(hostEntity.getId()); + Date oldUpdatedTime = vo.getLastUpdated(); - SearchCriteria sc = StateChangeSearch.create(); + SearchCriteria sc = StateChangeSearch.create(); sc.setParameters("id", hostEntity.getId()); sc.setParameters("state", currentState); @@ -663,14 +657,14 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao builder.set(vo, "state", nextState); builder.set(vo, "lastUpdated", new Date()); - int rows = update((HostVO) vo, sc); - + int rows = update(vo, sc); + if (rows == 0 && s_logger.isDebugEnabled()) { - HostVO dbHost = findByIdIncludingRemoved(vo.getId()); + HostVO dbHost = findByIdIncludingRemoved(vo.getId()); if (dbHost != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); str.append(": DB Data={id=").append(dbHost.getId()).append("; state=").append(dbHost.getState()).append(";updatedTime=") - .append(dbHost.getLastUpdated()); + .append(dbHost.getLastUpdated()); str.append(": New Data={id=").append(vo.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatedTime=").append(vo.getLastUpdated()); str.append(": stale Data={id=").append(vo.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatedTime=").append(oldUpdatedTime); } else { @@ -678,8 +672,8 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao } } return rows > 0; - } - + } + @Override public boolean updateResourceState(ResourceState oldState, ResourceState.Event event, ResourceState newState, Host vo) { HostVO host = (HostVO)vo; @@ -687,41 +681,41 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao sb.and("resource_state", sb.entity().getResourceState(), SearchCriteria.Op.EQ); sb.and("id", sb.entity().getId(), 
SearchCriteria.Op.EQ); sb.done(); - + SearchCriteria sc = sb.create(); sc.setParameters("resource_state", oldState); sc.setParameters("id", host.getId()); - + UpdateBuilder ub = getUpdateBuilder(host); ub.set(host, _resourceStateAttr, newState); int result = update(ub, sc, null); assert result <= 1 : "How can this update " + result + " rows? "; - + if (state_logger.isDebugEnabled() && result == 0) { HostVO ho = findById(host.getId()); assert ho != null : "How how how? : " + host.getId(); StringBuilder str = new StringBuilder("Unable to update resource state: ["); - str.append("m = " + host.getId()); - str.append("; name = " + host.getName()); - str.append("; old state = " + oldState); - str.append("; event = " + event); - str.append("; new state = " + newState + "]"); - state_logger.debug(str.toString()); + str.append("m = " + host.getId()); + str.append("; name = " + host.getName()); + str.append("; old state = " + oldState); + str.append("; event = " + event); + str.append("; new state = " + newState + "]"); + state_logger.debug(str.toString()); } else { - StringBuilder msg = new StringBuilder("Resource state update: ["); - msg.append("id = " + host.getId()); - msg.append("; name = " + host.getName()); - msg.append("; old state = " + oldState); - msg.append("; event = " + event); - msg.append("; new state = " + newState + "]"); - state_logger.debug(msg.toString()); + StringBuilder msg = new StringBuilder("Resource state update: ["); + msg.append("id = " + host.getId()); + msg.append("; name = " + host.getName()); + msg.append("; old state = " + oldState); + msg.append("; event = " + event); + msg.append("; new state = " + newState + "]"); + state_logger.debug(msg.toString()); } - + return result > 0; } - + @Override public HostVO findByTypeNameAndZoneId(long zoneId, String name, Host.Type type) { SearchCriteria sc = TypeNameZoneSearch.create(); @@ -731,94 +725,94 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return findOneBy(sc); } - @Override - public List findHypervisorHostInCluster(long clusterId) { - SearchCriteria sc = TypeClusterStatusSearch.create(); - sc.setParameters("type", Host.Type.Routing); - sc.setParameters("cluster", clusterId); - sc.setParameters("status", Status.Up); - sc.setParameters("resourceState", ResourceState.Enabled); - - return listBy(sc); - } + @Override + public List findHypervisorHostInCluster(long clusterId) { + SearchCriteria sc = TypeClusterStatusSearch.create(); + sc.setParameters("type", Host.Type.Routing); + sc.setParameters("cluster", clusterId); + sc.setParameters("status", Status.Up); + sc.setParameters("resourceState", ResourceState.Enabled); - @Override - public List lockRows( - SearchCriteria sc, - Filter filter, boolean exclusive) { - // TODO Auto-generated method stub - return null; - } + return listBy(sc); + } - @Override - public org.apache.cloudstack.engine.datacenter.entity.api.db.HostVO lockOneRandomRow( - SearchCriteria sc, - boolean exclusive) { - // TODO Auto-generated method stub - return null; - } + @Override + public List lockRows( + SearchCriteria sc, + Filter filter, boolean exclusive) { + // TODO Auto-generated method stub + return null; + } + + @Override + public org.apache.cloudstack.engine.datacenter.entity.api.db.HostVO lockOneRandomRow( + SearchCriteria sc, + boolean exclusive) { + // TODO Auto-generated method stub + return null; + } - @Override - public List search( - SearchCriteria sc, - Filter filter) { - // TODO Auto-generated method stub - return null; - } + @Override + public List search( + 
SearchCriteria sc, + Filter filter) { + // TODO Auto-generated method stub + return null; + } - @Override - public List search( - SearchCriteria sc, - Filter filter, boolean enable_query_cache) { - // TODO Auto-generated method stub - return null; - } + @Override + public List search( + SearchCriteria sc, + Filter filter, boolean enable_query_cache) { + // TODO Auto-generated method stub + return null; + } - @Override - public List searchIncludingRemoved( - SearchCriteria sc, - Filter filter, Boolean lock, boolean cache) { - // TODO Auto-generated method stub - return null; - } + @Override + public List searchIncludingRemoved( + SearchCriteria sc, + Filter filter, Boolean lock, boolean cache) { + // TODO Auto-generated method stub + return null; + } - @Override - public List searchIncludingRemoved( - SearchCriteria sc, - Filter filter, Boolean lock, boolean cache, - boolean enable_query_cache) { - // TODO Auto-generated method stub - return null; - } + @Override + public List searchIncludingRemoved( + SearchCriteria sc, + Filter filter, Boolean lock, boolean cache, + boolean enable_query_cache) { + // TODO Auto-generated method stub + return null; + } - @Override - public int remove( - SearchCriteria sc) { - // TODO Auto-generated method stub - return 0; - } + @Override + public int remove( + SearchCriteria sc) { + // TODO Auto-generated method stub + return 0; + } - @Override - public int expunge(SearchCriteria sc) { - // TODO Auto-generated method stub - return 0; - } + @Override + public int expunge(SearchCriteria sc) { + // TODO Auto-generated method stub + return 0; + } - @Override - public HostVO findOneBy(SearchCriteria sc) { - // TODO Auto-generated method stub - return null; - } + @Override + public HostVO findOneBy(SearchCriteria sc) { + // TODO Auto-generated method stub + return null; + } - @Override - public HostVO findByUUID(String uuid) { - SearchCriteria sc = UUIDSearch.create(); - sc.setParameters("uuid", uuid); + @Override + public HostVO findByUUID(String uuid) { + SearchCriteria sc = UUIDSearch.create(); + sc.setParameters("uuid", uuid); return findOneBy(sc); - } + } } diff --git a/engine/orchestration/test/org/apache/cloudstack/engine/provisioning/test/ChildTestConfiguration.java b/engine/orchestration/test/org/apache/cloudstack/engine/provisioning/test/ChildTestConfiguration.java index 72f956ecf15..86cd47830e6 100644 --- a/engine/orchestration/test/org/apache/cloudstack/engine/provisioning/test/ChildTestConfiguration.java +++ b/engine/orchestration/test/org/apache/cloudstack/engine/provisioning/test/ChildTestConfiguration.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
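// ChildTestConfiguration (this file) and ProvisioningTest (next file) are the
// Spring-wired test pieces of this patch. A hedged sketch of how such a test
// typically picks up provisioningContext.xml (the class name and annotations
// below are illustrative assumptions, not taken from the patch):
//
//     @RunWith(SpringJUnit4ClassRunner.class)
//     @ContextConfiguration(locations = "classpath:/provisioningContext.xml")
//     public class ProvisioningSmokeTest {
//         @Inject
//         HostDao hostDao; // any bean declared in the context
//
//         @Test
//         public void contextLoads() {
//             Assert.assertNotNull(hostDao);
//         }
//     }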
 package org.apache.cloudstack.engine.provisioning.test;
diff --git a/engine/orchestration/test/org/apache/cloudstack/engine/provisioning/test/ProvisioningTest.java b/engine/orchestration/test/org/apache/cloudstack/engine/provisioning/test/ProvisioningTest.java
index 70b5b93c4fb..eaff4269a0b 100644
--- a/engine/orchestration/test/org/apache/cloudstack/engine/provisioning/test/ProvisioningTest.java
+++ b/engine/orchestration/test/org/apache/cloudstack/engine/provisioning/test/ProvisioningTest.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
 /**
  *
  */
diff --git a/engine/orchestration/test/resource/provisioningContext.xml b/engine/orchestration/test/resource/provisioningContext.xml
index a5a9560a935..6ed0ab5d472 100644
--- a/engine/orchestration/test/resource/provisioningContext.xml
+++ b/engine/orchestration/test/resource/provisioningContext.xml
@@ -1,3 +1,21 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements. See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership. The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License. You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied. See the License for the
+  specific language governing permissions and limitations
+  under the License.
+-->
diff --git a/engine/pom.xml b/engine/pom.xml
--- a/engine/pom.xml
+++ b/engine/pom.xml
@@ ... @@
+<!-- ASF license header (Apache License, Version 2.0), as above -->
 <modelVersion>4.0.0</modelVersion>
 <artifactId>cloud-engine</artifactId>
 <name>Apache CloudStack Cloud Engine</name>
@@ -42,7 +42,6 @@
     <module>storage/imagemotion</module>
     <module>storage/backup</module>
     <module>storage/snapshot</module>
-    <module>storage/integration-test</module>
     <module>components-api</module>
     <module>schema</module>
     <module>network</module>
diff --git a/engine/service/pom.xml b/engine/service/pom.xml
index c39f0e76133..cd4e9531d28 100644
--- a/engine/service/pom.xml
+++ b/engine/service/pom.xml
@@ -1,4 +1,21 @@
-...
+<!-- ASF license header (Apache License, Version 2.0), as above -->
diff --git a/engine/service/src/main/webapp/WEB-INF/beans.xml b/engine/service/src/main/webapp/WEB-INF/beans.xml
old mode 100755
new mode 100644
index 4d20638bb82..e5bcb88951d
--- a/engine/service/src/main/webapp/WEB-INF/beans.xml
+++ b/engine/service/src/main/webapp/WEB-INF/beans.xml
@@ -1,3 +1,21 @@
+<!-- ASF license header (Apache License, Version 2.0), as above -->
diff --git a/engine/service/src/main/webapp/WEB-INF/web.xml b/engine/service/src/main/webapp/WEB-INF/web.xml
index 71c1ef38329..6b8648a5b33 100644
--- a/engine/service/src/main/webapp/WEB-INF/web.xml
+++ b/engine/service/src/main/webapp/WEB-INF/web.xml
@@ -1,3 +1,21 @@
+<!-- ASF license header (Apache License, Version 2.0), as above -->
diff --git a/engine/service/src/main/webapp/index.jsp b/engine/service/src/main/webapp/index.jsp
index 6f07b72e67e..6b26cc21c4d 100644
--- a/engine/service/src/main/webapp/index.jsp
+++ b/engine/service/src/main/webapp/index.jsp
@@ -1,3 +1,21 @@
+<%-- ASF license header (Apache License, Version 2.0), as above --%>
 
 Hello World!
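From here on the patch tears out the old ImageDataFactory and driver plumbing and rebuilds the image subsystem around ImageDataStoreProvider. A hedged sketch of the resulting call path follows; the caller class and the ids below are illustrative assumptions, and only ImageService.registerTemplate(long, long) itself comes from this patch:

    import javax.inject.Inject;

    import org.apache.cloudstack.engine.cloud.entity.api.TemplateEntity;
    import org.apache.cloudstack.storage.image.ImageService;

    public class RegisterTemplateExample {
        @Inject
        ImageService imageService;

        // Resolves the image store via ImageDataStoreProviderManager, stamps
        // the template row with the store id, and wraps the registered
        // template in a TemplateEntityImpl (see ImageServiceImpl below).
        public TemplateEntity registerSampleTemplate() {
            long templateId = 42L;   // hypothetical template id
            long imageStoreId = 1L;  // hypothetical image store id
            return imageService.registerTemplate(templateId, imageStoreId);
        }
    }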

diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/ImageDataFactoryImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/ImageDataFactoryImpl.java deleted file mode 100644 index 2fbe616fd1e..00000000000 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/ImageDataFactoryImpl.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.image; - -import javax.inject.Inject; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.storage.datastore.DataStoreManager; -import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.image.db.ImageDataDao; -import org.apache.cloudstack.storage.image.db.ImageDataVO; -import org.apache.cloudstack.storage.image.store.TemplateObject; -import org.springframework.stereotype.Component; - -@Component -public class ImageDataFactoryImpl implements ImageDataFactory { - @Inject - ImageDataDao imageDataDao; - @Inject - ObjectInDataStoreManager objMap; - @Inject - DataStoreManager storeMgr; - @Override - public TemplateInfo getTemplate(long templateId, DataStore store) { - ObjectInDataStoreVO obj = objMap.findObject(templateId, DataObjectType.TEMPLATE, store.getId(), store.getRole()); - if (obj == null) { - return null; - } - ImageDataVO templ = imageDataDao.findById(templateId); - TemplateObject tmpl = new TemplateObject(templ, store); - return tmpl; - } -} diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/ImageServiceImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/ImageServiceImpl.java index 99b57e87733..4b6583165ed 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/ImageServiceImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/ImageServiceImpl.java @@ -20,141 +20,75 @@ package org.apache.cloudstack.storage.image; import javax.inject.Inject; -import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.framework.async.AsyncCallFuture; -import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.framework.async.AsyncRpcConext; -import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import 
org.apache.cloudstack.storage.image.store.TemplateObject; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; -import org.apache.log4j.Logger; +import org.apache.cloudstack.engine.cloud.entity.api.TemplateEntity; +import org.apache.cloudstack.storage.EndPoint; +import org.apache.cloudstack.storage.image.downloader.ImageDownloader; +import org.apache.cloudstack.storage.image.manager.ImageDataStoreManager; +import org.apache.cloudstack.storage.image.provider.ImageDataStoreProviderManager; +import org.apache.cloudstack.storage.image.store.ImageDataStore; import org.springframework.stereotype.Component; -import com.cloud.utils.fsm.NoTransitionException; - @Component public class ImageServiceImpl implements ImageService { - private static final Logger s_logger = Logger.getLogger(ImageServiceImpl.class); @Inject - ObjectInDataStoreManager objectInDataStoreMgr; - - class CreateTemplateContext extends AsyncRpcConext { - final TemplateInfo srcTemplate; - final TemplateInfo templateOnStore; - final AsyncCallFuture future; - final ObjectInDataStoreVO obj; - public CreateTemplateContext(AsyncCompletionCallback callback, TemplateInfo srcTemplate, - TemplateInfo templateOnStore, - AsyncCallFuture future, - ObjectInDataStoreVO obj) { - super(callback); - this.srcTemplate = srcTemplate; - this.templateOnStore = templateOnStore; - this.future = future; - this.obj = obj; - } + ImageDataStoreProviderManager imageStoreProviderMgr; + + public ImageServiceImpl() { } @Override - public AsyncCallFuture createTemplateAsync( - TemplateInfo template, DataStore store) { - TemplateObject to = (TemplateObject) template; - AsyncCallFuture future = new AsyncCallFuture(); - try { - to.stateTransit(TemplateEvent.CreateRequested); - } catch (NoTransitionException e) { - s_logger.debug("Failed to transit state:", e); - CommandResult result = new CommandResult(); - result.setResult(e.toString()); - future.complete(result); - return future; - } - - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject(template.getId(), template.getType(), store.getId(), store.getRole()); - TemplateInfo templateOnStore = null; - if (obj == null) { - templateOnStore = objectInDataStoreMgr.create(template, store); - } else { - CommandResult result = new CommandResult(); - result.setResult("duplicate template on the storage"); - future.complete(result); - return future; - } - - try { - objectInDataStoreMgr.update(templateOnStore, Event.CreateOnlyRequested); - } catch (NoTransitionException e) { - s_logger.debug("failed to transit", e); - CommandResult result = new CommandResult(); - result.setResult(e.toString()); - future.complete(result); - return future; - } - CreateTemplateContext context = new CreateTemplateContext(null, - template, templateOnStore, - future, - obj); - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); - caller.setCallback(caller.getTarget().createTemplateCallback(null, null)) - .setContext(context); - store.getDriver().createAsync(templateOnStore, caller); - return future; + public TemplateEntity registerTemplate(long templateId, long imageStoreId) { + ImageDataStore ids = imageStoreProviderMgr.getDataStore(imageStoreId); + TemplateObject to = ids.registerTemplate(templateId); + return new TemplateEntityImpl(to); } - - protected Void createTemplateCallback(AsyncCallbackDispatcher callback, - CreateTemplateContext context) { - - TemplateInfo templateOnStore = context.templateOnStore; - TemplateObject template = (TemplateObject)context.srcTemplate; - AsyncCallFuture future 
= context.future; - CommandResult result = new CommandResult(); - - CreateCmdResult callbackResult = callback.getResult(); - if (callbackResult.isFailed()) { - try { - objectInDataStoreMgr.update(templateOnStore, Event.OperationFailed); - } catch (NoTransitionException e) { - s_logger.debug("failed to transit state", e); - } - result.setResult(callbackResult.getResult()); - future.complete(result); - return null; - } - - ObjectInDataStoreVO obj = context.obj; - obj.setInstallPath(callbackResult.getPath()); - - try { - objectInDataStoreMgr.update(templateOnStore, Event.OperationSuccessed); - } catch (NoTransitionException e) { - s_logger.debug("Failed to transit state", e); - result.setResult(e.toString()); - future.complete(result); - return null; - } - - template.setImageStoreId(templateOnStore.getDataStore().getId()); - try { - template.stateTransit(TemplateEvent.OperationSucceeded); - } catch (NoTransitionException e) { - s_logger.debug("Failed to transit state", e); - result.setResult(e.toString()); - future.complete(result); - return null; - } - - future.complete(result); + + @Override + public boolean deleteTemplate(long templateId) { + // TODO Auto-generated method stub + return false; + } + + @Override + public long registerIso(String isoUrl, long accountId) { + // TODO Auto-generated method stub + return 0; + } + + @Override + public boolean deleteIso(long isoId) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean revokeTemplateAccess(long templateId, long endpointId) { + // TODO Auto-generated method stub + return false; + } + + @Override + public String grantIsoAccess(long isoId, long endpointId) { + // TODO Auto-generated method stub return null; } @Override - public AsyncCallFuture deleteTemplateAsync( - TemplateInfo template) { + public boolean revokeIsoAccess(long isoId, long endpointId) { // TODO Auto-generated method stub - return null; + return false; + } + + @Override + public TemplateEntity getTemplateEntity(long templateId) { + ImageDataStore dataStore = imageStoreProviderMgr.getDataStoreFromTemplateId(templateId); + TemplateObject to = dataStore.getTemplate(templateId); + return new TemplateEntityImpl(to); + } + + @Override + public boolean grantTemplateAccess(TemplateInfo template, EndPoint endpointId) { + // TODO Auto-generated method stub + return true; } } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateObject.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateObject.java new file mode 100644 index 00000000000..367302d8445 --- /dev/null +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateObject.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.image; + +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskTypeHelper; +import org.apache.cloudstack.storage.image.db.ImageDataVO; +import org.apache.cloudstack.storage.image.store.ImageDataStoreInfo; + +public class TemplateObject implements TemplateInfo { + private ImageDataVO imageVO; + private ImageDataStoreInfo dataStore; + + public TemplateObject(ImageDataVO template, ImageDataStoreInfo dataStore) { + this.imageVO = template; + this.dataStore = dataStore; + } + + @Override + public ImageDataStoreInfo getDataStore() { + return this.dataStore; + } + + @Override + public long getId() { + return this.imageVO.getId(); + } + + @Override + public VolumeDiskType getDiskType() { + return VolumeDiskTypeHelper.getDiskType(imageVO.getFormat()); + } + + @Override + public String getPath() { + //TODO: add installation path if it's downloaded to cache storage already + return this.imageVO.getUrl(); + } + + @Override + public String getUuid() { + // TODO Auto-generated method stub + return null; + } +} diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/DefaultImageDataStoreDriverImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/DefaultImageDataStoreDriverImpl.java deleted file mode 100644 index ae349ff7f74..00000000000 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/DefaultImageDataStoreDriverImpl.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.storage.image.driver; - -import java.util.Set; - -import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.storage.image.ImageDataStoreDriver; - -public class DefaultImageDataStoreDriverImpl implements ImageDataStoreDriver { - - public DefaultImageDataStoreDriverImpl() { - } - - @Override - public String grantAccess(DataObject data, EndPoint ep) { - // TODO Auto-generated method stub - return null; - } - - @Override - public boolean revokeAccess(DataObject data, EndPoint ep) { - // TODO Auto-generated method stub - return false; - } - - @Override - public Set listObjects(DataStore store) { - // TODO Auto-generated method stub - return null; - } - - @Override - public void createAsync(DataObject data, - AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - - } - - @Override - public void deleteAsync(DataObject data, - AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - - } - - @Override - public boolean canCopy(DataObject srcData, DataObject destData) { - // TODO Auto-generated method stub - return false; - } - - @Override - public void copyAsync(DataObject srcdata, DataObject destData, - AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - - } -} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/ImageDataStoreDriver.java similarity index 66% rename from engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java rename to engine/storage/image/src/org/apache/cloudstack/storage/image/driver/ImageDataStoreDriver.java index 7679bb3e729..a968c9c47a7 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/ImageDataStoreDriver.java @@ -16,13 +16,18 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.cloudstack.storage.volume; +package org.apache.cloudstack.storage.image.driver; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.storage.datastore.PrimaryDataStore; +import org.apache.cloudstack.storage.EndPoint; import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.volume.VolumeServiceImpl.CreateBaseImageResult; +import org.apache.cloudstack.storage.image.TemplateObject; -public interface TemplateInstallStrategy { - public Void installAsync(TemplateInfo template, PrimaryDataStore store, AsyncCompletionCallback callback); +public interface ImageDataStoreDriver { + boolean registerTemplate(TemplateInfo template); + + String grantAccess(TemplateObject template, EndPoint endPointId); + + boolean revokeAccess(long templateId, long endPointId); + + boolean deleteTemplate(TemplateInfo template); } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/ImageDataStoreDriverImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/ImageDataStoreDriverImpl.java new file mode 100644 index 00000000000..cae57c02401 --- /dev/null +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/ImageDataStoreDriverImpl.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.image.driver; + +import org.apache.cloudstack.storage.EndPoint; +import org.apache.cloudstack.storage.image.TemplateInfo; +import org.apache.cloudstack.storage.image.TemplateObject; + +public class ImageDataStoreDriverImpl implements ImageDataStoreDriver { + + public ImageDataStoreDriverImpl() { + } + + @Override + public boolean registerTemplate(TemplateInfo template) { + // TODO: check the availability of template + return true; + } + + @Override + public String grantAccess(TemplateObject template, EndPoint endPointId) { + return template.getPath(); + } + + @Override + public boolean revokeAccess(long templateId, long endPointId) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean deleteTemplate(TemplateInfo template) { + // TODO Auto-generated method stub + return false; + } + +} diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataManager.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataManager.java deleted file mode 100644 index e5a6863a58b..00000000000 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataManager.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.image.manager; - -import org.apache.cloudstack.storage.image.TemplateEvent; -import org.apache.cloudstack.storage.image.TemplateState; -import org.apache.cloudstack.storage.image.db.ImageDataVO; - -import com.cloud.utils.fsm.StateMachine2; - -public interface ImageDataManager { - StateMachine2 getStateMachine(); - -} diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataManagerImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataManagerImpl.java deleted file mode 100644 index 09303aa1bb1..00000000000 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataManagerImpl.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.storage.image.manager; - -import org.apache.cloudstack.storage.image.TemplateEvent; -import org.apache.cloudstack.storage.image.TemplateState; -import org.apache.cloudstack.storage.image.db.ImageDataVO; - -import com.cloud.utils.fsm.StateMachine2; - -public class ImageDataManagerImpl implements ImageDataManager { - private final static StateMachine2 - stateMachine = new StateMachine2(); - - public ImageDataManagerImpl() { - stateMachine.addTransition(TemplateState.Allocated, TemplateEvent.CreateRequested, TemplateState.Creating); - stateMachine.addTransition(TemplateState.Creating, TemplateEvent.CreateRequested, TemplateState.Creating); - stateMachine.addTransition(TemplateState.Creating, TemplateEvent.OperationSucceeded, TemplateState.Ready); - stateMachine.addTransition(TemplateState.Creating, TemplateEvent.OperationFailed, TemplateState.Allocated); - stateMachine.addTransition(TemplateState.Creating, TemplateEvent.DestroyRequested, TemplateState.Destroying); - stateMachine.addTransition(TemplateState.Ready, TemplateEvent.DestroyRequested, TemplateState.Destroying); - stateMachine.addTransition(TemplateState.Allocated, TemplateEvent.DestroyRequested, TemplateState.Destroying); - stateMachine.addTransition(TemplateState.Destroying, TemplateEvent.DestroyRequested, TemplateState.Destroying); - stateMachine.addTransition(TemplateState.Destroying, TemplateEvent.OperationFailed, TemplateState.Destroying); - stateMachine.addTransition(TemplateState.Destroying, TemplateEvent.OperationSucceeded, TemplateState.Destroyed); - } - - @Override - public StateMachine2 getStateMachine() { - return stateMachine; - } -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreManager.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManager.java similarity index 82% rename from engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreManager.java rename to engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManager.java index 2bd361f05e9..e1dce500361 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreManager.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManager.java @@ -16,11 +16,10 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.cloudstack.storage.image.datastore; +package org.apache.cloudstack.storage.image.manager; -import org.apache.cloudstack.storage.image.ImageDataStoreDriver; +import org.apache.cloudstack.storage.image.store.ImageDataStore; public interface ImageDataStoreManager { ImageDataStore getImageDataStore(long dataStoreId); - boolean registerDriver(String uuid, ImageDataStoreDriver driver); } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java index 2a24f9b5f04..81839581d0f 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java @@ -18,52 +18,24 @@ */ package org.apache.cloudstack.storage.image.manager; -import java.util.HashMap; -import java.util.Map; - import javax.inject.Inject; -import org.apache.cloudstack.storage.datastore.provider.DataStoreProvider; -import org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManager; -import org.apache.cloudstack.storage.image.ImageDataStoreDriver; -import org.apache.cloudstack.storage.image.datastore.ImageDataStore; -import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager; -import org.apache.cloudstack.storage.image.db.ImageDataDao; import org.apache.cloudstack.storage.image.db.ImageDataStoreDao; +import org.apache.cloudstack.storage.image.db.ImageDataDao; import org.apache.cloudstack.storage.image.db.ImageDataStoreVO; -import org.apache.cloudstack.storage.image.store.ImageDataStoreImpl; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; -import org.springframework.stereotype.Component; +import org.apache.cloudstack.storage.image.store.ImageDataStore; -@Component public class ImageDataStoreManagerImpl implements ImageDataStoreManager { @Inject ImageDataStoreDao dataStoreDao; @Inject ImageDataDao imageDataDao; - @Inject - DataStoreProviderManager providerManager; - Map driverMaps = new HashMap(); @Override public ImageDataStore getImageDataStore(long dataStoreId) { ImageDataStoreVO dataStore = dataStoreDao.findById(dataStoreId); - long providerId = dataStore.getProvider(); - DataStoreProvider provider = providerManager.getDataStoreProviderById(providerId); - ImageDataStore imgStore = new ImageDataStoreImpl(dataStore, - driverMaps.get(provider.getUuid()) - ); // TODO Auto-generated method stub - return imgStore; - } - - @Override - public boolean registerDriver(String uuid, ImageDataStoreDriver driver) { - if (driverMaps.containsKey(uuid)) { - return false; - } - driverMaps.put(uuid, driver); - return true; + return null; } } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/provider/DefaultImageDataStoreProvider.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/provider/DefaultImageDataStoreProvider.java new file mode 100644 index 00000000000..363ed5f2ff2 --- /dev/null +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/provider/DefaultImageDataStoreProvider.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.image.provider; + +import javax.inject.Inject; + +import org.apache.cloudstack.storage.image.db.ImageDataStoreDao; +import org.apache.cloudstack.storage.image.db.ImageDataStoreProviderDao; +import org.apache.cloudstack.storage.image.db.ImageDataStoreProviderVO; +import org.apache.cloudstack.storage.image.db.ImageDataStoreVO; +import org.apache.cloudstack.storage.image.driver.ImageDataStoreDriver; +import org.apache.cloudstack.storage.image.driver.ImageDataStoreDriverImpl; +import org.apache.cloudstack.storage.image.store.ImageDataStore; +import org.apache.cloudstack.storage.image.store.ImageDataStoreImpl; +import org.apache.cloudstack.storage.image.store.lifecycle.DefaultImageDataStoreLifeCycle; +import org.apache.cloudstack.storage.image.store.lifecycle.ImageDataStoreLifeCycle; +import org.springframework.stereotype.Component; + +import com.cloud.utils.component.ComponentContext; + +@Component +public class DefaultImageDataStoreProvider implements ImageDataStoreProvider { + private final String providerName = "DefaultProvider"; + @Inject + ImageDataStoreProviderDao providerDao; + @Inject + ImageDataStoreDao imageStoreDao; + ImageDataStoreProviderVO provider; + + @Override + public ImageDataStore getImageDataStore(long imageStoreId) { + ImageDataStoreVO idsv = imageStoreDao.findById(imageStoreId); + ImageDataStoreDriver driver = new ImageDataStoreDriverImpl(); + ImageDataStore ids = new ImageDataStoreImpl(idsv, driver, false); + ids = ComponentContext.inject(ids); + return ids; + } + + @Override + public String getName() { + return providerName; + } + + @Override + public boolean register(long providerId) { + return true; + } + + @Override + public boolean init() { + provider = providerDao.findByName(providerName); + return true; + } + + @Override + public ImageDataStoreLifeCycle getLifeCycle() { + return new DefaultImageDataStoreLifeCycle(this, provider, imageStoreDao); + } +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProvider.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/provider/ImageDataStoreProvider.java similarity index 64% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProvider.java rename to engine/storage/image/src/org/apache/cloudstack/storage/image/provider/ImageDataStoreProvider.java index 0d38f34f1c7..710153af6dd 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProvider.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/provider/ImageDataStoreProvider.java @@ -16,17 +16,19 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.cloudstack.storage.datastore.provider; +package org.apache.cloudstack.storage.image.provider; -import java.util.Map; +import org.apache.cloudstack.storage.image.store.ImageDataStore; +import org.apache.cloudstack.storage.image.store.lifecycle.ImageDataStoreLifeCycle; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; - -public interface DataStoreProvider { - public DataStoreLifeCycle getLifeCycle(); +public interface ImageDataStoreProvider { + ImageDataStore getImageDataStore(long imageStoreId); + boolean register(long providerId); public String getName(); - public String getUuid(); - public long getId(); - public boolean configure(Map params); + ImageDataStoreLifeCycle getLifeCycle(); + /** + * @param providerId + * @return + */ + boolean init(); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManager.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/provider/ImageDataStoreProviderManager.java similarity index 59% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManager.java rename to engine/storage/image/src/org/apache/cloudstack/storage/image/provider/ImageDataStoreProviderManager.java index cbe045c5bc8..f03a0e14cca 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManager.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/provider/ImageDataStoreProviderManager.java @@ -16,15 +16,24 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.cloudstack.storage.datastore.provider; +package org.apache.cloudstack.storage.image.provider; import java.util.List; +import org.apache.cloudstack.storage.image.TemplateObject; +import org.apache.cloudstack.storage.image.store.ImageDataStore; + import com.cloud.utils.component.Manager; -public interface DataStoreProviderManager extends Manager { - public DataStoreProvider getDataStoreProviderByUuid(String uuid); - public DataStoreProvider getDataStoreProviderById(long id); - public DataStoreProvider getDataStoreProvider(String name); - public List getDataStoreProviders(); +public interface ImageDataStoreProviderManager extends Manager { + public ImageDataStoreProvider getProvider(long providerId); + public List listProvider(); + public ImageDataStore getDataStore(Long dataStoreId); + + public ImageDataStore getDataStoreFromTemplateId(long templateId); + /** + * @param name + * @return + */ + ImageDataStoreProvider getProvider(String name); } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/provider/ImageDataStoreProviderManagerImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/provider/ImageDataStoreProviderManagerImpl.java new file mode 100644 index 00000000000..62777f90bb6 --- /dev/null +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/provider/ImageDataStoreProviderManagerImpl.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.image.provider; + +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.storage.image.db.ImageDataDao; +import org.apache.cloudstack.storage.image.db.ImageDataStoreDao; +import org.apache.cloudstack.storage.image.db.ImageDataStoreProviderDao; +import org.apache.cloudstack.storage.image.db.ImageDataStoreProviderVO; +import org.apache.cloudstack.storage.image.db.ImageDataStoreVO; +import org.apache.cloudstack.storage.image.db.ImageDataVO; +import org.apache.cloudstack.storage.image.store.ImageDataStore; +import org.springframework.stereotype.Component; + +@Component +public class ImageDataStoreProviderManagerImpl implements ImageDataStoreProviderManager { + @Inject + ImageDataStoreProviderDao providerDao; + @Inject + ImageDataStoreDao dataStoreDao; + @Inject + ImageDataDao imageDataDao; + @Inject + List providers; + + @Override + public ImageDataStoreProvider getProvider(long providerId) { + + return null; + } + + @Override + public ImageDataStoreProvider getProvider(String name) { + for (ImageDataStoreProvider provider : providers) { + if (provider.getName().equalsIgnoreCase(name)) { + return provider; + } + } + return null; + } + + @Override + public ImageDataStore getDataStore(Long dataStoreId) { + if (dataStoreId == null) { + return null; + } + + ImageDataStoreVO idsv = dataStoreDao.findById(dataStoreId); + if (idsv == null) { + return null; + } + + long providerId = idsv.getProvider(); + ImageDataStoreProviderVO idspv = providerDao.findById(providerId); + ImageDataStoreProvider provider = getProvider(idspv.getName()); + return provider.getImageDataStore(dataStoreId); + } + + @Override + public ImageDataStore getDataStoreFromTemplateId(long templateId) { + ImageDataVO iddv = imageDataDao.findById(templateId); + return getDataStore(iddv.getImageDataStoreId()); + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + List existingProviders = providerDao.listAll(); + //TODO: hold global lock + boolean foundExistingProvider = false; + for (ImageDataStoreProvider provider : providers) { + foundExistingProvider = false; + for (ImageDataStoreProviderVO existingProvider : existingProviders) { + if (provider.getName().equalsIgnoreCase(existingProvider.getName())) { + foundExistingProvider = true; + break; + } + } + + if (!foundExistingProvider) { + //add a new provider into db + ImageDataStoreProviderVO nProvider = new ImageDataStoreProviderVO(); + nProvider.setName(provider.getName()); + nProvider = providerDao.persist(nProvider); + provider.register(nProvider.getId()); + } + provider.init(); + } + + return true; + } + + @Override + public boolean start() { + // TODO Auto-generated method stub + return true; + } + + @Override + public boolean stop() { + // TODO Auto-generated method stub + return true; + } + + @Override + public String getName() { + // TODO Auto-generated method stub + return null; + } + + @Override + public List listProvider() { + return providers; + } +} diff --git 
a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreProvider.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreProvider.java deleted file mode 100644 index 3569fe803d5..00000000000 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreProvider.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.image.store; - -import java.util.Map; - -import javax.inject.Inject; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; -import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider; -import org.apache.cloudstack.storage.image.ImageDataStoreDriver; -import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager; -import org.apache.cloudstack.storage.image.driver.DefaultImageDataStoreDriverImpl; -import org.apache.cloudstack.storage.image.store.lifecycle.DefaultImageDataStoreLifeCycle; -import org.apache.cloudstack.storage.image.store.lifecycle.ImageDataStoreLifeCycle; -import org.springframework.stereotype.Component; - -import com.cloud.utils.component.ComponentContext; - -@Component -public class DefaultImageDataStoreProvider implements ImageDataStoreProvider { - private final String name = "default image data store"; - protected ImageDataStoreLifeCycle lifeCycle; - protected ImageDataStoreDriver driver; - @Inject - ImageDataStoreManager storeMgr; - long id; - String uuid; - @Override - public DataStoreLifeCycle getLifeCycle() { - return lifeCycle; - } - - @Override - public String getName() { - return this.name; - } - - @Override - public String getUuid() { - return this.uuid; - } - - @Override - public long getId() { - return this.id; - } - - @Override - public boolean configure(Map params) { - lifeCycle = ComponentContext.inject(DefaultImageDataStoreLifeCycle.class); - driver = ComponentContext.inject(DefaultImageDataStoreDriverImpl.class); - uuid = (String)params.get("uuid"); - id = (Long)params.get("id"); - storeMgr.registerDriver(uuid, driver); - return true; - } - -} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/CreateCmdResult.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageDataStore.java similarity index 65% rename from engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/CreateCmdResult.java rename to engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageDataStore.java index 8934416b177..b1fabc704d7 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/CreateCmdResult.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageDataStore.java 
@@ -16,16 +16,16 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.cloudstack.engine.subsystem.api.storage; +package org.apache.cloudstack.storage.image.store; -public class CreateCmdResult extends CommandResult { - private String path; - public CreateCmdResult(String path) { - super(); - this.path = path; - } +import org.apache.cloudstack.storage.datastore.DataStore; +import org.apache.cloudstack.storage.image.TemplateObject; + +public interface ImageDataStore extends ImageDataStoreInfo { + TemplateObject registerTemplate(long templateId); + boolean deleteTemplate(long templateId); + + boolean needDownloadToCacheStorage(); - public String getPath() { - return this.path; - } + TemplateObject getTemplate(long templateId); } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageDataStoreImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageDataStoreImpl.java index 014d61feabd..355f7934469 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageDataStoreImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageDataStoreImpl.java @@ -18,26 +18,18 @@ */ package org.apache.cloudstack.storage.image.store; -import java.util.Set; - import javax.inject.Inject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; -import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; -import org.apache.cloudstack.storage.image.ImageDataStoreDriver; +import org.apache.cloudstack.storage.EndPoint; import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.image.datastore.ImageDataStore; +import org.apache.cloudstack.storage.image.TemplateObject; import org.apache.cloudstack.storage.image.db.ImageDataDao; import org.apache.cloudstack.storage.image.db.ImageDataStoreVO; import org.apache.cloudstack.storage.image.db.ImageDataVO; +import org.apache.cloudstack.storage.image.driver.ImageDataStoreDriver; import org.apache.cloudstack.storage.snapshot.SnapshotInfo; - public class ImageDataStoreImpl implements ImageDataStore { @Inject ImageDataDao imageDao; @@ -45,88 +37,107 @@ public class ImageDataStoreImpl implements ImageDataStore { ImageDataStoreVO imageDataStoreVO; boolean needDownloadToCacheStorage = false; - public ImageDataStoreImpl(ImageDataStoreVO dataStoreVO, ImageDataStoreDriver imageDataStoreDriver) { - this.driver = imageDataStoreDriver; + public ImageDataStoreImpl(ImageDataStoreVO dataStoreVO, ImageDataStoreDriver driver, boolean needDownloadToCacheStorage) { + this.driver = driver; + this.needDownloadToCacheStorage = needDownloadToCacheStorage; this.imageDataStoreVO = dataStoreVO; } - + /* + * @Override public TemplateInfo registerTemplate(long templateId) { + * ImageDataVO idv = imageDao.findById(templateId); TemplateInfo template = + * new TemplateInfo(this, idv); if (driver.registerTemplate(template)) { + * template.setImageDataStoreId(imageDataStoreVO.getId()); return template; + * } else { return null; } } + */ @Override - public Set listTemplates() { + public boolean deleteTemplate(long templateId) { + // TODO 
Auto-generated method stub + return false; + } + + @Override + public boolean needDownloadToCacheStorage() { + // TODO Auto-generated method stub + return false; + } + + @Override + public long getImageDataStoreId() { + return imageDataStoreVO.getId(); + } + + @Override + public TemplateObject registerTemplate(long templateId) { + ImageDataVO image = imageDao.findById(templateId); + image.setImageDataStoreId(this.getImageDataStoreId()); + imageDao.update(templateId, image); + return getTemplate(templateId); + } + + @Override + public TemplateObject getTemplate(long templateId) { + ImageDataVO image = imageDao.findById(templateId); + TemplateObject to = new TemplateObject(image, this); + return to; + } + + @Override + public String getType() { // TODO Auto-generated method stub return null; } - - - @Override - public DataStoreDriver getDriver() { - // TODO Auto-generated method stub - return null; - } - - - - @Override - public DataStoreRole getRole() { - // TODO Auto-generated method stub - return null; - } - - - - @Override - public long getId() { - // TODO Auto-generated method stub - return 0; - } - - - @Override public String getUri() { // TODO Auto-generated method stub return null; } - - @Override - public Scope getScope() { - // TODO Auto-generated method stub + public String grantAccess(VolumeInfo volume, EndPoint ep) { return null; } - - @Override - public TemplateInfo getTemplate(long templateId) { - // TODO Auto-generated method stub - return null; - } - - - - @Override - public VolumeInfo getVolume(long volumeId) { - // TODO Auto-generated method stub - return null; - } - - - - @Override - public SnapshotInfo getSnapshot(long snapshotId) { - // TODO Auto-generated method stub - return null; - } - - - - @Override - public boolean exists(DataObject object) { + public boolean revokeAccess(VolumeInfo volume, EndPoint ep) { // TODO Auto-generated method stub return false; } + + @Override + public String grantAccess(TemplateInfo template, EndPoint ep) { + return this.driver.grantAccess((TemplateObject)template, ep); + } + + @Override + public boolean revokeAccess(TemplateInfo template, EndPoint ep) { + // TODO Auto-generated method stub + return false; + } + + @Override + public String grantAccess(SnapshotInfo snapshot, EndPoint ep) { + // TODO Auto-generated method stub + return null; + } + + @Override + public boolean revokeAccess(SnapshotInfo snapshot, EndPoint ep) { + // TODO Auto-generated method stub + return false; + } + + @Override + public String getRole() { + return "imageStore"; + } + + @Override + public long getId() { + // TODO Auto-generated method stub + return 0; + } + } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java deleted file mode 100644 index 766ac450763..00000000000 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.image.store; - -import javax.inject.Inject; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; -import org.apache.cloudstack.storage.image.TemplateEvent; -import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.image.db.ImageDataDao; -import org.apache.cloudstack.storage.image.db.ImageDataVO; -import org.apache.cloudstack.storage.image.manager.ImageDataManager; -import org.apache.log4j.Logger; - -import com.cloud.utils.component.ComponentContext; -import com.cloud.utils.fsm.NoTransitionException; - -public class TemplateObject implements TemplateInfo { - private static final Logger s_logger = Logger.getLogger(TemplateObject.class); - private ImageDataVO imageVO; - private DataStore dataStore; - @Inject - ImageDataManager imageMgr; - @Inject - ImageDataDao imageDao; - - public TemplateObject(ImageDataVO template, DataStore dataStore) { - this.imageVO = template; - this.dataStore = dataStore; - } - - public static TemplateObject getTemplate(ImageDataVO vo, DataStore store) { - TemplateObject to = new TemplateObject(vo, store); - return ComponentContext.inject(to); - } - - public void setImageStoreId(long id) { - this.imageVO.setImageDataStoreId(id); - } - - public ImageDataVO getImage() { - return this.imageVO; - } - - @Override - public DataStore getDataStore() { - return this.dataStore; - } - - @Override - public long getId() { - return this.imageVO.getId(); - } - - @Override - public String getPath() { - //TODO: add installation path if it's downloaded to cache storage already - return this.imageVO.getUrl(); - } - - @Override - public String getUuid() { - // TODO Auto-generated method stub - return null; - } - - @Override - public String getUri() { - return this.dataStore.getUri() + "template/" + this.getPath(); - } - - @Override - public long getSize() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public DataObjectType getType() { - return DataObjectType.TEMPLATE; - } - - @Override - public DiskFormat getFormat() { - return DiskFormat.getFormat(this.imageVO.getFormat()); - } - - @Override - public boolean stateTransit(TemplateEvent e) throws NoTransitionException { - return imageMgr.getStateMachine().transitTo(this.imageVO, e, null, imageDao); - } -} diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java index c167ecb75b2..3ced8d3b8d2 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java @@ -1,75 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.image.store.lifecycle; import java.util.Map; import javax.inject.Inject; -import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.image.db.ImageDataStoreDao; +import org.apache.cloudstack.storage.image.db.ImageDataStoreProviderVO; +import org.apache.cloudstack.storage.image.db.ImageDataStoreVO; +import org.apache.cloudstack.storage.image.provider.ImageDataStoreProvider; +import org.apache.cloudstack.storage.image.store.ImageDataStore; public class DefaultImageDataStoreLifeCycle implements ImageDataStoreLifeCycle { - @Inject + protected ImageDataStoreProvider provider; + protected ImageDataStoreProviderVO providerVO; protected ImageDataStoreDao imageStoreDao; + @Override + public ImageDataStore registerDataStore(String name, + Map params) { + ImageDataStoreVO dataStore = imageStoreDao.findByName(name); + if (dataStore == null) { + dataStore = new ImageDataStoreVO(); + dataStore.setName(name); + dataStore.setProvider(providerVO.getId()); + dataStore = imageStoreDao.persist(dataStore); + } + return provider.getImageDataStore(dataStore.getId()); + } - public DefaultImageDataStoreLifeCycle() { + public DefaultImageDataStoreLifeCycle(ImageDataStoreProvider provider, + ImageDataStoreProviderVO providerVO, + ImageDataStoreDao dao) { + this.provider = provider; + this.providerVO = providerVO; + this.imageStoreDao = dao; } - - @Override - public boolean initialize(DataStore store, Map dsInfos) { - // TODO Auto-generated method stub - return false; - } - - - @Override - public boolean attachCluster(DataStore store, ClusterScope scope) { - // TODO Auto-generated method stub - return false; - } - - - @Override - public boolean attachZone(DataStore dataStore, ZoneScope scope) { - // TODO Auto-generated method stub - return false; - } - - - @Override - public boolean dettach() { - // TODO Auto-generated method stub - return false; - } - - - @Override - public boolean unmanaged() { - // TODO Auto-generated method stub - return false; - } - - - @Override - public boolean maintain() { - // TODO Auto-generated method stub - return false; - } - - - @Override - public boolean cancelMaintain() { - // TODO Auto-generated method stub - return false; - } - - - @Override - public boolean deleteDataStore() { - // TODO Auto-generated method stub - return false; - } - } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/ImageDataStoreLifeCycle.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/ImageDataStoreLifeCycle.java index a36823959df..a96983c22ee 100644 --- 
a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/ImageDataStoreLifeCycle.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/ImageDataStoreLifeCycle.java @@ -18,7 +18,10 @@ */ package org.apache.cloudstack.storage.image.store.lifecycle; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import java.util.Map; -public interface ImageDataStoreLifeCycle extends DataStoreLifeCycle { +import org.apache.cloudstack.storage.image.store.ImageDataStore; + +public interface ImageDataStoreLifeCycle { + public ImageDataStore registerDataStore(String name, Map params); } diff --git a/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java b/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java index 6572b85d9ab..fd6f1939c34 100644 --- a/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java +++ b/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java @@ -18,29 +18,32 @@ */ package org.apache.cloudstack.storage.image.motion; -import javax.inject.Inject; - -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncRpcConext; +import org.apache.cloudstack.storage.EndPoint; +import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.command.CopyCmd; -import org.apache.cloudstack.storage.command.CopyCmdAnswer; -import org.apache.cloudstack.storage.endpoint.EndPointSelector; +import org.apache.cloudstack.storage.command.CopyTemplateToPrimaryStorageAnswer; +import org.apache.cloudstack.storage.datastore.PrimaryDataStore; +import org.apache.cloudstack.storage.image.TemplateInfo; +import org.apache.cloudstack.storage.to.ImageOnPrimayDataStoreTO; import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo; import org.springframework.stereotype.Component; import com.cloud.agent.api.Answer; -//At least one of datastore is coming from image store or image cache store @Component public class DefaultImageMotionStrategy implements ImageMotionStrategy { - @Inject - EndPointSelector selector; + + @Override + public boolean canHandle(TemplateInfo templateStore) { + // TODO Auto-generated method stub + return true; + } + + + private class CreateTemplateContext extends AsyncRpcConext { private final TemplateOnPrimaryDataStoreInfo template; public CreateTemplateContext(AsyncCompletionCallback callback, TemplateOnPrimaryDataStoreInfo template) { @@ -53,7 +56,7 @@ public class DefaultImageMotionStrategy implements ImageMotionStrategy { } } -/* + @Override public void copyTemplateAsync(String destUri, String srcUri, EndPoint ep, AsyncCompletionCallback callback) { @@ -82,59 +85,12 @@ public class DefaultImageMotionStrategy implements ImageMotionStrategy { parentCall.complete(result); return null; - }*/ - - @Override - public boolean canHandle(DataObject srcData, DataObject destData) { - DataStore 
destStore = destData.getDataStore(); - DataStore srcStore = srcData.getDataStore(); - if (destStore.getRole() == DataStoreRole.Image || destStore.getRole() == DataStoreRole.ImageCache - || srcStore.getRole() == DataStoreRole.Image - || srcStore.getRole() == DataStoreRole.ImageCache) { - return true; - } - return false; } @Override - public Void copyAsync(DataObject srcData, DataObject destData, - AsyncCompletionCallback callback) { - DataStore destStore = destData.getDataStore(); - DataStore srcStore = srcData.getDataStore(); - EndPoint ep = selector.select(srcData, destData); - CopyCommandResult result = new CopyCommandResult(""); - if (ep == null) { - result.setResult("can't find end point"); - callback.complete(result); - return null; - } - - String srcUri = srcStore.getDriver().grantAccess(srcData, ep); - String destUri = destStore.getDriver().grantAccess(destData, ep); - CopyCmd cmd = new CopyCmd(srcUri, destUri); - - CreateTemplateContext context = new CreateTemplateContext(callback, null); - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); - caller.setCallback(caller.getTarget().copyAsyncCallback(null, null)) - .setContext(context); - - ep.sendMessageAsync(cmd, caller); + public EndPoint getEndPoint(TemplateInfo destTemplate, + TemplateInfo srcTemplate) { return null; } - - protected Void copyAsyncCallback(AsyncCallbackDispatcher callback, CreateTemplateContext context) { - AsyncCompletionCallback parentCall = context.getParentCallback(); - CopyCmdAnswer answer = (CopyCmdAnswer)callback.getResult(); - if (!answer.getResult()) { - CopyCommandResult result = new CopyCommandResult(""); - result.setResult(answer.getDetails()); - parentCall.complete(result); - } else { - CopyCommandResult result = new CopyCommandResult(answer.getPath()); - parentCall.complete(result); - } - return null; - - } } diff --git a/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/ImageMotionServiceImpl.java b/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/ImageMotionServiceImpl.java index 0e3636e3886..0d007ed675d 100644 --- a/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/ImageMotionServiceImpl.java +++ b/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/ImageMotionServiceImpl.java @@ -22,9 +22,9 @@ import java.util.List; import javax.inject.Inject; -import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.EndPoint; +import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; import org.apache.cloudstack.storage.image.ImageService; import org.apache.cloudstack.storage.image.TemplateInfo; @@ -53,7 +53,7 @@ public class ImageMotionServiceImpl implements ImageMotionService { @Override public void copyTemplateAsync(TemplateInfo destTemplate, TemplateInfo srcTemplate, AsyncCompletionCallback callback) { - /* ImageMotionStrategy ims = null; + ImageMotionStrategy ims = null; for (ImageMotionStrategy strategy : motionStrategies) { if (strategy.canHandle(srcTemplate)) { ims = strategy; @@ -69,7 +69,7 @@ public class ImageMotionServiceImpl implements ImageMotionService { String srcUri = srcTemplate.getDataStore().grantAccess(srcTemplate, ep); String destUri = destTemplate.getDataStore().grantAccess(destTemplate, ep); - ims.copyTemplateAsync(destUri, 
srcUri, ep, callback);*/ + ims.copyTemplateAsync(destUri, srcUri, ep, callback); } diff --git a/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/ImageMotionStrategy.java b/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/ImageMotionStrategy.java index 7a476367d37..037005d7134 100644 --- a/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/ImageMotionStrategy.java +++ b/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/ImageMotionStrategy.java @@ -18,7 +18,15 @@ */ package org.apache.cloudstack.storage.image.motion; -import org.apache.cloudstack.storage.motion.DataMotionStrategy; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.EndPoint; +import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.datastore.DataStore; +import org.apache.cloudstack.storage.image.TemplateInfo; -public interface ImageMotionStrategy extends DataMotionStrategy { +public interface ImageMotionStrategy { + public boolean canHandle(TemplateInfo templateStore); + public EndPoint getEndPoint(TemplateInfo destTemplate, TemplateInfo srcTemplate); + public void copyTemplateAsync(String destUri, String sourceUri, EndPoint ep, AsyncCompletionCallback callback); } diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/AopTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/AopTest.java index 0c2a2adf061..bde5804e624 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/AopTest.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/AopTest.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.test; import static java.lang.annotation.ElementType.METHOD; diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/AopTestAdvice.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/AopTestAdvice.java index ba356e3e6b5..63669c453d7 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/AopTestAdvice.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/AopTestAdvice.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.test; import org.aspectj.lang.ProceedingJoinPoint; diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java index 69907bc790b..1b12b54e024 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.test; import org.apache.cloudstack.storage.HostEndpointRpcServer; diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/CloudStackTestNGBase.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/CloudStackTestNGBase.java index 5bc7c0d2d27..dc7223c9e84 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/CloudStackTestNGBase.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/CloudStackTestNGBase.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
package org.apache.cloudstack.storage.test; import java.lang.reflect.Method; diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentTest.java index 20ac94611e7..371e6d0eb87 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentTest.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentTest.java @@ -18,25 +18,33 @@ */ package org.apache.cloudstack.storage.test; +import java.lang.reflect.Method; import java.util.UUID; import javax.inject.Inject; +import org.apache.cloudstack.storage.command.CopyTemplateToPrimaryStorageCmd; import org.apache.cloudstack.storage.to.ImageDataStoreTO; import org.apache.cloudstack.storage.to.ImageOnPrimayDataStoreTO; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.TemplateTO; + import org.mockito.Mockito; import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.testng.AbstractTestNGSpringContextTests; +import org.testng.annotations.AfterClass; +import org.testng.annotations.AfterMethod; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.BeforeTest; +import org.testng.annotations.Parameters; import org.testng.annotations.Test; import com.cloud.agent.AgentManager; -import com.cloud.agent.api.Command; import com.cloud.agent.api.ReadyCommand; import com.cloud.dc.ClusterVO; -import com.cloud.dc.DataCenter.NetworkType; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; +import com.cloud.dc.DataCenter.NetworkType; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; @@ -49,6 +57,8 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.org.Cluster.ClusterType; import com.cloud.org.Managed.ManagedState; import com.cloud.resource.ResourceState; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; @ContextConfiguration(locations="classpath:/storageContext.xml") public class DirectAgentTest extends CloudStackTestNGBase { @@ -139,8 +149,7 @@ public class DirectAgentTest extends CloudStackTestNGBase { Mockito.when(template.getImageDataStore()).thenReturn(imageStore); Mockito.when(image.getTemplate()).thenReturn(template); - //CopyTemplateToPrimaryStorageCmd cmd = new CopyTemplateToPrimaryStorageCmd(image); - Command cmd = null; + CopyTemplateToPrimaryStorageCmd cmd = new CopyTemplateToPrimaryStorageCmd(image); try { agentMgr.send(hostId, cmd); } catch (AgentUnavailableException e) { diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockHypervsiorHostEndPointRpcServer.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockHypervsiorHostEndPointRpcServer.java index d6985768d91..6c5ee1918f2 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockHypervsiorHostEndPointRpcServer.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockHypervsiorHostEndPointRpcServer.java @@ -18,6 +18,7 @@ */ package org.apache.cloudstack.storage.test; +import java.util.UUID; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -25,6 +26,10 @@ import java.util.concurrent.TimeUnit; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; 
import org.apache.cloudstack.storage.HostEndpointRpcServer; import org.apache.cloudstack.storage.HypervisorHostEndPoint; +import org.apache.cloudstack.storage.command.CopyTemplateToPrimaryStorageCmd; +import org.apache.cloudstack.storage.command.CopyTemplateToPrimaryStorageAnswer; +import org.apache.cloudstack.storage.command.CreateVolumeAnswer; +import org.apache.cloudstack.storage.command.CreateVolumeFromBaseImageCommand; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; @@ -46,11 +51,11 @@ public class MockHypervsiorHostEndPointRpcServer implements HostEndpointRpcServe public void run() { try { Answer answer = new Answer(cmd, false, "unknown command"); - /*if (cmd instanceof CopyTemplateToPrimaryStorageCmd) { + if (cmd instanceof CopyTemplateToPrimaryStorageCmd) { answer = new CopyTemplateToPrimaryStorageAnswer(cmd, UUID.randomUUID().toString()); } else if (cmd instanceof CreateVolumeFromBaseImageCommand) { answer = new CreateVolumeAnswer(cmd, UUID.randomUUID().toString()); - }*/ + } callback.complete(answer); } catch (Exception e) { diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockRpcCallBack.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockRpcCallBack.java index 294cb1ed8fa..207cc52e989 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockRpcCallBack.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockRpcCallBack.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.test; import javax.inject.Inject; diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/StorageFactoryBean.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/StorageFactoryBean.java index 68952b17f9e..2ac6dac4c16 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/StorageFactoryBean.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/StorageFactoryBean.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.test; diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/StorageTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/StorageTest.java index 2a285cb8f41..0ee7fe0a431 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/StorageTest.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/StorageTest.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.test; import static org.junit.Assert.*; diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestConfiguration.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestConfiguration.java index de7944ce8ac..d3280c0e38d 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestConfiguration.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestConfiguration.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
package org.apache.cloudstack.storage.test; import org.springframework.context.annotation.Bean; diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestNG.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestNG.java index 3da31e8eb4b..b3ecd3c22cb 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestNG.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestNG.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.test; import junit.framework.Assert; diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestNGAop.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestNGAop.java index 6cc2d209c20..130ecd21980 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestNGAop.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestNGAop.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.test; import java.lang.reflect.Method; diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/XenEndpoint.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/XenEndpoint.java index a96d7eca154..d0709d5be0f 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/XenEndpoint.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/XenEndpoint.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.test; public class XenEndpoint { diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java index a81c5467dc5..2aec9054380 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java @@ -18,39 +18,75 @@ */ package org.apache.cloudstack.storage.test; +import org.testng.annotations.Test; +import org.testng.annotations.BeforeMethod; +import org.testng.AssertJUnit; import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.engine.cloud.entity.api.TemplateEntity; import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.QCOW2; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VHD; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VMDK; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskTypeHelper; import org.apache.cloudstack.engine.subsystem.api.storage.type.RootDisk; +import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeTypeHelper; +import org.apache.cloudstack.storage.command.CreateVolumeAnswer; +import org.apache.cloudstack.storage.command.CreateVolumeFromBaseImageCommand; +import org.apache.cloudstack.storage.datastore.DefaultPrimaryDataStore; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO; +import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProviderManager; import org.apache.cloudstack.storage.image.ImageService; import org.apache.cloudstack.storage.image.db.ImageDataDao; import org.apache.cloudstack.storage.image.db.ImageDataVO; +import org.apache.cloudstack.storage.image.format.ISO; +import org.apache.cloudstack.storage.image.format.ImageFormat; +import org.apache.cloudstack.storage.image.format.ImageFormatHelper; +import org.apache.cloudstack.storage.image.format.OVA; +import org.apache.cloudstack.storage.image.format.Unknown; +import org.apache.cloudstack.storage.image.provider.ImageDataStoreProvider; +import 
org.apache.cloudstack.storage.image.provider.ImageDataStoreProviderManager; +import org.apache.cloudstack.storage.image.store.ImageDataStore; +import org.apache.cloudstack.storage.image.store.lifecycle.ImageDataStoreLifeCycle; import org.apache.cloudstack.storage.volume.VolumeService; import org.apache.cloudstack.storage.volume.db.VolumeDao2; import org.apache.cloudstack.storage.volume.db.VolumeVO; -import org.mockito.Mockito; import org.springframework.test.context.ContextConfiguration; -import org.testng.annotations.Test; +import org.mockito.Mockito; +import org.mockito.Mockito.*; + import com.cloud.agent.AgentManager; import com.cloud.dc.ClusterVO; -import com.cloud.dc.DataCenter.NetworkType; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; +import com.cloud.dc.DataCenter.NetworkType; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; import com.cloud.host.Host; import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.Status.Event; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.org.Cluster.ClusterType; @@ -60,8 +96,8 @@ import com.cloud.storage.Storage.TemplateType; @ContextConfiguration(locations="classpath:/storageContext.xml") public class volumeServiceTest extends CloudStackTestNGBase { - //@Inject - //ImageDataStoreProviderManager imageProviderMgr; + @Inject + ImageDataStoreProviderManager imageProviderMgr; @Inject ImageService imageService; @Inject @@ -80,8 +116,8 @@ public class volumeServiceTest extends CloudStackTestNGBase { DataCenterDao dcDao; @Inject PrimaryDataStoreDao primaryStoreDao; - //@Inject - //PrimaryDataStoreProviderManager primaryDataStoreProviderMgr; + @Inject + PrimaryDataStoreProviderManager primaryDataStoreProviderMgr; @Inject AgentManager agentMgr; Long dcId; @@ -174,7 +210,7 @@ public class volumeServiceTest extends CloudStackTestNGBase { image.setFeatured(true); image.setRequireHvm(true); image.setBits(64); - //image.setFormat(new VHD().toString()); + image.setFormat(new VHD().toString()); image.setAccountId(1); image.setEnablePassword(true); image.setEnableSshKey(true); @@ -189,16 +225,15 @@ public class volumeServiceTest extends CloudStackTestNGBase { private TemplateEntity createTemplate() { try { - /*imageProviderMgr.configure("image Provider", new HashMap()); + imageProviderMgr.configure("image Provider", new HashMap()); ImageDataVO image = createImageData(); ImageDataStoreProvider defaultProvider = imageProviderMgr.getProvider("DefaultProvider"); ImageDataStoreLifeCycle lifeCycle = defaultProvider.getLifeCycle(); ImageDataStore store = lifeCycle.registerDataStore("defaultHttpStore", new HashMap()); imageService.registerTemplate(image.getId(), store.getImageDataStoreId()); TemplateEntity te = imageService.getTemplateEntity(image.getId()); - return te;*/ - return null; - } catch (Exception e) { + return te; + } catch (ConfigurationException e) { return null; } } @@ -209,7 +244,6 @@ public class volumeServiceTest extends CloudStackTestNGBase { private PrimaryDataStoreInfo createPrimaryDataStore() { try { - /* PrimaryDataStoreProvider provider = primaryDataStoreProviderMgr.getDataStoreProvider("default primary data store provider"); primaryDataStoreProviderMgr.configure("primary data store mgr", new HashMap()); @@ -232,9 +266,7 @@ public class volumeServiceTest extends 
CloudStackTestNGBase { ClusterScope scope = new ClusterScope(clusterId, podId, dcId); lc.attachCluster(scope); return primaryDataStoreInfo; - */ - return null; - } catch (Exception e) { + } catch (ConfigurationException e) { return null; } } @@ -252,7 +284,7 @@ public class volumeServiceTest extends CloudStackTestNGBase { TemplateEntity te = createTemplate(); VolumeVO volume = createVolume(te.getId(), primaryStore.getId()); VolumeEntity ve = volumeService.getVolumeEntity(volume.getId()); - //ve.createVolumeFromTemplate(primaryStore.getId(), new VHD(), te); + ve.createVolumeFromTemplate(primaryStore.getId(), new VHD(), te); ve.destroy(); } @@ -261,7 +293,7 @@ public class volumeServiceTest extends CloudStackTestNGBase { primaryStore = createPrimaryDataStore(); VolumeVO volume = createVolume(null, primaryStore.getId()); VolumeEntity ve = volumeService.getVolumeEntity(volume.getId()); - //ve.createVolume(primaryStore.getId(), new VHD()); + ve.createVolume(primaryStore.getId(), new VHD()); ve.destroy(); } @@ -279,7 +311,7 @@ public class volumeServiceTest extends CloudStackTestNGBase { //@Test @Test public void test1() { - /*System.out.println(VolumeTypeHelper.getType("Root")); + System.out.println(VolumeTypeHelper.getType("Root")); System.out.println(VolumeDiskTypeHelper.getDiskType("vmdk")); System.out.println(ImageFormatHelper.getFormat("ova")); AssertJUnit.assertFalse(new VMDK().equals(new VHD())); @@ -297,7 +329,7 @@ public class volumeServiceTest extends CloudStackTestNGBase { VolumeDiskType qcow2 = new QCOW2(); ImageFormat qcow2format = new org.apache.cloudstack.storage.image.format.QCOW2(); AssertJUnit.assertFalse(qcow2.equals(qcow2format)); -*/ + } } diff --git a/engine/storage/integration-test/test/resource/storageContext.xml b/engine/storage/integration-test/test/resource/storageContext.xml index f5e343e5850..c81fe7d8ee9 100644 --- a/engine/storage/integration-test/test/resource/storageContext.xml +++ b/engine/storage/integration-test/test/resource/storageContext.xml @@ -1,3 +1,21 @@ + diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java deleted file mode 100644 index 487e2d53eff..00000000000 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.storage.snapshot; - -import javax.inject.Inject; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.storage.datastore.DataStoreManager; -import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.snapshot.db.SnapshotDao2; -import org.apache.cloudstack.storage.snapshot.db.SnapshotVO; -import org.springframework.stereotype.Component; - -@Component -public class SnapshotDataFactoryImpl implements SnapshotDataFactory { - @Inject - SnapshotDao2 snapshotDao; - @Inject - ObjectInDataStoreManager objMap; - @Inject - DataStoreManager storeMgr; - @Override - public SnapshotInfo getSnapshot(long snapshotId, DataStore store) { - SnapshotVO snapshot = snapshotDao.findById(snapshotId); - ObjectInDataStoreVO obj = objMap.findObject(snapshotId, DataObjectType.SNAPSHOT, store.getId(), store.getRole()); - SnapshotObject so = new SnapshotObject(snapshot, store); - return so; - } -} diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java deleted file mode 100644 index 49a9410bd7c..00000000000 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.storage.snapshot; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; -import org.apache.cloudstack.storage.snapshot.db.SnapshotVO; - -public class SnapshotObject implements SnapshotInfo { - private SnapshotVO snapshot; - private DataStore store; - - public SnapshotObject(SnapshotVO snapshot, DataStore store) { - this.snapshot = snapshot; - this.store = store; - } - - public DataStore getStore() { - return this.store; - } - @Override - public String getName() { - return this.snapshot.getName(); - } - - @Override - public SnapshotInfo getParent() { - // TODO Auto-generated method stub - return null; - } - - @Override - public SnapshotInfo getChild() { - // TODO Auto-generated method stub - return null; - } - - @Override - public VolumeInfo getBaseVolume() { - // TODO Auto-generated method stub - return null; - } - - @Override - public long getId() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public String getUri() { - // TODO Auto-generated method stub - return null; - } - - @Override - public DataStore getDataStore() { - // TODO Auto-generated method stub - return null; - } - - @Override - public long getSize() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public DataObjectType getType() { - // TODO Auto-generated method stub - return null; - } - - @Override - public DiskFormat getFormat() { - // TODO Auto-generated method stub - return null; - } - -} diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java index 971e9a5503b..80b1918665d 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
package org.apache.cloudstack.storage.snapshot; import org.apache.cloudstack.engine.cloud.entity.api.SnapshotEntity; diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/db/SnapshotDao2.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/db/SnapshotDao2.java deleted file mode 100644 index d531ede0aba..00000000000 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/db/SnapshotDao2.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.snapshot.db; - -import com.cloud.utils.db.GenericDao; - -public interface SnapshotDao2 extends GenericDao { - -} diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/db/SnapshotDao2Impl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/db/SnapshotDao2Impl.java deleted file mode 100644 index 5e36e10bb74..00000000000 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/db/SnapshotDao2Impl.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.snapshot.db; - -import com.cloud.utils.db.GenericDaoBase; - -public class SnapshotDao2Impl extends GenericDaoBase implements SnapshotDao2 { - -} diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/db/SnapshotVO.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/db/SnapshotVO.java deleted file mode 100644 index b0834be9bd4..00000000000 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/db/SnapshotVO.java +++ /dev/null @@ -1,296 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. 
The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package org.apache.cloudstack.storage.snapshot.db; - -import java.util.Date; -import java.util.UUID; - -import javax.persistence.Column; -import javax.persistence.Entity; -import javax.persistence.EnumType; -import javax.persistence.Enumerated; -import javax.persistence.GeneratedValue; -import javax.persistence.GenerationType; -import javax.persistence.Id; -import javax.persistence.Table; - -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.Snapshot.Status; -import com.cloud.storage.Snapshot.Type; -import com.cloud.utils.db.GenericDao; -import com.google.gson.annotations.Expose; - -@Entity -@Table(name="snapshots") -public class SnapshotVO { - @Id - @GeneratedValue(strategy=GenerationType.IDENTITY) - @Column(name="id") - private final long id = -1; - - @Column(name="data_center_id") - long dataCenterId; - - @Column(name="account_id") - long accountId; - - @Column(name="domain_id") - long domainId; - - @Column(name="volume_id") - Long volumeId; - - @Column(name="disk_offering_id") - Long diskOfferingId; - - @Expose - @Column(name="path") - String path; - - @Expose - @Column(name="name") - String name; - - @Expose - @Column(name="status", updatable = true, nullable=false) - @Enumerated(value=EnumType.STRING) - private Status status; - - @Column(name="snapshot_type") - short snapshotType; - - @Column(name="type_description") - String typeDescription; - - @Column(name="size") - long size; - - @Column(name=GenericDao.CREATED_COLUMN) - Date created; - - @Column(name=GenericDao.REMOVED_COLUMN) - Date removed; - - @Column(name="backup_snap_id") - String backupSnapshotId; - - @Column(name="swift_id") - Long swiftId; - - @Column(name="s3_id") - Long s3Id; - - @Column(name="sechost_id") - Long secHostId; - - @Column(name="prev_snap_id") - long prevSnapshotId; - - @Column(name="hypervisor_type") - @Enumerated(value=EnumType.STRING) - HypervisorType hypervisorType; - - @Expose - @Column(name="version") - String version; - - @Column(name="uuid") - String uuid; - - public SnapshotVO() { - this.uuid = UUID.randomUUID().toString(); - } - - public SnapshotVO(long dcId, long accountId, long domainId, Long volumeId, Long diskOfferingId, String path, String name, short snapshotType, String typeDescription, long size, HypervisorType hypervisorType ) { - this.dataCenterId = dcId; - this.accountId = accountId; - this.domainId = domainId; - this.volumeId = volumeId; - this.diskOfferingId = diskOfferingId; - this.path = path; - this.name = name; - this.snapshotType = snapshotType; - this.typeDescription = typeDescription; - this.size = size; - this.status = Status.Creating; - this.prevSnapshotId = 0; - this.hypervisorType = hypervisorType; - this.version = "2.2"; - this.uuid = UUID.randomUUID().toString(); - } - - public long getId() { - return id; - } - - public long getDataCenterId() { - return dataCenterId; - } - - - public long getAccountId() { - return accountId; - } - - - public long getDomainId() 
{ - return domainId; - } - - public long getVolumeId() { - return volumeId; - } - - public long getDiskOfferingId() { - return diskOfferingId; - } - - public void setVolumeId(Long volumeId) { - this.volumeId = volumeId; - } - - public String getPath() { - return path; - } - - public void setPath(String path) { - this.path = path; - } - - public String getName() { - return name; - } - - public short getsnapshotType() { - return snapshotType; - } - - public Type getType() { - if (snapshotType < 0 || snapshotType >= Type.values().length) { - return null; - } - return Type.values()[snapshotType]; - } - - public Long getSwiftId() { - return swiftId; - } - - public void setSwiftId(Long swiftId) { - this.swiftId = swiftId; - } - - public Long getSecHostId() { - return secHostId; - } - - public void setSecHostId(Long secHostId) { - this.secHostId = secHostId; - } - - public HypervisorType getHypervisorType() { - return hypervisorType; - } - - public void setSnapshotType(short snapshotType) { - this.snapshotType = snapshotType; - } - - public boolean isRecursive(){ - if ( snapshotType >= Type.HOURLY.ordinal() && snapshotType <= Type.MONTHLY.ordinal() ) { - return true; - } - return false; - } - - public long getSize() { - return size; - } - - public String getTypeDescription() { - return typeDescription; - } - public void setTypeDescription(String typeDescription) { - this.typeDescription = typeDescription; - } - - public String getVersion() { - return version; - } - - public void setVersion(String version) { - this.version = version; - } - - public Date getCreated() { - return created; - } - - public Date getRemoved() { - return removed; - } - - public Status getStatus() { - return status; - } - - public void setStatus(Status status) { - this.status = status; - } - - public String getBackupSnapshotId(){ - return backupSnapshotId; - } - - public long getPrevSnapshotId(){ - return prevSnapshotId; - } - - public void setBackupSnapshotId(String backUpSnapshotId){ - this.backupSnapshotId = backUpSnapshotId; - } - - public void setPrevSnapshotId(long prevSnapshotId){ - this.prevSnapshotId = prevSnapshotId; - } - - public static Type getSnapshotType(String snapshotType) { - for ( Type type : Type.values()) { - if ( type.equals(snapshotType)) { - return type; - } - } - return null; - } - - public String getUuid() { - return this.uuid; - } - - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public Long getS3Id() { - return s3Id; - } - - public void setS3Id(Long s3Id) { - this.s3Id = s3Id; - } - -} diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/HypervisorBasedSnapshot.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/HypervisorBasedSnapshot.java index 7df413f9ab6..7f18200cd3d 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/HypervisorBasedSnapshot.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/HypervisorBasedSnapshot.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.snapshot.strategy; import org.apache.cloudstack.storage.snapshot.SnapshotInfo; diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/StorageBasedSnapshot.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/StorageBasedSnapshot.java index 42807d6f738..fa9c5aeaa08 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/StorageBasedSnapshot.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/StorageBasedSnapshot.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.snapshot.strategy; import org.apache.cloudstack.storage.snapshot.SnapshotInfo; diff --git a/engine/storage/src/org/apache/cloudstack/storage/EndPoint.java b/engine/storage/src/org/apache/cloudstack/storage/EndPoint.java new file mode 100644 index 00000000000..cdc66269a99 --- /dev/null +++ b/engine/storage/src/org/apache/cloudstack/storage/EndPoint.java @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
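[reviewer note] The new file below introduces EndPoint as the seam between the storage subsystem and whatever transport actually executes commands. A minimal sketch of the two call patterns the interface implies, using only types from this patch; the wrapper class and method names are illustrative, not part of the change:

import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.storage.EndPoint;

import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;

// Illustrative caller only -- not part of this patch.
public class EndPointUsageSketch {
    // Synchronous path: blocks until the endpoint returns an Answer.
    public Answer sendBlocking(EndPoint ep, Command cmd) {
        return ep.sendMessage(cmd);
    }

    // Asynchronous path: returns immediately; the EndPoint implementation
    // is expected to invoke the callback once the Answer is available.
    public void sendNonBlocking(EndPoint ep, Command cmd, AsyncCompletionCallback callback) {
        ep.sendMessageAsync(cmd, callback);
    }
}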
+package org.apache.cloudstack.storage; + +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; + +public interface EndPoint { + public Answer sendMessage(Command cmd); + public void sendMessageAsync(Command cmd, AsyncCompletionCallback callback); +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/HypervisorHostEndPoint.java b/engine/storage/src/org/apache/cloudstack/storage/HypervisorHostEndPoint.java index c4ebfb2ff05..a2e9ea9f34a 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/HypervisorHostEndPoint.java +++ b/engine/storage/src/org/apache/cloudstack/storage/HypervisorHostEndPoint.java @@ -20,7 +20,6 @@ package org.apache.cloudstack.storage; import javax.inject.Inject; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.log4j.Logger; diff --git a/engine/storage/src/org/apache/cloudstack/storage/backup/SnapshotOnBackupStoreInfo.java b/engine/storage/src/org/apache/cloudstack/storage/backup/SnapshotOnBackupStoreInfo.java index d01f2b43131..5f5ce2f4a92 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/backup/SnapshotOnBackupStoreInfo.java +++ b/engine/storage/src/org/apache/cloudstack/storage/backup/SnapshotOnBackupStoreInfo.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.backup; import org.apache.cloudstack.storage.backup.datastore.BackupStoreInfo; diff --git a/engine/storage/src/org/apache/cloudstack/storage/backup/datastore/BackupStoreInfo.java b/engine/storage/src/org/apache/cloudstack/storage/backup/datastore/BackupStoreInfo.java index 2c126cf555f..ca1454af570 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/backup/datastore/BackupStoreInfo.java +++ b/engine/storage/src/org/apache/cloudstack/storage/backup/datastore/BackupStoreInfo.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.backup.datastore; import org.apache.cloudstack.storage.backup.SnapshotOnBackupStoreInfo; diff --git a/engine/storage/src/org/apache/cloudstack/storage/command/AttachPrimaryDataStoreCmd.java b/engine/storage/src/org/apache/cloudstack/storage/command/AttachPrimaryDataStoreCmd.java index 8aaca94aee0..b0b329904b2 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/command/AttachPrimaryDataStoreCmd.java +++ b/engine/storage/src/org/apache/cloudstack/storage/command/AttachPrimaryDataStoreCmd.java @@ -18,15 +18,17 @@ */ package org.apache.cloudstack.storage.command; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; + import com.cloud.agent.api.Command; public class AttachPrimaryDataStoreCmd extends Command implements StorageSubSystemCommand { - private final String dataStore; - public AttachPrimaryDataStoreCmd(String uri) { - this.dataStore = uri; + private final PrimaryDataStoreTO dataStore; + public AttachPrimaryDataStoreCmd(PrimaryDataStoreTO dataStore) { + this.dataStore = dataStore; } - public String getDataStore() { + public PrimaryDataStoreTO getDataStore() { return this.dataStore; } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/CommandResult.java b/engine/storage/src/org/apache/cloudstack/storage/command/CommandResult.java similarity index 91% rename from engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/CommandResult.java rename to engine/storage/src/org/apache/cloudstack/storage/command/CommandResult.java index 6b6139b937d..d1528635945 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/CommandResult.java +++ b/engine/storage/src/org/apache/cloudstack/storage/command/CommandResult.java @@ -16,24 +16,21 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.cloudstack.engine.subsystem.api.storage; +package org.apache.cloudstack.storage.command; public class CommandResult { private boolean success; private String result; + public CommandResult() { this.success = true; this.result = ""; } - + public boolean isSuccess() { return this.success; } - public boolean isFailed() { - return !this.success; - } - public void setSucess(boolean success) { this.success = success; } diff --git a/engine/storage/src/org/apache/cloudstack/storage/command/CopyCmd.java b/engine/storage/src/org/apache/cloudstack/storage/command/CopyCmd.java index 42eaa2fbce2..dcb81ba486c 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/command/CopyCmd.java +++ b/engine/storage/src/org/apache/cloudstack/storage/command/CopyCmd.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.command; import org.apache.cloudstack.storage.to.ImageOnPrimayDataStoreTO; diff --git a/engine/storage/src/org/apache/cloudstack/storage/command/CopyCmdAnswer.java b/engine/storage/src/org/apache/cloudstack/storage/command/CopyCmdAnswer.java deleted file mode 100644 index d9781bb8abe..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/command/CopyCmdAnswer.java +++ /dev/null @@ -1,17 +0,0 @@ -package org.apache.cloudstack.storage.command; - -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.Command; - -public class CopyCmdAnswer extends Answer { - private final String path; - - public CopyCmdAnswer(Command cmd, String path) { - super(cmd); - this.path = path; - } - - public String getPath() { - return this.path; - } -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/command/CopyTemplateToPrimaryStorageAnswer.java b/engine/storage/src/org/apache/cloudstack/storage/command/CopyTemplateToPrimaryStorageAnswer.java new file mode 100644 index 00000000000..9fd9317c38f --- /dev/null +++ b/engine/storage/src/org/apache/cloudstack/storage/command/CopyTemplateToPrimaryStorageAnswer.java @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.command; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; + +public class CopyTemplateToPrimaryStorageAnswer extends Answer { + private final String path; + + public CopyTemplateToPrimaryStorageAnswer(Command cmd, String path) { + super(cmd); + this.path = path; + } + + public String getPath() { + return this.path; + } +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/command/CreatePrimaryDataStoreCmd.java b/engine/storage/src/org/apache/cloudstack/storage/command/CreatePrimaryDataStoreCmd.java index 5a64e334bee..c9808d904ae 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/command/CreatePrimaryDataStoreCmd.java +++ b/engine/storage/src/org/apache/cloudstack/storage/command/CreatePrimaryDataStoreCmd.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.command; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; diff --git a/engine/storage/src/org/apache/cloudstack/storage/command/CreateVolumeCommand.java b/engine/storage/src/org/apache/cloudstack/storage/command/CreateVolumeCommand.java index db643feac41..c44970e36ff 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/command/CreateVolumeCommand.java +++ b/engine/storage/src/org/apache/cloudstack/storage/command/CreateVolumeCommand.java @@ -18,14 +18,16 @@ */ package org.apache.cloudstack.storage.command; +import org.apache.cloudstack.storage.to.VolumeTO; + import com.cloud.agent.api.Command; public class CreateVolumeCommand extends Command implements StorageSubSystemCommand { - protected String volumeUri; + protected VolumeTO volumeTO; - public CreateVolumeCommand(String volumeUri) { + public CreateVolumeCommand(VolumeTO volumeTO) { super(); - this.volumeUri = volumeUri; + this.volumeTO = volumeTO; } protected CreateVolumeCommand() { @@ -38,8 +40,8 @@ public class CreateVolumeCommand extends Command implements StorageSubSystemComm return false; } - public String getVolume() { - return this.volumeUri; + public VolumeTO getVolume() { + return this.volumeTO; } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/command/DeleteCommand.java b/engine/storage/src/org/apache/cloudstack/storage/command/DeleteVolumeCommand.java similarity index 79% rename from engine/storage/src/org/apache/cloudstack/storage/command/DeleteCommand.java rename to engine/storage/src/org/apache/cloudstack/storage/command/DeleteVolumeCommand.java index 5d948d19356..a30a83b1448 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/command/DeleteCommand.java +++ b/engine/storage/src/org/apache/cloudstack/storage/command/DeleteVolumeCommand.java @@ -22,13 +22,13 @@ import org.apache.cloudstack.storage.to.VolumeTO; import com.cloud.agent.api.Command; -public class DeleteCommand extends Command implements StorageSubSystemCommand { - private String uri; - public DeleteCommand(String uri) { - this.uri = uri; +public class DeleteVolumeCommand extends Command implements StorageSubSystemCommand { + private VolumeTO volume; + public DeleteVolumeCommand(VolumeTO volume) { + this.volume = volume; } - protected DeleteCommand() { + protected DeleteVolumeCommand() { } @Override @@ -37,8 +37,8 @@ public class DeleteCommand extends Command implements StorageSubSystemCommand { return false; } - public String getUri() { - return this.uri; + public VolumeTO getVolume() { + return this.volume; } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStore.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStore.java new file mode 100644 index 00000000000..90e0cb64228 --- /dev/null +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStore.java @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore; + +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.EndPoint; +import org.apache.cloudstack.storage.image.TemplateInfo; +import org.apache.cloudstack.storage.snapshot.SnapshotInfo; + +public interface DataStore { + String grantAccess(VolumeInfo volume, EndPoint ep); + boolean revokeAccess(VolumeInfo volume, EndPoint ep); + String grantAccess(TemplateInfo template, EndPoint ep); + boolean revokeAccess(TemplateInfo template, EndPoint ep); + String grantAccess(SnapshotInfo snapshot, EndPoint ep); + boolean revokeAccess(SnapshotInfo snapshot, EndPoint ep); + String getRole(); + long getId(); +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManager.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManager.java deleted file mode 100644 index 829be506ccc..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManager.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.datastore; - -import java.util.Map; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; - -public interface DataStoreManager { - public DataStore getDataStore(long storeId, DataStoreRole role); - public DataStore registerDataStore(Map params, String providerUuid); -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java deleted file mode 100644 index 6e7df92a5fc..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
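[reviewer note] The grantAccess/revokeAccess pairs on the new DataStore interface above read as a bracket around endpoint I/O: grant, talk to the endpoint, always revoke. A sketch of that bracket, assuming a concrete store, volume and endpoint are already in hand; everything except the patch's own types is hypothetical:

import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.storage.EndPoint;
import org.apache.cloudstack.storage.datastore.DataStore;

// Illustrative bracket only -- not part of this patch.
public class AccessBracketSketch {
    public void withVolumeAccess(DataStore store, VolumeInfo volume, EndPoint ep) {
        // grantAccess returns a store-specific handle (a path or URI)
        // that the endpoint can use to reach the volume.
        String handle = store.grantAccess(volume, ep);
        try {
            // ... build a Command around 'handle' and send it through 'ep' ...
        } finally {
            // Drop the grant even if sending failed.
            store.revokeAccess(volume, ep);
        }
    }
}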
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.datastore; - -import java.util.Map; - -import javax.inject.Inject; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; -import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager; -import org.springframework.stereotype.Component; - -import com.cloud.utils.exception.CloudRuntimeException; - -@Component -public class DataStoreManagerImpl implements DataStoreManager { - @Inject - PrimaryDataStoreProviderManager primaryStorMgr; - @Inject - ImageDataStoreManager imageDataStoreMgr; - @Override - public DataStore getDataStore(long storeId, DataStoreRole role) { - if (role == DataStoreRole.Primary) { - return primaryStorMgr.getPrimaryDataStore(storeId); - } else if (role == DataStoreRole.Image) { - return imageDataStoreMgr.getImageDataStore(storeId); - } - throw new CloudRuntimeException("un recognized type" + role); - } - @Override - public DataStore registerDataStore(Map params, - String providerUuid) { - // TODO Auto-generated method stub - return null; - } - -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/DefaultDatastoreLifeCyle.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/DefaultDatastoreLifeCyle.java deleted file mode 100644 index 910c07121b9..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/DefaultDatastoreLifeCyle.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.storage.datastore; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import javax.inject.Inject; - -import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; -import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; -import org.apache.cloudstack.storage.image.datastore.ImageDataStoreHelper; -import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; -import org.springframework.stereotype.Component; - -import edu.emory.mathcs.backport.java.util.Arrays; - -public class DefaultDatastoreLifeCyle implements DataStoreLifeCycle { - @Inject - PrimaryDataStoreHelper primaryStoreHelper; - @Inject - ImageDataStoreHelper imageStoreHelper; - @Override - public boolean initialize(DataStore store, Map dsInfos) { - String roles = dsInfos.get("roles"); - List roleArry = Arrays.asList(roles.split(";")); - List storeRoles = new ArrayList(); - for (String role : roleArry) { - storeRoles.add(DataStoreRole.getRole(role)); - } - - if (storeRoles.contains(DataStoreRole.Primary)) { - primaryStoreHelper.createPrimaryDataStore(dsInfos); - } - - if (storeRoles.contains(DataStoreRole.Image)) { - imageStoreHelper.createImageDataStore(dsInfos); - } - - //TODO: add more roles - - return true; - } - - @Override - public boolean attachCluster(DataStore dataStore, ClusterScope scope) { - if (dataStore.getRole() == DataStoreRole.Primary) { - primaryStoreHelper.attachCluster(dataStore); - } - // TODO Auto-generated method stub - return true; - } - - @Override - public boolean attachZone(DataStore dataStore, ZoneScope scope) { - return false; - } - - @Override - public boolean dettach() { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean unmanaged() { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean maintain() { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean cancelMaintain() { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean deleteDataStore() { - // TODO Auto-generated method stub - return false; - } - -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManager.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManager.java index d7ffc175b66..2c2738f5a40 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManager.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManager.java @@ -1,23 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.datastore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; import org.apache.cloudstack.storage.image.TemplateInfo; import org.apache.cloudstack.storage.snapshot.SnapshotInfo; import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; - -import com.cloud.utils.fsm.NoTransitionException; public interface ObjectInDataStoreManager { public TemplateInfo create(TemplateInfo template, DataStore dataStore); - public VolumeInfo create(VolumeInfo volume, DataStore dataStore); - public SnapshotInfo create(SnapshotInfo snapshot, DataStore dataStore); - public ObjectInDataStoreVO findObject(long objectId, DataObjectType type, - long dataStoreId, DataStoreRole role); - public boolean update(DataObject vo, Event event) throws NoTransitionException; + public ObjectInDataStoreVO create(VolumeInfo volume, DataStore dataStore); + public ObjectInDataStoreVO create(SnapshotInfo snapshot, DataStore dataStore); + public TemplateInfo findTemplate(TemplateInfo template, DataStore dataStore); + public VolumeInfo findVolume(VolumeInfo volume, DataStore dataStore); + public SnapshotInfo findSnapshot(SnapshotInfo snapshot, DataStore dataStore); + public boolean update(TemplateInfo vo, ObjectInDataStoreStateMachine.Event event); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java index 9e2718379ea..0607e1ac8a5 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java @@ -1,107 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
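[reviewer note] With the generic findObject/update(DataObject, ...) surface gone, the revised ObjectInDataStoreManager above pushes callers toward a find-or-create sequence per object type. A sketch under that assumption; Event.CreateRequested is the constant the transition table removed just below fired from State.Allocated:

import org.apache.cloudstack.storage.datastore.DataStore;
import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
import org.apache.cloudstack.storage.image.TemplateInfo;
import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event;

// Illustrative sequence only -- not part of this patch.
public class ObjectInStoreSketch {
    public TemplateInfo track(ObjectInDataStoreManager mgr, TemplateInfo template, DataStore store) {
        TemplateInfo onStore = mgr.findTemplate(template, store);
        if (onStore == null) {
            // Persists the object_datastore_ref mapping row.
            onStore = mgr.create(template, store);
        }
        // Drive the per-store state machine for the new mapping.
        mgr.update(onStore, Event.CreateRequested);
        return onStore;
    }
}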
package org.apache.cloudstack.storage.datastore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import javax.inject.Inject; + import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.db.ObjectInDataStoreDao; import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.image.ImageDataFactory; import org.apache.cloudstack.storage.image.TemplateInfo; import org.apache.cloudstack.storage.snapshot.SnapshotInfo; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.State; import org.springframework.stereotype.Component; -import com.cloud.utils.component.Inject; -import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.fsm.NoTransitionException; -import com.cloud.utils.fsm.StateMachine2; + @Component public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager { @Inject ObjectInDataStoreDao objectDataStoreDao; - @Inject - ImageDataFactory imageFactory; - @Inject - VolumeDataFactory volumeFactory; - protected StateMachine2 stateMachines; - public ObjectInDataStoreManagerImpl() { - stateMachines = new StateMachine2(); - stateMachines.addTransition(State.Allocated, Event.CreateRequested, State.Creating); - stateMachines.addTransition(State.Creating, Event.OperationSuccessed, State.Created); - stateMachines.addTransition(State.Creating, Event.OperationFailed, State.Failed); - stateMachines.addTransition(State.Failed, Event.CreateRequested, State.Creating); - stateMachines.addTransition(State.Ready, Event.DestroyRequested, State.Destroying); - stateMachines.addTransition(State.Destroying, Event.OperationSuccessed, State.Destroyed); - stateMachines.addTransition(State.Destroying, Event.OperationFailed, State.Destroying); - stateMachines.addTransition(State.Destroying, Event.DestroyRequested, State.Destroying); - stateMachines.addTransition(State.Created, Event.CopyingRequested, State.Copying); - stateMachines.addTransition(State.Copying, Event.OperationFailed, State.Created); - stateMachines.addTransition(State.Copying, Event.OperationSuccessed, State.Ready); - stateMachines.addTransition(State.Allocated, Event.CreateOnlyRequested, State.Creating2); - stateMachines.addTransition(State.Creating2, Event.OperationFailed, State.Failed); - stateMachines.addTransition(State.Creating2, Event.OperationSuccessed, State.Ready); - } - @Override public TemplateInfo create(TemplateInfo template, DataStore dataStore) { ObjectInDataStoreVO vo = new ObjectInDataStoreVO(); vo.setDataStoreId(dataStore.getId()); - vo.setDataStoreRole(dataStore.getRole()); + vo.setDataStoreType(dataStore.getRole()); vo.setObjectId(template.getId()); - vo.setObjectType(template.getType()); + vo.setObjectType("template"); vo = objectDataStoreDao.persist(vo); - - return imageFactory.getTemplate(template.getId(), dataStore); + TemplateInDataStore tmpl = new TemplateInDataStore(template, dataStore, vo); + return tmpl; } @Override - public VolumeInfo create(VolumeInfo 
volume, DataStore dataStore) { - ObjectInDataStoreVO vo = new ObjectInDataStoreVO(); - vo.setDataStoreId(dataStore.getId()); - vo.setDataStoreRole(dataStore.getRole()); - vo.setObjectId(volume.getId()); - vo.setObjectType(volume.getType()); - vo = objectDataStoreDao.persist(vo); - - return volumeFactory.getVolume(volume.getId(), dataStore); - } - - @Override - public SnapshotInfo create(SnapshotInfo snapshot, DataStore dataStore) { + public ObjectInDataStoreVO create(VolumeInfo volume, DataStore dataStore) { // TODO Auto-generated method stub return null; } - + @Override - public ObjectInDataStoreVO findObject(long objectId, DataObjectType type, long dataStoreId, DataStoreRole role) { - SearchCriteriaService sc = SearchCriteria2.create(ObjectInDataStoreVO.class); - sc.addAnd(sc.getEntity().getObjectId(), Op.EQ, objectId); - sc.addAnd(sc.getEntity().getDataStoreId(), Op.EQ, dataStoreId); - sc.addAnd(sc.getEntity().getObjectType(), Op.EQ, type); - sc.addAnd(sc.getEntity().getDataStoreRole(), Op.EQ, role); - sc.addAnd(sc.getEntity().getState(), Op.NIN, ObjectInDataStoreStateMachine.State.Destroyed, - ObjectInDataStoreStateMachine.State.Failed); - ObjectInDataStoreVO objectStoreVO = sc.find(); - return objectStoreVO; + public ObjectInDataStoreVO create(SnapshotInfo snapshot, DataStore dataStore) { + // TODO Auto-generated method stub + return null; } @Override - public boolean update(DataObject data, Event event) throws NoTransitionException { - ObjectInDataStoreVO obj = this.findObject(data.getId(), data.getType(), - data.getDataStore().getId(), data.getDataStore().getRole()); - if (obj == null) { - throw new CloudRuntimeException("can't find mapping in ObjectInDataStore table for: " + data); - } + public TemplateInfo findTemplate(TemplateInfo template, DataStore dataStore) { + // TODO Auto-generated method stub + return null; + } - return this.stateMachines.transitTo(obj, event, null, objectDataStoreDao); + @Override + public VolumeInfo findVolume(VolumeInfo volume, DataStore dataStore) { + // TODO Auto-generated method stub + return null; + } + + @Override + public SnapshotInfo findSnapshot(SnapshotInfo snapshot, DataStore dataStore) { + // TODO Auto-generated method stub + return null; + } + + @Override + public boolean update(TemplateInfo vo, Event event) { + // TODO Auto-generated method stub + return false; } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStore.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStore.java index a6ba9bc1f60..cf4f879a37c 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStore.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStore.java @@ -20,17 +20,15 @@ package org.apache.cloudstack.storage.datastore; import java.util.List; -import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.EndPoint; 
+import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.VolumeTO; import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo; public interface PrimaryDataStore extends DataStore, PrimaryDataStoreInfo { @@ -38,28 +36,27 @@ public interface PrimaryDataStore extends DataStore, PrimaryDataStoreInfo { List getVolumes(); -/* void deleteVolumeAsync(VolumeInfo volume, AsyncCompletionCallback callback); + void deleteVolumeAsync(VolumeInfo volume, AsyncCompletionCallback callback); void createVolumeAsync(VolumeInfo vo, VolumeDiskType diskType, AsyncCompletionCallback callback); - void createVoluemFromBaseImageAsync(VolumeInfo volume, TemplateInfo templateStore, AsyncCompletionCallback callback); - */ - - boolean exists(DataObject data); - - TemplateInfo getTemplate(long templateId); + VolumeInfo createVoluemFromBaseImage(VolumeInfo volume, TemplateOnPrimaryDataStoreInfo templateStore); - SnapshotInfo getSnapshot(long snapshotId); + void createVoluemFromBaseImageAsync(VolumeInfo volume, TemplateInfo templateStore, AsyncCompletionCallback callback); + + List getEndPoints(); + boolean exists(VolumeInfo vi); - DiskFormat getDefaultDiskType(); + boolean templateExists(TemplateInfo template); -/* void takeSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback); + TemplateOnPrimaryDataStoreInfo getTemplate(TemplateInfo template); - void revertSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback); + boolean installTemplate(TemplateOnPrimaryDataStoreInfo template); - void deleteSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback);*/ + VolumeDiskType getDefaultDiskType(); + + PrimaryDataStoreTO getDataStoreTO(); + + VolumeTO getVolumeTO(VolumeInfo volume); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/TemplateInDataStore.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/TemplateInDataStore.java new file mode 100644 index 00000000000..03c0a07630a --- /dev/null +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/TemplateInDataStore.java @@ -0,0 +1,58 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
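[reviewer note] The async methods PrimaryDataStore regains in the hunk above hand completion to an AsyncCompletionCallback instead of blocking. A caller sketch using only signatures from that hunk; the callback is assumed to be constructed by the calling layer, since its shape is defined in the async framework, not here:

import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.storage.datastore.PrimaryDataStore;

// Illustrative caller only -- not part of this patch.
public class CreateVolumeSketch {
    public void createNonBlocking(PrimaryDataStore store, VolumeInfo volume,
            AsyncCompletionCallback done) {
        // Returns immediately; the store implementation invokes 'done'
        // (presumably with a CommandResult) once the backend finishes.
        store.createVolumeAsync(volume, store.getDefaultDiskType(), done);
    }
}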
+package org.apache.cloudstack.storage.datastore; + +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType; +import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; +import org.apache.cloudstack.storage.image.TemplateInfo; +import org.apache.cloudstack.storage.image.store.ImageDataStoreInfo; + +public class TemplateInDataStore implements TemplateInfo { + public TemplateInDataStore(TemplateInfo template, DataStore dataStore, ObjectInDataStoreVO obj) { + + } + @Override + public ImageDataStoreInfo getDataStore() { + // TODO Auto-generated method stub + return null; + } + + @Override + public long getId() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public VolumeDiskType getDiskType() { + // TODO Auto-generated method stub + return null; + } + + @Override + public String getPath() { + // TODO Auto-generated method stub + return null; + } + + @Override + public String getUuid() { + // TODO Auto-generated method stub + return null; + } + +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/protocol/DataStoreProtocol.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/protocol/DataStoreProtocol.java index 54518ae3a5b..b0a7d50c57d 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/protocol/DataStoreProtocol.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/protocol/DataStoreProtocol.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.datastore.protocol; public enum DataStoreProtocol { diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java deleted file mode 100644 index 1276825acbc..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.datastore.provider; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.cloudstack.storage.datastore.db.DataStoreProviderDao; -import org.apache.cloudstack.storage.datastore.db.DataStoreProviderVO; -import org.springframework.stereotype.Component; - -@Component -public class DataStoreProviderManagerImpl implements DataStoreProviderManager { - @Inject - List providers; - @Inject - DataStoreProviderDao providerDao; - protected Map providerMap = new HashMap(); - @Override - public DataStoreProvider getDataStoreProviderByUuid(String uuid) { - return providerMap.get(uuid); - } - - @Override - public DataStoreProvider getDataStoreProvider(String name) { - // TODO Auto-generated method stub - return null; - } - - @Override - public List getDataStoreProviders() { - // TODO Auto-generated method stub - return null; - } - - @Override - public boolean configure(String name, Map params) - throws ConfigurationException { - //TODO: hold global lock - List providerVos = providerDao.listAll(); - for (DataStoreProvider provider : providers) { - boolean existingProvider = false; - DataStoreProviderVO providerVO = null; - for (DataStoreProviderVO prov : providerVos) { - if (prov.getName().equalsIgnoreCase(provider.getName())) { - existingProvider = true; - providerVO = prov; - break; - } - } - String uuid = provider.getUuid(); - if (!existingProvider) { - uuid = UUID.nameUUIDFromBytes(provider.getName().getBytes()).toString(); - providerVO = new DataStoreProviderVO(); - providerVO.setName(provider.getName()); - providerVO.setUuid(uuid); - providerVO = providerDao.persist(providerVO); - } - params.put("uuid", uuid); - params.put("id", providerVO.getId()); - provider.configure(params); - providerMap.put(uuid, provider); - } - return true; - } - - @Override - public boolean start() { - return true; - } - - @Override - public boolean stop() { - return true; - } - - @Override - public String getName() { - return "Data store provider manager"; - } - - @Override - public DataStoreProvider getDataStoreProviderById(long id) { - DataStoreProviderVO provider = providerDao.findById(id); - return providerMap.get(provider.getUuid()); - } -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java deleted file mode 100644 index 502158cdaaa..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.datastore.provider; - -public interface ImageDataStoreProvider extends DataStoreProvider { - -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java deleted file mode 100644 index ffaf897358a..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java +++ /dev/null @@ -1,5 +0,0 @@ -package org.apache.cloudstack.storage.datastore.provider; - - -public interface PrimaryDataStoreProvider extends DataStoreProvider { -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDao.java b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDao.java index 13d8132da86..08f9182f237 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDao.java +++ b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDao.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.db; import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; diff --git a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java index dd0b2c67f1f..ac75a9abed6 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java @@ -1,10 +1,24 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
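[reviewer note] For whichever change reintroduces the provider registry: the DataStoreProviderManagerImpl deleted above keyed each provider by UUID.nameUUIDFromBytes(provider.getName()), and that property is worth preserving. A self-contained illustration of why (the provider name here is a placeholder):

import java.util.UUID;

public class ProviderUuidSketch {
    public static void main(String[] args) {
        // nameUUIDFromBytes produces a name-based (version 3) UUID, so the
        // same provider name maps to the same UUID on every run -- which is
        // what made the deleted registration loop idempotent across restarts.
        String name = "example data store provider"; // placeholder name
        System.out.println(UUID.nameUUIDFromBytes(name.getBytes()));
    }
}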
package org.apache.cloudstack.storage.db; - - +import org.springframework.stereotype.Component; import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.State; -import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; diff --git a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreVO.java b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreVO.java index cdc8f874342..c6bacbd9078 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreVO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreVO.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.db; import java.util.Date; @@ -13,17 +29,12 @@ import javax.persistence.Table; import javax.persistence.Temporal; import javax.persistence.TemporalType; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; - import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.fsm.StateObject; @Entity @Table(name = "object_datastore_ref") -public class ObjectInDataStoreVO implements StateObject { +public class ObjectInDataStoreVO { @Id @GeneratedValue(strategy = GenerationType.IDENTITY) long id; @@ -31,16 +42,14 @@ public class ObjectInDataStoreVO implements StateObject createTemplateAsync(TemplateInfo template, DataStore store); - AsyncCallFuture deleteTemplateAsync(TemplateInfo template); + TemplateEntity registerTemplate(long templateId, long imageStoreId); + + boolean deleteTemplate(long templateId); + + long registerIso(String isoUrl, long accountId); + + boolean deleteIso(long isoId); + + boolean grantTemplateAccess(TemplateInfo template, EndPoint endpointId); + + boolean revokeTemplateAccess(long templateId, long endpointId); + + String grantIsoAccess(long isoId, long endpointId); + + boolean revokeIsoAccess(long isoId, long endpointId); + + TemplateEntity getTemplateEntity(long templateId); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/TemplateEntityImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/TemplateEntityImpl.java index 4dc68f07396..a9998ae7869 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/TemplateEntityImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/TemplateEntityImpl.java @@ -24,7 +24,7 @@ import java.util.List; import java.util.Map; import org.apache.cloudstack.engine.cloud.entity.api.TemplateEntity; 
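[reviewer note] The reworked ImageService contract in the hunk above trades the AsyncCallFuture methods for plain register/grant calls. A sketch of the flow those signatures suggest; the ImageService import path is assumed (its diff header is not shown in this excerpt) and all other identifiers are placeholders:

import org.apache.cloudstack.engine.cloud.entity.api.TemplateEntity;
import org.apache.cloudstack.storage.EndPoint;
import org.apache.cloudstack.storage.image.ImageService; // package path assumed
import org.apache.cloudstack.storage.image.TemplateInfo;

// Illustrative flow only -- not part of this patch.
public class ImageServiceSketch {
    public TemplateEntity publish(ImageService images, long templateId,
            long imageStoreId, TemplateInfo template, EndPoint ep) {
        // Register the template with an image store, then open access for
        // the endpoint that will consume it.
        TemplateEntity entity = images.registerTemplate(templateId, imageStoreId);
        if (!images.grantTemplateAccess(template, ep)) {
            throw new IllegalStateException("grant failed"); // placeholder handling
        }
        return entity;
    }
}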
-import org.apache.cloudstack.storage.image.datastore.ImageDataStoreInfo; +import org.apache.cloudstack.storage.image.store.ImageDataStoreInfo; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage.ImageFormat; diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/TemplateEvent.java b/engine/storage/src/org/apache/cloudstack/storage/image/TemplateEvent.java deleted file mode 100644 index 44d0005ac80..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/image/TemplateEvent.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.image; - -public enum TemplateEvent { - CreateRequested, - OperationFailed, - OperationSucceeded, - DestroyRequested; -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/TemplateInfo.java b/engine/storage/src/org/apache/cloudstack/storage/image/TemplateInfo.java index 45ec2682c6d..45fb51578a5 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/TemplateInfo.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/TemplateInfo.java @@ -18,19 +18,17 @@ */ package org.apache.cloudstack.storage.image; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType; +import org.apache.cloudstack.storage.image.store.ImageDataStoreInfo; -import com.cloud.utils.fsm.NoTransitionException; - -public interface TemplateInfo extends DataObject { - DataStore getDataStore(); +public interface TemplateInfo { + ImageDataStoreInfo getDataStore(); long getId(); + + VolumeDiskType getDiskType(); String getPath(); String getUuid(); - - boolean stateTransit(TemplateEvent e) throws NoTransitionException; } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/TemplateState.java b/engine/storage/src/org/apache/cloudstack/storage/image/TemplateState.java deleted file mode 100644 index c5981e38ac0..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/image/TemplateState.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
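Note: with stateTransit() removed, TemplateInfo is now a plain read-only view of a template on an image store. A minimal sketch of an implementation under that reading; the class name and constructor wiring are illustrative, not part of the patch:

    package org.apache.cloudstack.storage.image;

    import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType;
    import org.apache.cloudstack.storage.image.store.ImageDataStoreInfo;

    public class TemplateObjectSketch implements TemplateInfo {
        private final long id;
        private final String path;
        private final String uuid;
        private final VolumeDiskType diskType;
        private final ImageDataStoreInfo dataStore;

        public TemplateObjectSketch(long id, String path, String uuid,
                VolumeDiskType diskType, ImageDataStoreInfo dataStore) {
            this.id = id;
            this.path = path;
            this.uuid = uuid;
            this.diskType = diskType;
            this.dataStore = dataStore;
        }

        @Override public ImageDataStoreInfo getDataStore() { return dataStore; }
        @Override public long getId() { return id; }
        @Override public VolumeDiskType getDiskType() { return diskType; }
        @Override public String getPath() { return path; }
        @Override public String getUuid() { return uuid; }
    }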
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.image; - -public enum TemplateState { - Allocated, - Creating, - Destroying, - Destroyed, - Ready; -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreHelper.java b/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreHelper.java deleted file mode 100644 index e88819253e7..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreHelper.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.image.datastore; - -import java.util.Map; - -import javax.inject.Inject; - -import org.apache.cloudstack.storage.image.db.ImageDataStoreDao; -import org.apache.cloudstack.storage.image.db.ImageDataStoreVO; -import org.springframework.stereotype.Component; - -import com.cloud.utils.exception.CloudRuntimeException; - -@Component -public class ImageDataStoreHelper { - @Inject - ImageDataStoreDao imageStoreDao; - public ImageDataStoreVO createImageDataStore(Map params) { - ImageDataStoreVO store = new ImageDataStoreVO(); - store.setName(params.get("name")); - store.setProtocol(params.get("protocol")); - store.setProvider(Long.parseLong(params.get("provider"))); - store = imageStoreDao.persist(store); - return store; - } - - public boolean deleteImageDataStore(long id) { - ImageDataStoreVO store = imageStoreDao.findById(id); - if (store == null) { - throw new CloudRuntimeException("can't find image store:" + id); - } - - imageStoreDao.remove(id); - return true; - } -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDao.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDao.java index b5db164055d..5f79e966f35 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDao.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDao.java @@ -22,9 +22,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import org.apache.cloudstack.storage.image.TemplateEvent; -import org.apache.cloudstack.storage.image.TemplateState; - import com.cloud.domain.DomainVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.projects.Project.ListProjectResourcesCriteria; @@ -32,9 +29,8 @@ import 
com.cloud.template.VirtualMachineTemplate.TemplateFilter; import com.cloud.user.Account; import com.cloud.utils.Pair; import com.cloud.utils.db.GenericDao; -import com.cloud.utils.fsm.StateDao; -public interface ImageDataDao extends GenericDao, StateDao { +public interface ImageDataDao extends GenericDao { public List listByPublic(); public ImageDataVO findByName(String templateName); diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDaoImpl.java index 3dbc844e67c..4c37c9d58e6 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDaoImpl.java @@ -29,10 +29,9 @@ import java.util.List; import java.util.Map; import java.util.Set; +import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.cloudstack.storage.image.TemplateEvent; -import org.apache.cloudstack.storage.image.TemplateState; import org.apache.cloudstack.storage.image.format.ISO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -75,18 +74,18 @@ public class ImageDataDaoImpl extends GenericDaoBase implemen private static final Logger s_logger = Logger.getLogger(VMTemplateDaoImpl.class); - VMTemplateZoneDao _templateZoneDao = null; + @Inject VMTemplateZoneDao _templateZoneDao = null; - VMTemplateDetailsDao _templateDetailsDao = null; + @Inject VMTemplateDetailsDao _templateDetailsDao = null; - ConfigurationDao _configDao = null; + @Inject ConfigurationDao _configDao = null; - HostDao _hostDao = null; + @Inject HostDao _hostDao = null; - DomainDao _domainDao = null; + @Inject DomainDao _domainDao = null; - DataCenterDao _dcDao = null; + @Inject DataCenterDao _dcDao = null; private final String SELECT_TEMPLATE_HOST_REF = "SELECT t.id, h.data_center_id, t.unique_name, t.name, t.public, t.featured, t.type, t.hvm, t.bits, t.url, t.format, t.created, t.account_id, " + "t.checksum, t.display_text, t.enable_password, t.guest_os_id, t.bootable, t.prepopulate, t.cross_zones, t.hypervisor_type FROM vm_template t"; @@ -925,12 +924,4 @@ public class ImageDataDaoImpl extends GenericDaoBase implemen private boolean isAdmin(short accountType) { return ((accountType == Account.ACCOUNT_TYPE_ADMIN) || (accountType == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) || (accountType == Account.ACCOUNT_TYPE_DOMAIN_ADMIN) || (accountType == Account.ACCOUNT_TYPE_READ_ONLY_ADMIN)); } - - @Override - public boolean updateState(TemplateState currentState, TemplateEvent event, - TemplateState nextState, ImageDataVO vo, Object data) { - // TODO Auto-generated method stub - return false; - } - } \ No newline at end of file diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataStoreVO.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataStoreVO.java index 0eb7536c6af..5b660ec80fb 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataStoreVO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataStoreVO.java @@ -34,16 +34,10 @@ public class ImageDataStoreVO { @Column(name = "name", nullable = false) private String name; - - @Column(name = "protocol", nullable = false) - private String protocol; @Column(name = "image_provider_id", nullable = false) private long provider; - - @Column(name = "data_center_id") - private long dcId; - + public long getId() { return this.id; } @@ -63,20 +57,4 @@ 
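Note: the @Inject annotations added to the DAO fields above replace wiring that previously had to be done by hand via the legacy ComponentLocator lookups this patch removes elsewhere. The pattern in minimal form, with an illustrative holder class:

    import javax.inject.Inject;
    import org.springframework.stereotype.Component;

    import com.cloud.storage.dao.VMTemplateZoneDao;

    @Component
    public class InjectionStyleExample {
        // The Spring container sets this field when the bean is created; no
        // explicit locator call is needed in a constructor or configure().
        @Inject
        VMTemplateZoneDao templateZoneDao;
    }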
public class ImageDataStoreVO { public void setProvider(long provider) { this.provider = provider; } - - public void setProtocol(String protocol) { - this.protocol = protocol; - } - - public String getProtocol() { - return this.protocol; - } - - public void setDcId(long dcId) { - this.dcId = dcId; - } - - public long getDcId() { - return this.dcId; - } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataVO.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataVO.java index f7274c3931c..e66b7bbb31d 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataVO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataVO.java @@ -34,18 +34,16 @@ import javax.persistence.TemporalType; import javax.persistence.Transient; import org.apache.cloudstack.api.Identity; -import org.apache.cloudstack.storage.image.TemplateState; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage; import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.VMTemplateVO; import com.cloud.utils.db.GenericDao; -import com.cloud.utils.fsm.StateObject; @Entity @Table(name = "vm_template") -public class ImageDataVO implements Identity, StateObject { +public class ImageDataVO implements Identity { @Id @TableGenerator(name = "vm_template_sq", table = "sequence", pkColumnName = "name", valueColumnName = "value", pkColumnValue = "vm_template_seq", allocationSize = 1) @Column(name = "id", nullable = false) @@ -134,12 +132,6 @@ public class ImageDataVO implements Identity, StateObject { @Column(name = "image_data_store_id") private long imageDataStoreId; - - @Column(name = "size") - private long size; - - @Column(name = "state") - private TemplateState state; @Transient Map details; @@ -154,7 +146,6 @@ public class ImageDataVO implements Identity, StateObject { public ImageDataVO() { this.uuid = UUID.randomUUID().toString(); - this.state = TemplateState.Allocated; } public boolean getEnablePassword() { @@ -406,17 +397,5 @@ public class ImageDataVO implements Identity, StateObject { public void setImageDataStoreId(long dataStoreId) { this.imageDataStoreId = dataStoreId; } - - public void setSize(long size) { - this.size = size; - } - - public long getSize() { - return this.size; - } - - public TemplateState getState() { - return this.state; - } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/motion/ImageMotionService.java b/engine/storage/src/org/apache/cloudstack/storage/image/motion/ImageMotionService.java index 422bc066211..89de552ba92 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/motion/ImageMotionService.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/motion/ImageMotionService.java @@ -18,8 +18,8 @@ */ package org.apache.cloudstack.storage.image.motion; -import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; import org.apache.cloudstack.storage.image.TemplateInfo; import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo; diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreInfo.java b/engine/storage/src/org/apache/cloudstack/storage/image/store/ImageDataStoreInfo.java similarity index 87% rename from 
engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreInfo.java rename to engine/storage/src/org/apache/cloudstack/storage/image/store/ImageDataStoreInfo.java index b6b9a2a55d7..4c55c081343 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreInfo.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/store/ImageDataStoreInfo.java @@ -16,11 +16,12 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.cloudstack.storage.image.datastore; +package org.apache.cloudstack.storage.image.store; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.storage.datastore.DataStore; public interface ImageDataStoreInfo extends DataStore { public long getImageDataStoreId(); public String getType(); + public String getUri(); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionDriver.java b/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionDriver.java deleted file mode 100644 index 3a59b21238b..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionDriver.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.motion; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; - -public interface DataMotionDriver { - public void copy(DataObject srcObj, DataObject destObj); -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionService.java b/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionService.java deleted file mode 100644 index db36f6492e8..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionService.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
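Note: the new getUri() on the relocated ImageDataStoreInfo gives primary-store code a transport-neutral handle on an image store (it is used later in this patch by createVoluemFromBaseImageAsync). A consumer sketch; the URI format in the comment is an assumption, since the patch does not define one:

    import org.apache.cloudstack.storage.image.store.ImageDataStoreInfo;

    public class ImageStoreUriExample {
        // Describe where a template would be fetched from; purely illustrative.
        public String describe(ImageDataStoreInfo store) {
            // e.g. "nfs://host/export/templates" -- format is an assumption.
            return "image store " + store.getImageDataStoreId()
                    + " (" + store.getType() + ") at " + store.getUri();
        }
    }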
- */ -package org.apache.cloudstack.storage.motion; - -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; - -public interface DataMotionService { - public void copyAsync(DataObject srcData, DataObject destData, - AsyncCompletionCallback callback); -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java b/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java deleted file mode 100644 index 343140fb98e..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.motion; - -import java.util.List; - -import javax.inject.Inject; - -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.springframework.stereotype.Component; - -import com.cloud.utils.exception.CloudRuntimeException; - -@Component -public class DataMotionServiceImpl implements DataMotionService { - @Inject - List strategies; - - @Override - public void copyAsync(DataObject srcData, DataObject destData, - AsyncCompletionCallback callback) { - - if (srcData.getDataStore().getDriver().canCopy(srcData, destData)) { - srcData.getDataStore().getDriver() - .copyAsync(srcData, destData, callback); - return; - } else if (destData.getDataStore().getDriver() - .canCopy(srcData, destData)) { - destData.getDataStore().getDriver() - .copyAsync(srcData, destData, callback); - return; - } - - for (DataMotionStrategy strategy : strategies) { - if (strategy.canHandle(srcData, destData)) { - strategy.copyAsync(srcData, destData, callback); - return; - } - } - throw new CloudRuntimeException("can't find strategy to move data"); - } - -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionStrategy.java b/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionStrategy.java deleted file mode 100644 index ba40c6dcbce..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionStrategy.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
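Note: since DataMotionService and its implementation are deleted wholesale here, it is worth recording the dispatch order the removed code used, in case the logic resurfaces elsewhere: source-side driver first, then destination-side driver, then the first pluggable strategy that claims the pair. Restated from the deleted hunk with comments (the callback's generic parameter is not visible in this view and is filled in from the deleted imports):

    import java.util.List;
    import javax.inject.Inject;
    import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
    import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
    import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
    import com.cloud.utils.exception.CloudRuntimeException;

    // Restatement of the deleted DataMotionServiceImpl.copyAsync(), for reference.
    public class RemovedDataMotionDispatch {
        @Inject
        List<DataMotionStrategy> strategies;

        public void copyAsync(DataObject srcData, DataObject destData,
                AsyncCompletionCallback<CopyCommandResult> callback) {
            // 1. Prefer a driver that can copy natively from the source side.
            if (srcData.getDataStore().getDriver().canCopy(srcData, destData)) {
                srcData.getDataStore().getDriver().copyAsync(srcData, destData, callback);
                return;
            }
            // 2. Otherwise try the destination side's driver.
            if (destData.getDataStore().getDriver().canCopy(srcData, destData)) {
                destData.getDataStore().getDriver().copyAsync(srcData, destData, callback);
                return;
            }
            // 3. Fall back to the first registered strategy that claims this pair.
            for (DataMotionStrategy strategy : strategies) {
                if (strategy.canHandle(srcData, destData)) {
                    strategy.copyAsync(srcData, destData, callback);
                    return;
                }
            }
            throw new CloudRuntimeException("can't find strategy to move data");
        }
    }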
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.motion; - -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; - -public interface DataMotionStrategy { - public boolean canHandle(DataObject srcData, DataObject destData); - - public Void copyAsync(DataObject srcData, DataObject destData, - AsyncCompletionCallback callback); -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactory.java b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactory.java deleted file mode 100644 index 22d328f4932..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactory.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.snapshot; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; - -public interface SnapshotDataFactory { - public SnapshotInfo getSnapshot(long snapshotId, DataStore store); -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotEntityImpl.java b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotEntityImpl.java index d57d078cb52..1363251ed95 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotEntityImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotEntityImpl.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.snapshot; import java.lang.reflect.Method; diff --git a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotInfo.java b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotInfo.java index 983ec4dcd42..1c572cf3b25 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotInfo.java +++ b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotInfo.java @@ -1,9 +1,24 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.snapshot; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -public interface SnapshotInfo extends DataObject { +public interface SnapshotInfo { public String getName(); public SnapshotInfo getParent(); public SnapshotInfo getChild(); diff --git a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotService.java b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotService.java index bc56e6287a0..d50c9a0c8f3 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotService.java +++ b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotService.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
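Note: SnapshotInfo keeps its getParent()/getChild() links even after losing the DataObject inheritance, so delta-snapshot chains can still be walked. A small helper sketch (the helper class itself is illustrative, not part of the patch):

    import org.apache.cloudstack.storage.snapshot.SnapshotInfo;

    public final class SnapshotChains {
        private SnapshotChains() {
        }

        // Follow getParent() links back to the full (root) snapshot of a
        // delta chain; returns the snapshot itself when it has no parent.
        public static SnapshotInfo rootOf(SnapshotInfo snapshot) {
            SnapshotInfo current = snapshot;
            while (current.getParent() != null) {
                current = current.getParent();
            }
            return current;
        }
    }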
package org.apache.cloudstack.storage.snapshot; import org.apache.cloudstack.engine.cloud.entity.api.SnapshotEntity; diff --git a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategy.java b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategy.java index 980b2ddb970..4e311862e50 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategy.java +++ b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategy.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.snapshot; public interface SnapshotStrategy { diff --git a/engine/storage/src/org/apache/cloudstack/storage/to/ImageDataStoreTO.java b/engine/storage/src/org/apache/cloudstack/storage/to/ImageDataStoreTO.java index 9f59a4ff337..43998a30102 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/to/ImageDataStoreTO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/to/ImageDataStoreTO.java @@ -1,6 +1,22 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
package org.apache.cloudstack.storage.to; -import org.apache.cloudstack.storage.image.datastore.ImageDataStoreInfo; +import org.apache.cloudstack.storage.image.store.ImageDataStoreInfo; public class ImageDataStoreTO { private final String type; diff --git a/engine/storage/src/org/apache/cloudstack/storage/to/ImageOnPrimayDataStoreTO.java b/engine/storage/src/org/apache/cloudstack/storage/to/ImageOnPrimayDataStoreTO.java index 18743d70bf2..f7c2322e346 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/to/ImageOnPrimayDataStoreTO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/to/ImageOnPrimayDataStoreTO.java @@ -22,11 +22,11 @@ import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo; public class ImageOnPrimayDataStoreTO { private final String pathOnPrimaryDataStore; - private PrimaryDataStoreTO dataStore; + private final PrimaryDataStoreTO dataStore; private final TemplateTO template; public ImageOnPrimayDataStoreTO(TemplateOnPrimaryDataStoreInfo template) { this.pathOnPrimaryDataStore = template.getPath(); - //this.dataStore = template.getPrimaryDataStore().getDataStoreTO(); + this.dataStore = template.getPrimaryDataStore().getDataStoreTO(); this.template = new TemplateTO(template.getTemplate()); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/to/NfsPrimaryDataStoreTO.java b/engine/storage/src/org/apache/cloudstack/storage/to/NfsPrimaryDataStoreTO.java index 06ff16b2bfe..96fb6bb2401 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/to/NfsPrimaryDataStoreTO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/to/NfsPrimaryDataStoreTO.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.to; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; diff --git a/engine/storage/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java b/engine/storage/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java index 13d51acb7a1..cd67b97b02c 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
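Note: the constructor above now populates every field eagerly from the *Info view (the previously commented-out dataStore assignment is live) and marks the fields final, so these transfer objects are immutable once built for the wire. The idiom in isolation, with invented names:

    // A transfer object snapshots the state it needs at construction time;
    // final fields guarantee it cannot drift from what was serialized.
    public class ExampleTO {
        private final String path;
        private final long size;

        public ExampleTO(String path, long size) {
            this.path = path;
            this.size = size;
        }

        public String getPath() { return path; }
        public long getSize() { return size; }
    }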
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.to; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; diff --git a/engine/storage/src/org/apache/cloudstack/storage/to/TemplateTO.java b/engine/storage/src/org/apache/cloudstack/storage/to/TemplateTO.java index 26a523ac2d7..b9db8cc95ba 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/to/TemplateTO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/to/TemplateTO.java @@ -1,19 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.to; -import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType; import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.image.datastore.ImageDataStoreInfo; +import org.apache.cloudstack.storage.image.store.ImageDataStoreInfo; public class TemplateTO { private final String path; private final String uuid; - private DiskFormat diskType; + private final VolumeDiskType diskType; private final ImageDataStoreTO imageDataStore; public TemplateTO(TemplateInfo template) { this.path = template.getPath(); this.uuid = template.getUuid(); - //this.diskType = template.getDiskType(); + this.diskType = template.getDiskType(); this.imageDataStore = new ImageDataStoreTO((ImageDataStoreInfo)template.getDataStore()); } @@ -25,7 +41,7 @@ public class TemplateTO { return this.uuid; } - public DiskFormat getDiskType() { + public VolumeDiskType getDiskType() { return this.diskType; } diff --git a/engine/storage/src/org/apache/cloudstack/storage/to/VolumeTO.java b/engine/storage/src/org/apache/cloudstack/storage/to/VolumeTO.java index 4373bada650..af71344bf33 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/to/VolumeTO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/to/VolumeTO.java @@ -1,29 +1,44 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.to; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType; import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeType; public class VolumeTO { private final String uuid; private final String path; - private VolumeType volumeType; - private DiskFormat diskType; + private final VolumeType volumeType; + private final VolumeDiskType diskType; private PrimaryDataStoreTO dataStore; - private String name; + private final String name; private final long size; public VolumeTO(VolumeInfo volume) { this.uuid = volume.getUuid(); - this.path = volume.getUri(); - //this.volumeType = volume.getType(); - //this.diskType = volume.getDiskType(); + this.path = volume.getPath(); + this.volumeType = volume.getType(); + this.diskType = volume.getDiskType(); if (volume.getDataStore() != null) { - this.dataStore = new PrimaryDataStoreTO((PrimaryDataStoreInfo)volume.getDataStore()); + this.dataStore = new PrimaryDataStoreTO(volume.getDataStore()); } else { this.dataStore = null; } - //this.name = volume.getName(); + this.name = volume.getName(); this.size = volume.getSize(); } @@ -39,7 +54,7 @@ public class VolumeTO { return this.volumeType; } - public DiskFormat getDiskType() { + public VolumeDiskType getDiskType() { return this.diskType; } diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/ObjectInDataStoreStateMachine.java b/engine/storage/src/org/apache/cloudstack/storage/volume/ObjectInDataStoreStateMachine.java index d0530d1934a..11cf2ef928c 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/ObjectInDataStoreStateMachine.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/ObjectInDataStoreStateMachine.java @@ -23,11 +23,8 @@ import com.cloud.utils.fsm.StateObject; public interface ObjectInDataStoreStateMachine extends StateObject { enum State { Allocated("The initial state"), - Creating2("This is only used with createOnlyRequested event"), - Creating("The object is being creating on data store"), - Created("The object is created"), + Creating("The template is being downloading to data store"), Ready("Template downloading is complished"), - Copying("The object is being coping"), Destroying("Template is destroying"), Destroyed("Template is destroyed"), Failed("Failed to download template"); @@ -44,11 +41,8 @@ public interface ObjectInDataStoreStateMachine extends StateObject callback); - public void revertSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback); -} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java b/engine/storage/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java similarity index 68% rename from engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java rename to 
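Note: with Creating2/Created/Copying removed, the object-in-datastore lifecycle collapses to Allocated -> Creating -> Ready -> Destroying -> Destroyed, plus Failed. A self-contained sketch of the resulting transition table; the State names come from the hunk above, but the Event names are assumptions, since the Event hunk is garbled in this view:

    import java.util.EnumMap;
    import java.util.Map;

    class ObjectInStoreLifecycleSketch {
        enum State { Allocated, Creating, Ready, Destroying, Destroyed, Failed }
        // Assumed event names -- only the states are fully visible above.
        enum Event { CreateRequested, OperationSucceeded, OperationFailed, DestroyRequested }

        static final Map<State, Map<Event, State>> TABLE =
                new EnumMap<State, Map<Event, State>>(State.class);
        static {
            put(State.Allocated,  Event.CreateRequested,    State.Creating);
            put(State.Creating,   Event.OperationSucceeded, State.Ready);
            put(State.Creating,   Event.OperationFailed,    State.Failed);
            put(State.Ready,      Event.DestroyRequested,   State.Destroying);
            put(State.Destroying, Event.OperationSucceeded, State.Destroyed);
        }

        private static void put(State s, Event e, State next) {
            Map<Event, State> row = TABLE.get(s);
            if (row == null) {
                row = new EnumMap<Event, State>(Event.class);
                TABLE.put(s, row);
            }
            row.put(e, next);
        }
    }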
engine/storage/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java index 14d741707b5..b90a6d60a80 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java @@ -25,14 +25,18 @@ import java.util.Map; import java.util.concurrent.ExecutionException; import org.apache.cloudstack.engine.cloud.entity.api.SnapshotEntity; +import org.apache.cloudstack.engine.cloud.entity.api.TemplateEntity; import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; import org.apache.cloudstack.engine.datacenter.entity.api.StorageEntity; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType; import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeType; import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreEntityImpl; +import org.apache.cloudstack.storage.image.TemplateEntityImpl; +import org.apache.cloudstack.storage.image.TemplateInfo; import org.apache.cloudstack.storage.volume.VolumeService.VolumeApiResult; import com.cloud.utils.exception.CloudRuntimeException; @@ -72,27 +76,27 @@ public class VolumeEntityImpl implements VolumeEntity { @Override public String getCurrentState() { - return null; + return volumeInfo.getCurrentState().toString(); } @Override public String getDesiredState() { - return null; + return volumeInfo.getDesiredState().toString(); } @Override public Date getCreatedTime() { - return null; + return volumeInfo.getCreatedDate(); } @Override public Date getLastUpdatedTime() { - return null; + return volumeInfo.getUpdatedDate(); } @Override public String getOwner() { - return null; + return volumeInfo.getOwner(); } @@ -151,20 +155,55 @@ public class VolumeEntityImpl implements VolumeEntity { } @Override - public DiskFormat getDiskType() { - return null; + public VolumeDiskType getDiskType() { + return volumeInfo.getDiskType(); } @Override public VolumeType getType() { - return null; + return volumeInfo.getType(); } @Override public StorageEntity getDataStore() { - return new PrimaryDataStoreEntityImpl((PrimaryDataStoreInfo) volumeInfo.getDataStore()); + return new PrimaryDataStoreEntityImpl(volumeInfo.getDataStore()); } + @Override + public boolean createVolumeFromTemplate(long dataStoreId, VolumeDiskType diskType, TemplateEntity template) { + TemplateInfo ti = ((TemplateEntityImpl)template).getTemplateInfo(); + + AsyncCallFuture future = vs.createVolumeFromTemplateAsync(volumeInfo, dataStoreId, diskType, ti); + try { + result = future.get(); + if (!result.isSuccess()) { + throw new CloudRuntimeException("create volume from template failed: " + result.getResult()); + } + return true; + } catch (InterruptedException e) { + throw new CloudRuntimeException("wait result failed", e); + } catch (ExecutionException e) { + throw new CloudRuntimeException("wait result failed", e); + } + } + + @Override + public boolean createVolume(long dataStoreId, VolumeDiskType diskType) { + AsyncCallFuture future = vs.createVolumeAsync(volumeInfo, dataStoreId, diskType); + try { + result = 
future.get(); + if (result.isSuccess()) { + return true; + } else { + throw new CloudRuntimeException("Failed to create volume:" + result.getResult()); + } + } catch (InterruptedException e) { + throw new CloudRuntimeException("wait volume info failed", e); + } catch (ExecutionException e) { + throw new CloudRuntimeException("wait volume failed", e); + } + } + @Override public void destroy() { AsyncCallFuture future = vs.deleteVolumeAsync(volumeInfo); diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/VolumeService.java b/engine/storage/src/org/apache/cloudstack/storage/volume/VolumeService.java index 19a4c3a881c..2bd2127de40 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/VolumeService.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/VolumeService.java @@ -19,13 +19,13 @@ package org.apache.cloudstack.storage.volume; import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; -import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType; import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeType; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.EndPoint; +import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.image.TemplateInfo; public interface VolumeService { @@ -52,7 +52,7 @@ public interface VolumeService { * * @return the volume object */ - AsyncCallFuture createVolumeAsync(VolumeInfo volume, long dataStoreId); + AsyncCallFuture createVolumeAsync(VolumeInfo volume, long dataStoreId, VolumeDiskType diskType); /** * Delete volume @@ -87,5 +87,5 @@ public interface VolumeService { VolumeEntity getVolumeEntity(long volumeId); - AsyncCallFuture createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId, TemplateInfo template); + AsyncCallFuture createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId, VolumeDiskType diskType, TemplateInfo template); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java deleted file mode 100644 index a7397e17c4f..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
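Note: createVolume()/createVolumeFromTemplate() above wrap the async service in a blocking call: submit, then future.get(), translating both interrupts and failed results into CloudRuntimeException. The pattern in isolation as a generic sketch, assuming AsyncCallFuture behaves like a java.util.concurrent.Future:

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Future;

    public final class BlockOnFuture {
        private BlockOnFuture() {
        }

        // Wait for an async result, converting both failure modes into one
        // unchecked exception, as the entity methods above do.
        public static <T> T await(Future<T> future) {
            try {
                return future.get();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // keep the interrupt flag set
                throw new RuntimeException("wait result failed", e);
            } catch (ExecutionException e) {
                throw new RuntimeException("wait result failed", e);
            }
        }
    }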
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.volume.datastore; - -import java.util.Map; - -import javax.inject.Inject; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; -import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd; -import org.apache.cloudstack.storage.datastore.PrimaryDataStore; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; -import org.springframework.stereotype.Component; - -import com.cloud.utils.exception.CloudRuntimeException; - -@Component -public class PrimaryDataStoreHelper { - @Inject - private PrimaryDataStoreDao dataStoreDao; - public PrimaryDataStoreVO createPrimaryDataStore(Map params) { - PrimaryDataStoreVO dataStoreVO = dataStoreDao.findPoolByUUID(params.get("uuid")); - if (dataStoreVO != null) { - throw new CloudRuntimeException("duplicate uuid: " + params.get("uuid")); - } - - dataStoreVO = new PrimaryDataStoreVO(); - dataStoreVO.setStorageProviderId(Long.parseLong(params.get("providerId"))); - dataStoreVO.setHostAddress(params.get("server")); - dataStoreVO.setPath(params.get("path")); - dataStoreVO.setPoolType(params.get("protocol")); - dataStoreVO.setPort(Integer.parseInt(params.get("port"))); - //dataStoreVO.setKey(params.get("key")); - dataStoreVO.setName(params.get("name")); - dataStoreVO.setUuid(params.get("uuid")); - dataStoreVO = dataStoreDao.persist(dataStoreVO); - return dataStoreVO; - } - - public boolean deletePrimaryDataStore(long id) { - PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(id); - if (dataStoreVO == null) { - throw new CloudRuntimeException("can't find store: " + id); - } - dataStoreDao.remove(id); - return true; - } - - public void attachCluster(DataStore dataStore) { - //send down AttachPrimaryDataStoreCmd command to all the hosts in the cluster - AttachPrimaryDataStoreCmd cmd = new AttachPrimaryDataStoreCmd(dataStore.getUri()); - /*for (EndPoint ep : dataStore.getEndPoints()) { - ep.sendMessage(cmd); - } */ - } - - -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/db/VolumeDao2Impl.java b/engine/storage/src/org/apache/cloudstack/storage/volume/db/VolumeDao2Impl.java index 0eb0ac30d5c..1e12498dff6 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/db/VolumeDao2Impl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/db/VolumeDao2Impl.java @@ -38,7 +38,7 @@ import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Volume; import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/db/VolumeVO.java b/engine/storage/src/org/apache/cloudstack/storage/volume/db/VolumeVO.java index da8234e35f3..ee1600ddd4f 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/db/VolumeVO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/db/VolumeVO.java @@ -33,7 +33,7 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import org.apache.cloudstack.api.Identity; -import 
org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.Unknown; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.Volume; @@ -113,7 +113,7 @@ public class VolumeVO implements Identity, StateObject { StoragePoolType poolType; @Column(name = "disk_type") - DiskFormat diskType; + String diskType = new Unknown().toString(); @Column(name = GenericDao.REMOVED_COLUMN) Date removed; @@ -406,11 +406,11 @@ public class VolumeVO implements Identity, StateObject { this.uuid = uuid; } - public DiskFormat getDiskType() { + public String getDiskType() { return diskType; } - public void setDiskType(DiskFormat type) { + public void setDiskType(String type) { diskType = type; } } diff --git a/engine/storage/storage.ucls b/engine/storage/storage.ucls index 9b3a47ce3f7..23a7b21fe00 100644 --- a/engine/storage/storage.ucls +++ b/engine/storage/storage.ucls @@ -1,3 +1,21 @@ + callback) { - this.driver.deleteAsync((VolumeObject) volume, callback); + @Override + public void deleteVolumeAsync(VolumeInfo volume, AsyncCompletionCallback callback) { + CommandResult result = new CommandResult(); + if (volume.isAttachedVM()) { + result.setResult("Can't delete volume: " + volume.getId() + ", if it's attached to a VM"); + callback.complete(result); + } + this.driver.deleteVolumeAsync((VolumeObject)volume, callback); } -*/ - /* + @Override public List getEndPoints() { Long clusterId = pdsv.getClusterId(); @@ -115,14 +149,13 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { List endpoints = new ArrayList(); List hosts = hostDao.findHypervisorHostInCluster(clusterId); for (HostVO host : hosts) { - HypervisorHostEndPoint ep = new HypervisorHostEndPoint( - host.getId(), host.getPrivateIpAddress()); - ComponentInject.inject(ep); + HypervisorHostEndPoint ep = new HypervisorHostEndPoint(host.getId(), host.getPrivateIpAddress()); + ComponentContext.inject(ep); endpoints.add(ep); } Collections.shuffle(endpoints); return endpoints; - }*/ + } public void setSupportedHypervisor(HypervisorType type) { this.supportedHypervisor = type; @@ -143,53 +176,43 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { } @Override - public boolean isVolumeDiskTypeSupported(DiskFormat diskType) { + public boolean isVolumeDiskTypeSupported(VolumeDiskType diskType) { return true; } @Override public long getCapacity() { - return 0; + return this.driver.getCapacity(); } @Override public long getAvailableCapacity() { - //return this.driver.getAvailableCapacity(); - return 0; - } - -/* @Override - public void createAsync(DataObject data, - AsyncCompletionCallback callback) { - this.provider.getVolumeDriver().createAsync(data, callback); - } -*/ -/* @Override - public void takeSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback) { - this.provider.getSnapshotDriver().takeSnapshot(snapshot, callback); - } -*/ -/* @Override - public void revertSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback) { - this.provider.getSnapshotDriver().revertSnapshot(snapshot, callback); + return this.driver.getAvailableCapacity(); } @Override - public void deleteSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback) { - this.provider.getSnapshotDriver().deleteSnapshot(snapshot, callback); - } -*/ - @Override - public boolean exists(DataObject data) { - return (objectInStoreMgr.findObject(data.getId(), data.getType(), this.getId(), this.getRole()) != null) ? 
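Note: getEndPoints() above is now live code: it wraps every hypervisor host in the cluster as an endpoint and shuffles the list, so command traffic spreads across hosts instead of always hitting the first one. The selection idiom in isolation (a generic sketch, not the CloudStack types):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public final class EndpointPick {
        private EndpointPick() {
        }

        // Shuffle once, then callers take get(0): a cheap way to spread load
        // across equivalent endpoints without tracking any state.
        public static <T> List<T> shuffled(List<T> endpoints) {
            List<T> copy = new ArrayList<T>(endpoints);
            Collections.shuffle(copy);
            return copy;
        }
    }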
true - : false; + public void createVolumeAsync(VolumeInfo vi, VolumeDiskType diskType, AsyncCompletionCallback callback) { + if (!isVolumeDiskTypeSupported(diskType)) { + throw new CloudRuntimeException("disk type " + diskType + " is not supported"); + } + VolumeObject vo = (VolumeObject) vi; + vo.setVolumeDiskType(diskType); + this.driver.createVolumeAsync(vo, callback); } @Override - public DiskFormat getDefaultDiskType() { + public boolean exists(VolumeInfo vi) { + VolumeVO vol = volumeDao.findByVolumeIdAndPoolId(vi.getId(), this.getId()); + return (vol != null) ? true : false; + } + + @Override + public boolean templateExists(TemplateInfo template) { + return (templatePrimaryStoreMgr.findTemplateOnPrimaryDataStore(template, this) != null) ? true : false; + } + + @Override + public VolumeDiskType getDefaultDiskType() { // TODO Auto-generated method stub return null; } @@ -200,19 +223,31 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { } @Override - public TemplateInfo getTemplate(long templateId) { - return imageDataFactory.getTemplate(templateId, this); + public TemplateOnPrimaryDataStoreInfo getTemplate(TemplateInfo template) { + return templatePrimaryStoreMgr.findTemplateOnPrimaryDataStore(template, this); } -/* @Override - public void createVoluemFromBaseImageAsync(VolumeInfo volume, - TemplateInfo template, - AsyncCompletionCallback callback) { + @Override + public VolumeInfo createVoluemFromBaseImage(VolumeInfo volume, TemplateOnPrimaryDataStoreInfo template) { VolumeObject vo = (VolumeObject) volume; - vo.setVolumeDiskType(template.getDiskType()); - this.driver.createVolumeFromBaseImageAsync(vo, template, callback); + vo.setVolumeDiskType(template.getTemplate().getDiskType()); + //this.driver.createVolumeFromBaseImage(vo, template); + return volume; + } + + @Override + public void createVoluemFromBaseImageAsync(VolumeInfo volume, TemplateInfo templateStore, AsyncCompletionCallback callback) { + VolumeObject vo = (VolumeObject) volume; + vo.setVolumeDiskType(templateStore.getDiskType()); + String templateUri = templateStore.getDataStore().grantAccess(templateStore, this.getEndPoints().get(0)); + this.driver.createVolumeFromBaseImageAsync(vo, templateUri, callback); + } + + @Override + public boolean installTemplate(TemplateOnPrimaryDataStoreInfo template) { + // TODO Auto-generated method stub + return true; } -*/ @Override public String getUuid() { @@ -234,40 +269,55 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { return this.pdsv.getPoolType(); } - public DataStoreProvider getProvider() { + @Override + public PrimaryDataStoreLifeCycle getLifeCycle() { + return lifeCycle; + } + + @Override + public PrimaryDataStoreProvider getProvider() { return this.provider; } @Override - public DataStoreRole getRole() { - return DataStoreRole.Primary; + public String grantAccess(VolumeInfo volume, EndPoint ep) { + return this.driver.grantAccess((VolumeObject)volume, ep); } @Override - public String getUri() { - return this.pdsv.getPoolType() + File.separator - + this.pdsv.getHostAddress() + File.separator - + this.pdsv.getPath(); + public boolean revokeAccess(VolumeInfo volume, EndPoint ep) { + // TODO Auto-generated method stub + return false; } @Override - public PrimaryDataStoreLifeCycle getLifeCycle() { - return this.lifeCycle; - } - - @Override - public SnapshotInfo getSnapshot(long snapshotId) { - return snapshotFactory.getSnapshot(snapshotId, this); - } - - @Override - public Scope getScope() { - if (pdsv.getScope() == ScopeType.CLUSTER) { - 
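Note: the grant-then-copy flow in createVoluemFromBaseImageAsync() above deserves a comment trail ("Voluem" is the existing identifier's spelling, left as-is). An annotated restatement of the hunk; the callback's generic parameter is not visible in this view and is an assumption:

    public void createVoluemFromBaseImageAsync(VolumeInfo volume, TemplateInfo templateStore,
            AsyncCompletionCallback<CommandResult> callback) {
        VolumeObject vo = (VolumeObject) volume;
        // The new volume inherits the template's disk type before the copy starts.
        vo.setVolumeDiskType(templateStore.getDiskType());
        // Step 1: the image store grants one of this cluster's endpoints access
        // to the template and returns a URI that endpoint can read from.
        String templateUri = templateStore.getDataStore()
                .grantAccess(templateStore, this.getEndPoints().get(0));
        // Step 2: the primary-store driver streams the base image from that URI.
        this.driver.createVolumeFromBaseImageAsync(vo, templateUri, callback);
    }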
return new ClusterScope(pdsv.getClusterId(), pdsv.getPodId(), pdsv.getDataCenterId()); - } else if (pdsv.getScope() == ScopeType.ZONE) { - return new ZoneScope(pdsv.getDataCenterId()); - } - + public String grantAccess(TemplateInfo template, EndPoint ep) { + // TODO Auto-generated method stub return null; } + + @Override + public boolean revokeAccess(TemplateInfo template, EndPoint ep) { + // TODO Auto-generated method stub + return false; + } + + @Override + public String grantAccess(SnapshotInfo snapshot, EndPoint ep) { + // TODO Auto-generated method stub + return null; + } + + @Override + public boolean revokeAccess(SnapshotInfo snapshot, EndPoint ep) { + // TODO Auto-generated method stub + return false; + } + + @Override + public String getRole() { + // TODO Auto-generated method stub + return "volumeStore"; + } + } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/AbstractPrimaryDataStoreConfigurator.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/AbstractPrimaryDataStoreConfigurator.java new file mode 100644 index 00000000000..2ecbfbf8ad6 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/AbstractPrimaryDataStoreConfigurator.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
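+// AbstractPrimaryDataStoreConfigurator is a template-method base: subclasses
+// supply the lifecycle, the driver, and the protocol transformer, and
+// getDataStore() below assembles them into a DefaultPrimaryDataStore. A
+// minimal sketch of the intended lookup flow (the "registry" shown here is
+// hypothetical, not part of this patch):
+//
+//   PrimaryDataStoreConfigurator configurator =
+//       registry.lookup(HypervisorType.KVM, "nfs"); // keyed by hypervisor + data store type
+//   PrimaryDataStore store = configurator.getDataStore(dataStoreId);
+//   store.createVolumeAsync(volumeInfo, diskType, callback);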
+package org.apache.cloudstack.storage.datastore.configurator; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.storage.datastore.DefaultPrimaryDataStore; +import org.apache.cloudstack.storage.datastore.PrimaryDataStore; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO; +import org.apache.cloudstack.storage.datastore.driver.PrimaryDataStoreDriver; +import com.cloud.utils.exception.CloudRuntimeException; + +public abstract class AbstractPrimaryDataStoreConfigurator implements PrimaryDataStoreConfigurator { + @Inject + protected PrimaryDataStoreDao dataStoreDao; + + protected abstract PrimaryDataStoreLifeCycle getLifeCycle(); + + protected abstract PrimaryDataStoreDriver getDriver(); + + protected abstract boolean isLocalStorageSupported(); + + @Override + public PrimaryDataStore getDataStore(long dataStoreId) { + PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(dataStoreId); + if (dataStoreVO == null) { + throw new CloudRuntimeException("Can't find primary data store: " + dataStoreId); + } + + DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO); + dataStore.setDriver(getDriver()); + dataStore.setLifeCycle(getLifeCycle()); + dataStore.setSupportedHypervisor(getSupportedHypervisor()); + dataStore.setLocalStorageFlag(isLocalStorageSupported()); + dataStore.setProtocolTransFormer(getProtocolTransformer()); + return dataStore; + } +} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/PrimaryDataStoreConfigurator.java similarity index 57% rename from engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/PrimaryDataStoreConfigurator.java index cc13c3a1f56..e868b4e149d 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/PrimaryDataStoreConfigurator.java @@ -16,27 +16,17 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.cloudstack.engine.subsystem.api.storage; +package org.apache.cloudstack.storage.datastore.configurator; -import java.util.Map; - - -public interface DataStoreLifeCycle { - public boolean initialize(DataStore store, Map dsInfos); - - public boolean attachCluster(DataStore store, ClusterScope scope); - - boolean attachZone(DataStore dataStore, ZoneScope scope); - - public boolean dettach(); - - public boolean unmanaged(); - - public boolean maintain(); - - public boolean cancelMaintain(); - - public boolean deleteDataStore(); +import org.apache.cloudstack.storage.datastore.PrimaryDataStore; +import org.apache.cloudstack.storage.datastore.configurator.validator.StorageProtocolTransformer; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.Storage.StoragePoolType; +public interface PrimaryDataStoreConfigurator { + public HypervisorType getSupportedHypervisor(); + public String getSupportedDataStoreType(); + public PrimaryDataStore getDataStore(long dataStoreId); + public StorageProtocolTransformer getProtocolTransformer(); } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/kvm/AbstractKvmConfigurator.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/kvm/AbstractKvmConfigurator.java new file mode 100644 index 00000000000..008af857106 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/kvm/AbstractKvmConfigurator.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.configurator.kvm; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.storage.datastore.configurator.AbstractPrimaryDataStoreConfigurator; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.driver.DefaultPrimaryDataStoreDriverImpl; +import org.apache.cloudstack.storage.datastore.driver.PrimaryDataStoreDriver; +import org.apache.cloudstack.storage.datastore.lifecycle.DefaultKvmPrimaryDataStoreLifeCycle; + +import com.cloud.hypervisor.Hypervisor.HypervisorType; + +public abstract class AbstractKvmConfigurator extends AbstractPrimaryDataStoreConfigurator { + @Inject + PrimaryDataStoreDao dataStoreDao; + + @Override + public HypervisorType getSupportedHypervisor() { + return HypervisorType.KVM; + } + + protected PrimaryDataStoreLifeCycle getLifeCycle() { + return new DefaultKvmPrimaryDataStoreLifeCycle(dataStoreDao); + } + + protected PrimaryDataStoreDriver getDriver() { + return new DefaultPrimaryDataStoreDriverImpl(); + } +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/kvm/KvmCLVMConfigurator.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/kvm/KvmCLVMConfigurator.java new file mode 100644 index 00000000000..f0b581f17e9 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/kvm/KvmCLVMConfigurator.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.configurator.kvm; + +import org.apache.cloudstack.storage.datastore.configurator.validator.CLVMProtocolTransformer; +import org.apache.cloudstack.storage.datastore.configurator.validator.StorageProtocolTransformer; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Component; + +@Component +@Qualifier("defaultProvider") +public class KvmCLVMConfigurator extends AbstractKvmConfigurator { + + @Override + public String getSupportedDataStoreType() { + return "clvm"; + } + + @Override + public StorageProtocolTransformer getProtocolTransformer() { + return new CLVMProtocolTransformer(); + } + + @Override + protected boolean isLocalStorageSupported() { + // TODO Auto-generated method stub + return false; + } + + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/kvm/KvmNfsConfigurator.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/kvm/KvmNfsConfigurator.java new file mode 100644 index 00000000000..1c36f152a97 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/kvm/KvmNfsConfigurator.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.configurator.kvm; + +import javax.inject.Inject; + +import org.apache.cloudstack.storage.datastore.configurator.validator.NfsProtocolTransformer; +import org.apache.cloudstack.storage.datastore.configurator.validator.StorageProtocolTransformer; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Component; + + +@Component +@Qualifier("defaultProvider") +public class KvmNfsConfigurator extends AbstractKvmConfigurator { + @Inject + PrimaryDataStoreDao dataStoreDao; + + @Override + public String getSupportedDataStoreType() { + return "nfs"; + } + + @Override + public StorageProtocolTransformer getProtocolTransformer() { + return new NfsProtocolTransformer(dataStoreDao); + } + + @Override + protected boolean isLocalStorageSupported() { + // TODO Auto-generated method stub + return false; + } +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/kvm/KvmRBDConfigurator.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/kvm/KvmRBDConfigurator.java new file mode 100644 index 00000000000..a6442393104 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/kvm/KvmRBDConfigurator.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.configurator.kvm; + +import org.apache.cloudstack.storage.datastore.configurator.validator.StorageProtocolTransformer; +import org.apache.cloudstack.storage.datastore.configurator.validator.RBDValidator; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Component; + +import com.cloud.storage.Storage.StoragePoolType; + +@Component +@Qualifier("defaultProvider") +public class KvmRBDConfigurator extends AbstractKvmConfigurator { + + public String getSupportedDataStoreType() { + return "rbd"; + } + + @Override + public StorageProtocolTransformer getProtocolTransformer() { + return new RBDValidator(); + } + + @Override + protected boolean isLocalStorageSupported() { + // TODO Auto-generated method stub + return false; + } +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/CLVMProtocolTransformer.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/CLVMProtocolTransformer.java new file mode 100644 index 00000000000..4dd19d260ab --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/CLVMProtocolTransformer.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.configurator.validator; + +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.VolumeTO; + +public class CLVMProtocolTransformer implements StorageProtocolTransformer { + + @Override + public boolean normalizeUserInput(Map params) { + // TODO Auto-generated method stub + return false; + } + + @Override + public List getInputParamNames() { + // TODO Auto-generated method stub + return null; + } + + @Override + public PrimaryDataStoreTO getDataStoreTO(PrimaryDataStoreInfo dataStore) { + // TODO Auto-generated method stub + return null; + } + + @Override + public VolumeTO getVolumeTO(VolumeInfo volume) { + // TODO Auto-generated method stub + return null; + } + + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/FileSystemValidator.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/FileSystemValidator.java new file mode 100644 index 00000000000..8e1180eef3b --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/FileSystemValidator.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.configurator.validator; + +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.VolumeTO; + +public class FileSystemValidator implements StorageProtocolTransformer { + + @Override + public boolean normalizeUserInput(Map params) { + // TODO Auto-generated method stub + return false; + } + + @Override + public List getInputParamNames() { + // TODO Auto-generated method stub + return null; + } + + @Override + public PrimaryDataStoreTO getDataStoreTO(PrimaryDataStoreInfo dataStore) { + // TODO Auto-generated method stub + return null; + } + + @Override + public VolumeTO getVolumeTO(VolumeInfo volume) { + // TODO Auto-generated method stub + return null; + } + + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/ISCSIProtocolTransformer.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/ISCSIProtocolTransformer.java new file mode 100644 index 00000000000..44bdba9e0c1 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/ISCSIProtocolTransformer.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.configurator.validator; + +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.VolumeTO; + +public class ISCSIProtocolTransformer implements StorageProtocolTransformer { + + @Override + public List getInputParamNames() { + // TODO Auto-generated method stub + return null; + } + + @Override + public boolean normalizeUserInput(Map params) { + // TODO Auto-generated method stub + return false; + } + + @Override + public PrimaryDataStoreTO getDataStoreTO(PrimaryDataStoreInfo dataStore) { + // TODO Auto-generated method stub + return null; + } + + @Override + public VolumeTO getVolumeTO(VolumeInfo volume) { + // TODO Auto-generated method stub + return null; + } + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/NfsProtocolTransformer.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/NfsProtocolTransformer.java new file mode 100644 index 00000000000..67ec5e8dbdf --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/NfsProtocolTransformer.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.configurator.validator; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO; +import org.apache.cloudstack.storage.to.NfsPrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.VolumeTO; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class NfsProtocolTransformer implements StorageProtocolTransformer { + private final PrimaryDataStoreDao dataStoreDao; + + public NfsProtocolTransformer(PrimaryDataStoreDao dao) { + this.dataStoreDao = dao; + } + + @Override + public boolean normalizeUserInput(Map<String, String> params) { + String url = params.get("url"); + + try { + URI uri = new URI(url); + if (!"nfs".equalsIgnoreCase(uri.getScheme())) { + throw new CloudRuntimeException("invalid protocol, url must start with nfs://"); + } + String storageHost = uri.getHost(); + String hostPath = uri.getPath(); + String userInfo = uri.getUserInfo(); + int port = uri.getPort(); + if (port == -1) { + port = 2049; + } + params.put("server", storageHost); + params.put("path", hostPath); + params.put("user", userInfo); + params.put("port", String.valueOf(port)); + params.put("uuid", UUID.nameUUIDFromBytes((storageHost + hostPath).getBytes()).toString()); + } catch (URISyntaxException e) { + throw new CloudRuntimeException("invalid url: " + e.toString()); + } + return true; + } + + @Override + public List<String> getInputParamNames() { + List<String> paramNames = new ArrayList<String>(); + paramNames.add("server"); + paramNames.add("path"); + return paramNames; + } + + @Override + public PrimaryDataStoreTO getDataStoreTO(PrimaryDataStoreInfo dataStore) { + NfsPrimaryDataStoreTO dataStoreTO = new NfsPrimaryDataStoreTO(dataStore); + PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(dataStore.getId()); + dataStoreTO.setServer(dataStoreVO.getHostAddress()); + dataStoreTO.setPath(dataStoreVO.getPath()); + return dataStoreTO; + } + + @Override + public VolumeTO getVolumeTO(VolumeInfo volume) { + VolumeTO vol = new VolumeTO(volume); + vol.setDataStore(this.getDataStoreTO(volume.getDataStore())); + return vol; + } + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/RBDValidator.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/RBDValidator.java new file mode 100644 index 00000000000..e18c8612e3a --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/RBDValidator.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.datastore.configurator.validator; + +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.VolumeTO; + +public class RBDValidator implements StorageProtocolTransformer { + + @Override + public boolean normalizeUserInput(Map params) { + // TODO Auto-generated method stub + return false; + } + + @Override + public List getInputParamNames() { + // TODO Auto-generated method stub + return null; + } + + @Override + public PrimaryDataStoreTO getDataStoreTO(PrimaryDataStoreInfo dataStore) { + // TODO Auto-generated method stub + return null; + } + + @Override + public VolumeTO getVolumeTO(VolumeInfo volume) { + // TODO Auto-generated method stub + return null; + } + +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/VolumeDataFactory.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/StorageProtocolTransformer.java similarity index 59% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/VolumeDataFactory.java rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/StorageProtocolTransformer.java index 0cffc055ee4..ab9a613cc40 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/VolumeDataFactory.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/StorageProtocolTransformer.java @@ -16,11 +16,19 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.cloudstack.storage.datastore; +package org.apache.cloudstack.storage.datastore.configurator.validator; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.VolumeTO; -public interface VolumeDataFactory { - VolumeInfo getVolume(long volumeId, DataStore store); +public interface StorageProtocolTransformer { + public boolean normalizeUserInput(Map<String, String> params); + public PrimaryDataStoreTO getDataStoreTO(PrimaryDataStoreInfo dataStore); + public VolumeTO getVolumeTO(VolumeInfo volume); + public List<String> getInputParamNames(); } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/VMFSValidator.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/VMFSValidator.java new file mode 100644 index 00000000000..a0ae1f84845 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/validator/VMFSValidator.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.apache.cloudstack.storage.datastore.configurator.validator; + +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.VolumeTO; + +public class VMFSValidator implements StorageProtocolTransformer { + + @Override + public boolean normalizeUserInput(Map params) { + // TODO Auto-generated method stub + return false; + } + + @Override + public List getInputParamNames() { + // TODO Auto-generated method stub + return null; + } + + @Override + public PrimaryDataStoreTO getDataStoreTO(PrimaryDataStoreInfo dataStore) { + // TODO Auto-generated method stub + return null; + } + + @Override + public VolumeTO getVolumeTO(VolumeInfo volume) { + // TODO Auto-generated method stub + return null; + } + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/vmware/AbstractVmwareConfigurator.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/vmware/AbstractVmwareConfigurator.java new file mode 100644 index 00000000000..c688bd6b005 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/vmware/AbstractVmwareConfigurator.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.configurator.vmware; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.storage.datastore.configurator.AbstractPrimaryDataStoreConfigurator; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.driver.DefaultPrimaryDataStoreDriverImpl; +import org.apache.cloudstack.storage.datastore.driver.PrimaryDataStoreDriver; +import org.apache.cloudstack.storage.datastore.lifecycle.DefaultVmwarePrimaryDataStoreLifeCycle; +import com.cloud.hypervisor.Hypervisor.HypervisorType; + +public abstract class AbstractVmwareConfigurator extends AbstractPrimaryDataStoreConfigurator { + + @Inject + PrimaryDataStoreDao dataStoreDao; + @Override + public HypervisorType getSupportedHypervisor() { + return HypervisorType.VMware; + } + + @Override + protected PrimaryDataStoreLifeCycle getLifeCycle() { + return new DefaultVmwarePrimaryDataStoreLifeCycle(dataStoreDao); + } + + @Override + protected PrimaryDataStoreDriver getDriver() { + return new DefaultPrimaryDataStoreDriverImpl(); + } +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/vmware/VmwareIsciConfigurator.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/vmware/VmwareIsciConfigurator.java new file mode 100644 index 00000000000..4e596560034 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/vmware/VmwareIsciConfigurator.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.configurator.vmware; + +import org.apache.cloudstack.storage.datastore.configurator.validator.ISCSIProtocolTransformer; +import org.apache.cloudstack.storage.datastore.configurator.validator.StorageProtocolTransformer; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Component; + +@Component +@Qualifier("defaultProvider") +public class VmwareIsciConfigurator extends AbstractVmwareConfigurator { + + public String getSupportedDataStoreType() { + return "iscsi"; + } + + @Override + public StorageProtocolTransformer getProtocolTransformer() { + return new ISCSIProtocolTransformer(); + } + + @Override + protected boolean isLocalStorageSupported() { + // TODO Auto-generated method stub + return false; + } + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/vmware/VmwareNfsConfigurator.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/vmware/VmwareNfsConfigurator.java new file mode 100644 index 00000000000..afd8d21a626 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/vmware/VmwareNfsConfigurator.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.configurator.vmware; + +import javax.inject.Inject; + +import org.apache.cloudstack.storage.datastore.configurator.validator.NfsProtocolTransformer; +import org.apache.cloudstack.storage.datastore.configurator.validator.StorageProtocolTransformer; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Component; + +@Component +@Qualifier("defaultProvider") +public class VmwareNfsConfigurator extends AbstractVmwareConfigurator { + @Inject + PrimaryDataStoreDao dataStoreDao; + @Override + public String getSupportedDataStoreType() { + return "nfs"; + } + + @Override + public StorageProtocolTransformer getProtocolTransformer() { + return new NfsProtocolTransformer(dataStoreDao); + } + + @Override + protected boolean isLocalStorageSupported() { + // TODO Auto-generated method stub + return false; + } +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/vmware/VmwareVMFSConfigurator.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/vmware/VmwareVMFSConfigurator.java new file mode 100644 index 00000000000..fc8738c2507 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/vmware/VmwareVMFSConfigurator.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.configurator.vmware; + +import org.apache.cloudstack.storage.datastore.configurator.validator.StorageProtocolTransformer; +import org.apache.cloudstack.storage.datastore.configurator.validator.VMFSValidator; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Component; + +import com.cloud.storage.Storage.StoragePoolType; + +@Component +@Qualifier("defaultProvider") +public class VmwareVMFSConfigurator extends AbstractVmwareConfigurator { + + @Override + public String getSupportedDataStoreType() { + return "vmfs"; + } + + @Override + public StorageProtocolTransformer getProtocolTransformer() { + return new VMFSValidator(); + } + + @Override + protected boolean isLocalStorageSupported() { + // TODO Auto-generated method stub + return false; + } +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/xen/AbstractXenConfigurator.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/xen/AbstractXenConfigurator.java new file mode 100644 index 00000000000..1181dea50df --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/xen/AbstractXenConfigurator.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
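+// Like the KVM and VMware variants above, AbstractXenConfigurator pins the
+// hypervisor type and returns the default lifecycle and driver; concrete
+// subclasses only choose a data store type string and a protocol
+// transformer. The dataStoreDao referenced below is the @Inject field
+// inherited from AbstractPrimaryDataStoreConfigurator.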
+package org.apache.cloudstack.storage.datastore.configurator.xen; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.storage.datastore.configurator.AbstractPrimaryDataStoreConfigurator; +import org.apache.cloudstack.storage.datastore.driver.DefaultPrimaryDataStoreDriverImpl; +import org.apache.cloudstack.storage.datastore.driver.PrimaryDataStoreDriver; +import org.apache.cloudstack.storage.datastore.lifecycle.DefaultXenPrimaryDataStoreLifeCycle; + +import com.cloud.hypervisor.Hypervisor.HypervisorType; + +public abstract class AbstractXenConfigurator extends AbstractPrimaryDataStoreConfigurator { + @Override + public HypervisorType getSupportedHypervisor() { + return HypervisorType.XenServer; + } + + protected PrimaryDataStoreLifeCycle getLifeCycle() { + return new DefaultXenPrimaryDataStoreLifeCycle(dataStoreDao); + } + + protected PrimaryDataStoreDriver getDriver() { + return new DefaultPrimaryDataStoreDriverImpl(); + } +} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/CopyCommandResult.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/xen/XenIscsiConfigurator.java similarity index 50% rename from engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/CopyCommandResult.java rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/xen/XenIscsiConfigurator.java index 100fd4edba3..1120ec2e743 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/CopyCommandResult.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/xen/XenIscsiConfigurator.java @@ -16,16 +16,30 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.cloudstack.engine.subsystem.api.storage; +package org.apache.cloudstack.storage.datastore.configurator.xen; -public class CopyCommandResult extends CommandResult { - private final String path; - public CopyCommandResult(String path) { - super(); - this.path = path; - } - - public String getPath() { - return this.path; +import org.apache.cloudstack.storage.datastore.configurator.validator.ISCSIProtocolTransformer; +import org.apache.cloudstack.storage.datastore.configurator.validator.StorageProtocolTransformer; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Component; + +@Component +@Qualifier("defaultProvider") +public class XenIscsiConfigurator extends AbstractXenConfigurator { + + @Override + public String getSupportedDataStoreType() { + return "iscsi"; + } + + @Override + public StorageProtocolTransformer getProtocolTransformer() { + return new ISCSIProtocolTransformer(); + } + + protected boolean isLocalStorageSupported() { + // TODO Auto-generated method stub + return false; } + } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/xen/XenNfsConfigurator.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/xen/XenNfsConfigurator.java new file mode 100644 index 00000000000..0cb24a8d574 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/configurator/xen/XenNfsConfigurator.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.datastore.configurator.xen; + +import org.apache.cloudstack.storage.datastore.configurator.validator.NfsProtocolTransformer; +import org.apache.cloudstack.storage.datastore.configurator.validator.StorageProtocolTransformer; +import org.apache.cloudstack.storage.datastore.protocol.DataStoreProtocol; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.stereotype.Component; + +@Component +@Qualifier("defaultProvider") +public class XenNfsConfigurator extends AbstractXenConfigurator { + @Override + public String getSupportedDataStoreType() { + return DataStoreProtocol.NFS.toString(); + } + + @Override + public StorageProtocolTransformer getProtocolTransformer() { + return new NfsProtocolTransformer(dataStoreDao); + } + + @Override + protected boolean isLocalStorageSupported() { + // TODO Auto-generated method stub + return false; + } +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java similarity index 100% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java similarity index 97% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index 247cdeec682..ef42208ec51 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -26,24 +26,20 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.storage.datastore.DataStoreStatus; import org.springframework.stereotype.Component; -import com.cloud.storage.StoragePoolDetailVO; -import com.cloud.storage.dao.StoragePoolDetailsDao; -import com.cloud.storage.dao.StoragePoolDetailsDaoImpl; -import com.cloud.utils.component.ComponentInject; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.Transaction; import 
com.cloud.utils.exception.CloudRuntimeException; @Component @@ -54,7 +50,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase DeleteLvmSearch; protected final GenericSearchBuilder StatusCountSearch; - protected final PrimaryDataStoreDetailsDao _detailsDao = null; + @Inject protected PrimaryDataStoreDetailsDao _detailsDao; private final String DetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and ("; private final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?"; @@ -100,9 +96,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase findPoolByName(String name) { diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailVO.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailVO.java similarity index 100% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailVO.java rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailVO.java diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDao.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDao.java similarity index 100% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDao.java rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDao.java diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDaoImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDaoImpl.java similarity index 100% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDaoImpl.java rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDaoImpl.java diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDao.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreProviderDao.java similarity index 84% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDao.java rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreProviderDao.java index dca83ce0b8a..ebba01ce3f8 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDao.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreProviderDao.java @@ -20,6 +20,6 @@ package org.apache.cloudstack.storage.datastore.db; import com.cloud.utils.db.GenericDao; -public interface DataStoreProviderDao extends GenericDao { - public DataStoreProviderVO findByName(String name); +public interface PrimaryDataStoreProviderDao extends GenericDao { + public PrimaryDataStoreProviderVO findByName(String name); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDaoImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreProviderDaoImpl.java similarity index 77% rename from 
engine/storage/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDaoImpl.java rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreProviderDaoImpl.java index ccb6b483253..0050c2f0df4 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDaoImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreProviderDaoImpl.java @@ -26,11 +26,11 @@ import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.db.SearchCriteria.Op; @Component -class DataStoreProviderDaoImpl extends GenericDaoBase implements DataStoreProviderDao { +class PrimaryDataStoreProviderDaoImpl extends GenericDaoBase implements PrimaryDataStoreProviderDao { @Override - public DataStoreProviderVO findByName(String name) { - SearchCriteriaService sc = SearchCriteria2.create(DataStoreProviderVO.class); + public PrimaryDataStoreProviderVO findByName(String name) { + SearchCriteriaService sc = SearchCriteria2.create(PrimaryDataStoreProviderVO.class); sc.addAnd(sc.getEntity().getName(), Op.EQ, name); return sc.find(); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderVO.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreProviderVO.java similarity index 84% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderVO.java rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreProviderVO.java index dcdafddb175..7e31d9c1b21 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderVO.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreProviderVO.java @@ -25,8 +25,8 @@ import javax.persistence.Table; import javax.persistence.TableGenerator; @Entity -@Table(name = "data_store_provider") -public class DataStoreProviderVO { +@Table(name = "primary_data_store_provider") +public class PrimaryDataStoreProviderVO { @Id @TableGenerator(name = "data_store_provider_sq", table = "sequence", pkColumnName = "name", valueColumnName = "value", pkColumnValue = "data_store_provider_seq", allocationSize = 1) @Column(name = "id", updatable = false, nullable = false) @@ -35,9 +35,6 @@ public class DataStoreProviderVO { @Column(name = "name", nullable = false) private String name; - @Column(name = "uuid", nullable = false) - private String uuid; - public long getId() { return id; } @@ -49,12 +46,4 @@ public class DataStoreProviderVO { public void setName(String name) { this.name = name; } - - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public String getUuid() { - return this.uuid; - } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreVO.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreVO.java similarity index 95% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreVO.java rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreVO.java index 19e1990b4e9..c8265c7956c 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreVO.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreVO.java @@ -31,7 +31,6 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import org.apache.cloudstack.api.Identity; -import 
org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; import org.apache.cloudstack.storage.datastore.DataStoreStatus; import com.cloud.utils.db.GenericDao; @@ -97,8 +96,8 @@ public class PrimaryDataStoreVO implements Identity { @Column(name = "cluster_id") private Long clusterId; - @Column(name = "scope") - private ScopeType scope; + @Column(name = "configurator_key") + private String key; public long getId() { return id; @@ -237,12 +236,12 @@ public class PrimaryDataStoreVO implements Identity { this.name = name; } - public void setScope(ScopeType scope) { - this.scope = scope; + public void setKey(String key) { + this.key = key; } - public ScopeType getScope() { - return this.scope; + public String getKey() { + return this.key; } @Override diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java index fd8a35ca8b6..2855d4e96f9 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java @@ -1,27 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
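+// The driver sends storage commands to an agent EndPoint and reports back
+// through AsyncCompletionCallback. The create path below, in outline: build
+// a CreateVolumeCommand from the store's VolumeTO, pick the first EndPoint
+// of the data store, attach a CreateVolumeContext to an
+// AsyncCallbackDispatcher so the answer can be matched to the volume, then
+// sendMessageAsync() returns immediately; createVolumeAsyncCallback() later
+// copies the answer into a CommandResult and completes the parent callback.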
package org.apache.cloudstack.storage.datastore.driver; import java.util.List; -import java.util.Set; +import java.util.Map; -import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncRpcConext; +import org.apache.cloudstack.storage.EndPoint; +import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.command.CreateVolumeAnswer; import org.apache.cloudstack.storage.command.CreateVolumeCommand; -import org.apache.cloudstack.storage.command.DeleteCommand; +import org.apache.cloudstack.storage.command.CreateVolumeFromBaseImageCommand; +import org.apache.cloudstack.storage.command.DeleteVolumeCommand; import org.apache.cloudstack.storage.datastore.PrimaryDataStore; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; +import org.apache.cloudstack.storage.to.ImageOnPrimayDataStoreTO; +import org.apache.cloudstack.storage.to.VolumeTO; +import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo; +import org.apache.cloudstack.storage.volume.VolumeObject; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.agent.api.Answer; - +import com.cloud.agent.api.Command; +import com.cloud.utils.exception.CloudRuntimeException; public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { private static final Logger s_logger = Logger.getLogger(DefaultPrimaryDataStoreDriverImpl.class); @@ -34,28 +52,48 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver } + @Override + public void setDataStore(PrimaryDataStore dataStore) { + this.dataStore = dataStore; + } + private class CreateVolumeContext extends AsyncRpcConext { - private final DataObject volume; + private final VolumeObject volume; /** * @param callback */ - public CreateVolumeContext(AsyncCompletionCallback callback, DataObject volume) { + public CreateVolumeContext(AsyncCompletionCallback callback, VolumeObject volume) { super(callback); this.volume = volume; } - public DataObject getVolume() { + public VolumeObject getVolume() { return this.volume; } } - public Void createAsyncCallback(AsyncCallbackDispatcher callback, CreateVolumeContext context) { + @Override + public void createVolumeAsync(VolumeObject vol, AsyncCompletionCallback callback) { + List endPoints = vol.getDataStore().getEndPoints(); + EndPoint ep = endPoints.get(0); + VolumeInfo volInfo = vol; + CreateVolumeCommand createCmd = new CreateVolumeCommand(this.dataStore.getVolumeTO(volInfo)); + + CreateVolumeContext context = new CreateVolumeContext(callback, vol); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setContext(context) + .setCallback(caller.getTarget().createVolumeAsyncCallback(null, null)); + + ep.sendMessageAsync(createCmd, caller); + } + + public Void 
createVolumeAsyncCallback(AsyncCallbackDispatcher callback, CreateVolumeContext context) { CommandResult result = new CommandResult(); CreateVolumeAnswer volAnswer = (CreateVolumeAnswer) callback.getResult(); if (volAnswer.getResult()) { - DataObject volume = context.getVolume(); - //volume.setPath(volAnswer.getVolumeUuid()); + VolumeObject volume = context.getVolume(); + volume.setPath(volAnswer.getVolumeUuid()); } else { result.setResult(volAnswer.getDetails()); } @@ -65,18 +103,18 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver } @Override - public void deleteAsync(DataObject vo, AsyncCompletionCallback callback) { - DeleteCommand cmd = new DeleteCommand(vo.getUri()); - List endPoints = null; + public void deleteVolumeAsync(VolumeObject vo, AsyncCompletionCallback callback) { + DeleteVolumeCommand cmd = new DeleteVolumeCommand(this.dataStore.getVolumeTO(vo)); + List endPoints = vo.getDataStore().getEndPoints(); EndPoint ep = endPoints.get(0); AsyncRpcConext context = new AsyncRpcConext(callback); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); - caller.setCallback(caller.getTarget().deleteCallback(null, null)) + caller.setCallback(caller.getTarget().deleteVolumeCallback(null, null)) .setContext(context); ep.sendMessageAsync(cmd, caller); } - public Void deleteCallback(AsyncCallbackDispatcher callback, AsyncRpcConext context) { + public Void deleteVolumeCallback(AsyncCallbackDispatcher callback, AsyncRpcConext context) { CommandResult result = new CommandResult(); Answer answer = callback.getResult(); if (!answer.getResult()) { @@ -85,7 +123,19 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver context.getParentCallback().complete(result); return null; } - /* + + @Override + public String grantAccess(VolumeObject vol, EndPoint ep) { + // TODO Auto-generated method stub + return null; + } + + @Override + public boolean revokeAccess(VolumeObject vol, EndPoint ep) { + // TODO Auto-generated method stub + return true; + } + private class CreateVolumeFromBaseImageContext extends AsyncRpcConext { private final VolumeObject volume; @@ -99,14 +149,12 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver } } - @Override - public void createVolumeFromBaseImageAsync(VolumeObject volume, TemplateInfo template, AsyncCompletionCallback callback) { + public void createVolumeFromBaseImageAsync(VolumeObject volume, String template, AsyncCompletionCallback callback) { VolumeTO vol = this.dataStore.getVolumeTO(volume); + CreateVolumeFromBaseImageCommand cmd = new CreateVolumeFromBaseImageCommand(vol, template); List endPoints = this.dataStore.getEndPoints(); EndPoint ep = endPoints.get(0); - String templateUri = template.getDataStore().grantAccess(template, ep); - CreateVolumeFromBaseImageCommand cmd = new CreateVolumeFromBaseImageCommand(vol, templateUri); CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext(callback, volume); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); @@ -114,8 +162,10 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver .setCallback(caller.getTarget().createVolumeFromBaseImageAsyncCallback(null, null)); ep.sendMessageAsync(cmd, caller); - }*/ - /* + + + } + public Object createVolumeFromBaseImageAsyncCallback(AsyncCallbackDispatcher callback, CreateVolumeFromBaseImageContext context) { CreateVolumeAnswer answer = (CreateVolumeAnswer)callback.getResult(); CommandResult result = new 
CommandResult(); @@ -132,69 +182,37 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver AsyncCompletionCallback parentCall = context.getParentCallback(); parentCall.complete(result); return null; - }*/ - - @Override - public void createAsync(DataObject vol, - AsyncCompletionCallback callback) { - List endPoints = null; - EndPoint ep = endPoints.get(0); - CreateVolumeCommand createCmd = new CreateVolumeCommand(vol.getUri()); - - CreateVolumeContext context = null; - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); - caller.setContext(context) - .setCallback(caller.getTarget().createAsyncCallback(null, null)); - - ep.sendMessageAsync(createCmd, caller); - } @Override - public String grantAccess(DataObject vol, EndPoint ep) { + public long getCapacity() { // TODO Auto-generated method stub - return null; + return 0; } @Override - public boolean revokeAccess(DataObject vol, EndPoint ep) { + public long getAvailableCapacity() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public boolean initialize(Map params) { // TODO Auto-generated method stub return false; } @Override - public Set listObjects(DataStore store) { - // TODO Auto-generated method stub - return null; - } - - @Override - public void takeSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - - } - - @Override - public void revertSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - - } - - - - @Override - public boolean canCopy(DataObject srcData, DataObject destData) { + public boolean grantAccess(EndPoint ep) { // TODO Auto-generated method stub return false; } @Override - public void copyAsync(DataObject srcdata, DataObject destData, - AsyncCompletionCallback callback) { + public boolean revokeAccess(EndPoint ep) { // TODO Auto-generated method stub - + return false; } + } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/PrimaryDataStoreDriver.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/PrimaryDataStoreDriver.java new file mode 100644 index 00000000000..3a9b13d5422 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/PrimaryDataStoreDriver.java @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
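// Editorial sketch, not part of the patch: the createVolumeAsync /
// createVolumeAsyncCallback pair above, reduced to plain Java callbacks.
// Endpoint, VolumeAnswer and Result are illustrative stand-ins; the real code
// routes through EndPoint.sendMessageAsync and AsyncCallbackDispatcher.
import java.util.List;
import java.util.function.Consumer;

class AsyncCreateSketch {
    interface Endpoint { void sendAsync(Object cmd, Consumer<VolumeAnswer> onAnswer); }

    static class VolumeAnswer {
        final boolean ok; final String volumeUuid; final String details;
        VolumeAnswer(boolean ok, String volumeUuid, String details) {
            this.ok = ok; this.volumeUuid = volumeUuid; this.details = details;
        }
    }

    static class Result { String path; String error; }

    // Mirrors the driver: pick the first endpoint, send the create command,
    // and complete the caller's callback from the agent's answer.
    void createVolumeAsync(List<Endpoint> endpoints, Object createCmd, Consumer<Result> callback) {
        Endpoint ep = endpoints.get(0); // same "first endpoint" choice as the patch
        ep.sendAsync(createCmd, answer -> {
            Result r = new Result();
            if (answer.ok) {
                r.path = answer.volumeUuid; // patch: volume.setPath(volAnswer.getVolumeUuid())
            } else {
                r.error = answer.details;   // patch: result.setResult(volAnswer.getDetails())
            }
            callback.accept(r);             // patch: parent callback.complete(result)
        });
    }
}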
+package org.apache.cloudstack.storage.datastore.driver; + +import java.util.Map; + +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.EndPoint; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.datastore.PrimaryDataStore; +import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo; +import org.apache.cloudstack.storage.volume.VolumeObject; + +public interface PrimaryDataStoreDriver { + void createVolumeAsync(VolumeObject vol, AsyncCompletionCallback callback); + + void createVolumeFromBaseImageAsync(VolumeObject volume, String template, AsyncCompletionCallback callback); + + void deleteVolumeAsync(VolumeObject vo, AsyncCompletionCallback callback); + + String grantAccess(VolumeObject vol, EndPoint ep); + + boolean revokeAccess(VolumeObject vol, EndPoint ep); + + long getCapacity(); + + long getAvailableCapacity(); + + + //Lifecycle API + boolean initialize(Map params); + boolean grantAccess(EndPoint ep); + boolean revokeAccess(EndPoint ep); + void setDataStore(PrimaryDataStore dataStore); +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java index c3372b7d14d..658d4dbece7 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java @@ -21,10 +21,9 @@ package org.apache.cloudstack.storage.datastore.lifecycle; import java.util.Map; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; -import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.EndPoint; import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd; import org.apache.cloudstack.storage.datastore.DataStoreStatus; import org.apache.cloudstack.storage.datastore.PrimaryDataStore; @@ -39,21 +38,29 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif } @Override - public boolean initialize(DataStore store, Map dsInfos) { + public void setDataStore(PrimaryDataStoreInfo dataStore) { + this.dataStore = (PrimaryDataStore)dataStore; + } + + @Override + public boolean initialize(Map dsInfos) { + PrimaryDataStoreVO dataStore = dataStoreDao.findById(this.dataStore.getId()); + dataStore.setStatus(DataStoreStatus.Initialized); + dataStoreDao.update(this.dataStore.getId(), dataStore); //TODO: add extension point for each data store return true; } protected void attachCluster() { //send down AttachPrimaryDataStoreCmd command to all the hosts in the cluster - AttachPrimaryDataStoreCmd cmd = new AttachPrimaryDataStoreCmd(this.dataStore.getUri()); - /*for (EndPoint ep : dataStore.getEndPoints()) { + AttachPrimaryDataStoreCmd cmd = new AttachPrimaryDataStoreCmd(this.dataStore.getDataStoreTO()); + for (EndPoint ep : dataStore.getEndPoints()) { ep.sendMessage(cmd); - } */ + } } @Override - public boolean attachCluster(DataStore dataStore, 
ClusterScope scope) { + public boolean attachCluster(ClusterScope scope) { PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(this.dataStore.getId()); dataStoreVO.setDataCenterId(scope.getZoneId()); dataStoreVO.setPodId(scope.getPodId()); @@ -100,12 +107,4 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif return false; } - - - @Override - public boolean attachZone(DataStore dataStore, ZoneScope scope) { - // TODO Auto-generated method stub - return false; - } - } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultXenPrimaryDataStoreLifeCycle.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultXenPrimaryDataStoreLifeCycle.java index da61825abec..bb255b0cc53 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultXenPrimaryDataStoreLifeCycle.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultXenPrimaryDataStoreLifeCycle.java @@ -18,7 +18,7 @@ */ package org.apache.cloudstack.storage.datastore.lifecycle; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.storage.EndPoint; import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd; import org.apache.cloudstack.storage.command.CreatePrimaryDataStoreCmd; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -39,9 +39,8 @@ public class DefaultXenPrimaryDataStoreLifeCycle extends DefaultPrimaryDataStore @Override public void attachCluster() { - String result = null; //send one time is enough, as xenserver is clustered - /*CreatePrimaryDataStoreCmd cmd = new CreatePrimaryDataStoreCmd(this.dataStore.getDataStoreTO()); + CreatePrimaryDataStoreCmd cmd = new CreatePrimaryDataStoreCmd(this.dataStore.getDataStoreTO()); String result = null; for (EndPoint ep : dataStore.getEndPoints()) { Answer answer = ep.sendMessage(cmd); @@ -49,7 +48,7 @@ public class DefaultXenPrimaryDataStoreLifeCycle extends DefaultPrimaryDataStore return; } result = answer.getDetails(); - }*/ + } if (result != null) throw new CloudRuntimeException("AttachPrimaryDataStoreCmd failed: " + result); diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreManagerImpl.java similarity index 51% rename from engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreManagerImpl.java index fd66ffb7315..139da4d729d 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreManagerImpl.java @@ -18,50 +18,33 @@ */ package org.apache.cloudstack.storage.datastore.manager; -import java.util.HashMap; -import java.util.Map; - import javax.inject.Inject; -import org.apache.cloudstack.storage.datastore.DefaultPrimaryDataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; import 
org.apache.cloudstack.storage.datastore.PrimaryDataStore; -import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; -import org.apache.cloudstack.storage.datastore.db.DataStoreProviderDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO; -import org.apache.cloudstack.storage.datastore.provider.DataStoreProvider; -import org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManager; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreProviderDao; +import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProviderManager; import org.springframework.stereotype.Component; @Component -public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataStoreProviderManager { +public class DefaultPrimaryDataStoreManagerImpl implements PrimaryDataStoreManager { @Inject - DataStoreProviderDao dataStoreProviderDao; + PrimaryDataStoreProviderDao dataStoreProviderDao; @Inject - DataStoreProviderManager providerManager; + PrimaryDataStoreProviderManager providerManager; @Inject PrimaryDataStoreDao dataStoreDao; - Map driverMaps = new HashMap(); @Override public PrimaryDataStore getPrimaryDataStore(long dataStoreId) { PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(dataStoreId); - long providerId = dataStoreVO.getStorageProviderId(); - DataStoreProvider provider = providerManager.getDataStoreProviderById(providerId); - DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO, - driverMaps.get(provider.getUuid()), - provider); - + Long providerId = dataStoreVO.getStorageProviderId(); + PrimaryDataStoreProvider provider = providerManager.getDataStoreProvider(providerId); + PrimaryDataStore dataStore = (PrimaryDataStore)provider.getDataStore(dataStoreId); return dataStore; } - - @Override - public boolean registerDriver(String uuid, PrimaryDataStoreDriver driver) { - if (driverMaps.get(uuid) != null) { - return false; - } - driverMaps.put(uuid, driver); - return true; - } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/PrimaryDataStoreManager.java similarity index 77% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/PrimaryDataStoreManager.java index a60ec7a6e65..f6b36258f00 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/PrimaryDataStoreManager.java @@ -16,13 +16,12 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.cloudstack.storage.datastore; +package org.apache.cloudstack.storage.datastore.manager; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.storage.datastore.PrimaryDataStore; - -public interface PrimaryDataStoreProviderManager { +public interface PrimaryDataStoreManager { public PrimaryDataStore getPrimaryDataStore(long dataStoreId); - boolean registerDriver(String uuid, PrimaryDataStoreDriver driver); } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls deleted file mode 100644 index 2e09bd5690a..00000000000 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls +++ /dev/null @@ -1,57 +0,0 @@ (57 deleted lines of UML class-diagram XML; the .ucls markup was lost in extraction) \ No newline at end of file diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java index fe44ed84a2d..55966be5ac1 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java @@ -1,55 +1,166 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
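// Editorial sketch, not part of the patch: the lookup chain
// DefaultPrimaryDataStoreManagerImpl (above) now follows — db row -> owning
// provider -> provider builds the store object — with maps standing in for the
// DAO and the provider manager. All names here are illustrative.
import java.util.HashMap;
import java.util.Map;

class StoreLookupSketch {
    static class StoreRow { long id; long providerId; }
    interface Provider { Object getDataStore(long storeId); }

    private final Map<Long, StoreRow> storeDao = new HashMap<>();  // stands in for PrimaryDataStoreDao
    private final Map<Long, Provider> providers = new HashMap<>(); // stands in for PrimaryDataStoreProviderManager

    Object getPrimaryDataStore(long dataStoreId) {
        StoreRow row = storeDao.get(dataStoreId);          // PrimaryDataStoreVO lookup
        Provider provider = providers.get(row.providerId); // getDataStoreProvider(providerId)
        return provider.getDataStore(dataStoreId);         // provider constructs the data store
    }
}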
package org.apache.cloudstack.storage.datastore.provider; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.HashMap; +import java.util.List; import java.util.Map; +import javax.annotation.PostConstruct; import javax.inject.Inject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; -import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.cloudstack.storage.datastore.DefaultPrimaryDataStore; +import org.apache.cloudstack.storage.datastore.PrimaryDataStore; +import org.apache.cloudstack.storage.datastore.configurator.PrimaryDataStoreConfigurator; +import org.apache.cloudstack.storage.datastore.configurator.validator.StorageProtocolTransformer; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreProviderDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreProviderVO; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.driver.DefaultPrimaryDataStoreDriverImpl; +import org.apache.cloudstack.storage.datastore.driver.PrimaryDataStoreDriver; import org.apache.cloudstack.storage.datastore.lifecycle.DefaultPrimaryDataStoreLifeCycleImpl; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; +import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.stereotype.Component; -import com.cloud.utils.component.ComponentContext; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.utils.exception.CloudRuntimeException; @Component public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProvider { private final String providerName = "default primary data store provider"; protected PrimaryDataStoreDriver driver; + private PrimaryDataStoreProviderVO providerVO; @Inject - PrimaryDataStoreProviderManager storeMgr; - protected DataStoreLifeCycle lifecyle; - protected String uuid; - protected long id; + protected PrimaryDataStoreDao dataStoreDao; + @Inject + protected PrimaryDataStoreProviderDao providerDao; + @Inject + protected ClusterDao clusterDao; + protected Map configuratorMaps = new HashMap(); + + @Inject + public DefaultPrimaryDatastoreProviderImpl(@Qualifier("defaultProvider") List configurators) { + for (PrimaryDataStoreConfigurator configurator : configurators) { + String key = generateKey(configurator.getSupportedHypervisor(), configurator.getSupportedDataStoreType().toString()); + configuratorMaps.put(key, configurator); + } + } + + // TODO: Remove this. I put this in to get over the compilation problem. Edison needs to look at Solidfire's implementation which requires this. 
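// Editorial note, not part of the patch: the constructor above indexes each
// injected PrimaryDataStoreConfigurator under generateKey(hypervisor, poolType),
// i.e. "<hypervisor>_<pooltype>" in lower case, so a XenServer NFS pool would
// resolve roughly as:
//
//     String key = generateKey(HypervisorType.XenServer, "nfs"); // "xenserver_nfs"
//     PrimaryDataStoreConfigurator c = configuratorMaps.get(key);
//
// registerDataStore() below derives the same key from the pool URL's scheme
// plus the owning cluster's hypervisor type, and persists it in the
// configurator_key column so the store can later be re-opened by getDataStore().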
+ public DefaultPrimaryDatastoreProviderImpl() { + + } + + protected String generateKey(HypervisorType hypervisor, String poolType) { + return hypervisor.toString().toLowerCase() + "_" + poolType.toString().toLowerCase(); + } + + @Override + public PrimaryDataStore getDataStore(long dataStoreId) { + PrimaryDataStoreVO dsv = dataStoreDao.findById(dataStoreId); + if (dsv == null) { + return null; + } + + String key = dsv.getKey(); + + PrimaryDataStoreConfigurator configurator = configuratorMaps.get(key); + + DefaultPrimaryDataStore dataStore = (DefaultPrimaryDataStore)configurator.getDataStore(dataStoreId); + dataStore.setProvider(this); + return dataStore; + } + + @Override + public PrimaryDataStore registerDataStore(Map dsInfos) { + String url = dsInfos.get("url"); + URI uri = null; + try { + uri = new URI(url); + } catch (URISyntaxException e) { + throw new CloudRuntimeException("invalid url: " + e.toString()); + } + String protocol = uri.getScheme(); + Long cluster = null; + try { + cluster = Long.parseLong(dsInfos.get("clusterId")); + } catch (NumberFormatException e) { + throw new CloudRuntimeException("Failed to get clusterId"); + } + ClusterVO clusterVO = clusterDao.findById(cluster); + if (clusterVO == null) { + throw new CloudRuntimeException("Can't find cluster: " + cluster); + } + HypervisorType hypervisor = clusterVO.getHypervisorType(); + String key = generateKey(hypervisor, protocol); + PrimaryDataStoreConfigurator configurator = configuratorMaps.get(key); + if (configurator == null) { + throw new CloudRuntimeException("can't find configurator from key: " + key); + } + + StorageProtocolTransformer validator = configurator.getProtocolTransformer(); + validator.normalizeUserInput(dsInfos); + + PrimaryDataStoreVO dataStoreVO = dataStoreDao.findPoolByUUID(dsInfos.get("uuid")); + if (dataStoreVO != null) { + throw new CloudRuntimeException("duplicate uuid: " + dsInfos.get("uuid")); + } + + dataStoreVO = new PrimaryDataStoreVO(); + dataStoreVO.setStorageProviderId(this.getId()); + dataStoreVO.setHostAddress(dsInfos.get("server")); + dataStoreVO.setPath(dsInfos.get("path")); + dataStoreVO.setPoolType(protocol); + dataStoreVO.setPort(Integer.parseInt(dsInfos.get("port"))); + dataStoreVO.setKey(key); + dataStoreVO.setName(dsInfos.get("name")); + dataStoreVO.setUuid(dsInfos.get("uuid")); + dataStoreVO = dataStoreDao.persist(dataStoreVO); + + DefaultPrimaryDataStore dataStore = (DefaultPrimaryDataStore)configurator.getDataStore(dataStoreVO.getId()); + dataStore.setProvider(this); + + PrimaryDataStoreLifeCycle lifeCycle = dataStore.getLifeCycle(); + lifeCycle.initialize(dsInfos); + return getDataStore(dataStore.getId()); + } + + @Override + public long getId() { + return this.providerVO.getId(); + } + + @Override + public boolean configure() { + this.providerVO = providerDao.findByName(this.providerName); + return true; + } + @Override public String getName() { return providerName; } - @Override - public DataStoreLifeCycle getLifeCycle() { - return this.lifecyle; - } - - @Override - public boolean configure(Map params) { - lifecyle = ComponentContext.inject(DefaultPrimaryDataStoreLifeCycleImpl.class); - driver = ComponentContext.inject(DefaultPrimaryDataStoreDriverImpl.class); - uuid = (String)params.get("uuid"); - id = (Long)params.get("id"); - storeMgr.registerDriver(uuid, this.driver); - return true; - } - - @Override - public String getUuid() { - return this.uuid; - } - - @Override - public long getId() { - return this.id; - } - } diff --git 
a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProviderManager.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProviderManager.java new file mode 100644 index 00000000000..c77f7a32c1c --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProviderManager.java @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.datastore.provider; + +import java.util.List; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; + +import com.cloud.utils.component.Manager; + +public interface PrimaryDataStoreProviderManager extends Manager { + public PrimaryDataStoreProvider getDataStoreProvider(Long providerId); + public PrimaryDataStoreProvider getDataStoreProvider(String name); + public List getDataStoreProviders(); +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProviderManagerImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProviderManagerImpl.java new file mode 100644 index 00000000000..33d98db8bfe --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProviderManagerImpl.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.provider; + +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreProviderDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreProviderVO; +import org.springframework.stereotype.Component; + +import com.cloud.utils.db.DB; + +@Component +public class PrimaryDataStoreProviderManagerImpl implements PrimaryDataStoreProviderManager { + @Inject + List providers; + @Inject + PrimaryDataStoreProviderDao providerDao; + + @Override + public PrimaryDataStoreProvider getDataStoreProvider(Long providerId) { + for (PrimaryDataStoreProvider provider : providers) { + if (provider.getId() == providerId) { + return provider; + } + } + return null; + } + + @Override + @DB + public boolean configure(String name, Map params) throws ConfigurationException { + List providerVos = providerDao.listAll(); + for (PrimaryDataStoreProvider provider : providers) { + boolean existingProvider = false; + for (PrimaryDataStoreProviderVO providerVo : providerVos) { + if (providerVo.getName().equalsIgnoreCase(provider.getName())) { + existingProvider = true; + break; + } + } + if (!existingProvider) { + PrimaryDataStoreProviderVO dataStoreProvider = new PrimaryDataStoreProviderVO(); + dataStoreProvider.setName(provider.getName()); + dataStoreProvider = providerDao.persist(dataStoreProvider); + } + + provider.configure(); + } + return true; + } + + @Override + public boolean start() { + // TODO Auto-generated method stub + return true; + } + + @Override + public boolean stop() { + // TODO Auto-generated method stub + return true; + } + + @Override + public String getName() { + // TODO Auto-generated method stub + return null; + } + + @Override + public PrimaryDataStoreProvider getDataStoreProvider(String name) { + for (PrimaryDataStoreProvider provider : providers) { + if (provider.getName().equalsIgnoreCase(name)) { + return provider; + } + } + return null; + } + + @Override + public List getDataStoreProviders() { + return providers; + } +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java deleted file mode 100644 index ec6b0326aeb..00000000000 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java +++ /dev/null @@ -1,293 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.storage.volume; - -import javax.inject.Inject; - -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; -import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.framework.async.AsyncRpcConext; -import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.datastore.PrimaryDataStore; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.image.ImageDataFactory; -import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.motion.DataMotionService; -import org.apache.cloudstack.storage.volume.VolumeServiceImpl.CreateBaseImageResult; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.fsm.NoTransitionException; - -@Component -public class TemplateInstallStrategyImpl implements TemplateInstallStrategy { - private static final Logger s_logger = Logger - .getLogger(TemplateInstallStrategyImpl.class); - @Inject - ObjectInDataStoreManager objectInDataStoreMgr; - @Inject - DataMotionService motionSrv; - @Inject - ImageDataFactory imageFactory; - protected long waitingTime = 1800; // half an hour - protected long waitingRetries = 10; - - protected TemplateInfo waitingForTemplateDownload(TemplateInfo template, - PrimaryDataStore dataStore) { - long retries = this.waitingRetries; - ObjectInDataStoreVO obj = null; - do { - try { - Thread.sleep(waitingTime); - } catch (InterruptedException e) { - s_logger.debug("sleep interrupted", e); - throw new CloudRuntimeException("sleep interrupted", e); - } - - obj = objectInDataStoreMgr.findObject(template.getId(), - template.getType(), dataStore.getId(), dataStore.getRole()); - if (obj == null) { - s_logger.debug("can't find object in db, maybe it's cleaned up already, exit waiting"); - break; - } - if (obj.getState() == ObjectInDataStoreStateMachine.State.Ready) { - break; - } - retries--; - } while (retries > 0); - - if (obj == null || retries <= 0) { - s_logger.debug("waiting too long for template downloading, marked it as failed"); - throw new CloudRuntimeException( - "waiting too long for template downloading, marked it as failed"); - } - return imageFactory.getTemplate(template.getId(), dataStore); - } - - class InstallContext extends AsyncRpcConext { - final TemplateInfo destTemplate; - final TemplateInfo srcTemplate; - - public InstallContext(AsyncCompletionCallback callback, - TemplateInfo destTemplate, TemplateInfo srcTemplate) { - super(callback); - this.destTemplate = destTemplate; - this.srcTemplate = srcTemplate; - } - - } - - @Override - public Void installAsync(TemplateInfo template, PrimaryDataStore store, - AsyncCompletionCallback callback) { - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - template.getId(), template.getType(), store.getId(), - store.getRole()); - TemplateInfo templateOnPrimaryStoreObj = null; - boolean freshNewTemplate = false; - if (obj == null) { - try { - templateOnPrimaryStoreObj = objectInDataStoreMgr.create( - template, store); - freshNewTemplate = true; - } catch (Throwable e) { - obj = objectInDataStoreMgr.findObject(template.getId(), - template.getType(), store.getId(), store.getRole()); - if (obj == null) { - 
CreateBaseImageResult result = new CreateBaseImageResult( - null); - result.setSucess(false); - result.setResult(e.toString()); - callback.complete(result); - return null; - } - } - } - - if (!freshNewTemplate - && obj.getState() != ObjectInDataStoreStateMachine.State.Ready) { - try { - templateOnPrimaryStoreObj = waitingForTemplateDownload( - template, store); - } catch (Exception e) { - CreateBaseImageResult result = new CreateBaseImageResult(null); - result.setSucess(false); - result.setResult(e.toString()); - callback.complete(result); - return null; - } - - CreateBaseImageResult result = new CreateBaseImageResult( - templateOnPrimaryStoreObj); - callback.complete(result); - return null; - } - - try { - objectInDataStoreMgr.update(templateOnPrimaryStoreObj, - ObjectInDataStoreStateMachine.Event.CreateRequested); - } catch (NoTransitionException e) { - try { - objectInDataStoreMgr.update(templateOnPrimaryStoreObj, - ObjectInDataStoreStateMachine.Event.OperationFailed); - } catch (NoTransitionException e1) { - s_logger.debug("state transation failed", e1); - } - CreateBaseImageResult result = new CreateBaseImageResult(null); - result.setSucess(false); - result.setResult(e.toString()); - callback.complete(result); - return null; - } - - InstallContext context = new InstallContext( - callback, templateOnPrimaryStoreObj, template); - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher - .create(this); - caller.setCallback( - caller.getTarget().installTemplateCallback(null, null)) - .setContext(context); - - store.getDriver().createAsync(templateOnPrimaryStoreObj, caller); - return null; - } - - class CopyTemplateContext extends AsyncRpcConext { - TemplateInfo template; - - public CopyTemplateContext(AsyncCompletionCallback callback, - TemplateInfo template) { - super(callback); - this.template = template; - } - } - - protected Void installTemplateCallback( - AsyncCallbackDispatcher callback, - InstallContext context) { - CreateCmdResult result = callback.getResult(); - TemplateInfo templateOnPrimaryStoreObj = context.destTemplate; - CreateBaseImageResult upResult = new CreateBaseImageResult( - templateOnPrimaryStoreObj); - if (result.isFailed()) { - upResult.setResult(result.getResult()); - context.getParentCallback().complete(upResult); - return null; - } - - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - templateOnPrimaryStoreObj.getId(), templateOnPrimaryStoreObj - .getType(), templateOnPrimaryStoreObj.getDataStore() - .getId(), templateOnPrimaryStoreObj.getDataStore() - .getRole()); - - obj.setInstallPath(result.getPath()); - try { - objectInDataStoreMgr.update(templateOnPrimaryStoreObj, - ObjectInDataStoreStateMachine.Event.OperationSuccessed); - } catch (NoTransitionException e) { - try { - objectInDataStoreMgr.update(templateOnPrimaryStoreObj, - ObjectInDataStoreStateMachine.Event.OperationFailed); - } catch (NoTransitionException e1) { - s_logger.debug("failed to change state", e1); - } - - upResult.setResult(e.toString()); - context.getParentCallback().complete(upResult); - return null; - } - - moveTemplate(context.srcTemplate, templateOnPrimaryStoreObj, obj, - context.getParentCallback()); - return null; - } - - protected void moveTemplate(TemplateInfo srcTemplate, - TemplateInfo destTemplate, ObjectInDataStoreVO obj, - AsyncCompletionCallback callback) { - // move template into primary storage - try { - objectInDataStoreMgr.update(destTemplate, - ObjectInDataStoreStateMachine.Event.CopyingRequested); - } catch (NoTransitionException e) { - 
s_logger.debug("failed to change state", e); - try { - objectInDataStoreMgr.update(destTemplate, - ObjectInDataStoreStateMachine.Event.OperationFailed); - } catch (NoTransitionException e1) { - - } - CreateBaseImageResult res = new CreateBaseImageResult(destTemplate); - res.setResult("Failed to change state: " + e.toString()); - callback.complete(res); - } - - CopyTemplateContext anotherCall = new CopyTemplateContext( - callback, destTemplate); - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher - .create(this); - caller.setCallback(caller.getTarget().copyTemplateCallback(null, null)) - .setContext(anotherCall); - - motionSrv.copyAsync(srcTemplate, destTemplate, caller); - } - - protected Void copyTemplateCallback( - AsyncCallbackDispatcher callback, - CopyTemplateContext context) { - CopyCommandResult result = callback.getResult(); - TemplateInfo templateOnPrimaryStoreObj = context.template; - if (result.isFailed()) { - CreateBaseImageResult res = new CreateBaseImageResult( - templateOnPrimaryStoreObj); - res.setResult(result.getResult()); - context.getParentCallback().complete(res); - } - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - templateOnPrimaryStoreObj.getId(), templateOnPrimaryStoreObj - .getType(), templateOnPrimaryStoreObj.getDataStore() - .getId(), templateOnPrimaryStoreObj.getDataStore() - .getRole()); - - obj.setInstallPath(result.getPath()); - CreateBaseImageResult res = new CreateBaseImageResult( - templateOnPrimaryStoreObj); - try { - objectInDataStoreMgr.update(templateOnPrimaryStoreObj, - ObjectInDataStoreStateMachine.Event.OperationSuccessed); - } catch (NoTransitionException e) { - s_logger.debug("Failed to update copying state: ", e); - try { - objectInDataStoreMgr.update(templateOnPrimaryStoreObj, - ObjectInDataStoreStateMachine.Event.OperationFailed); - } catch (NoTransitionException e1) { - } - - res.setResult("Failed to update copying state: " + e.toString()); - context.getParentCallback().complete(res); - } - context.getParentCallback().complete(res); - return null; - } - -} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateOnPrimaryDataStoreObject.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateOnPrimaryDataStoreObject.java new file mode 100644 index 00000000000..9221fb481ce --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateOnPrimaryDataStoreObject.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.volume; + +import org.apache.cloudstack.storage.datastore.PrimaryDataStore; +import org.apache.cloudstack.storage.image.TemplateInfo; +import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.State; +import org.apache.cloudstack.storage.volume.db.TemplatePrimaryDataStoreDao; +import org.apache.cloudstack.storage.volume.db.TemplatePrimaryDataStoreVO; + +import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.utils.fsm.StateMachine2; + +public class TemplateOnPrimaryDataStoreObject implements TemplateOnPrimaryDataStoreInfo { + protected PrimaryDataStore dataStore; + protected TemplateInfo template; + protected TemplatePrimaryDataStoreVO vo; + protected TemplatePrimaryDataStoreDao templateStoreDao; + protected TemplatePrimaryDataStoreManager mgr; + protected StateMachine2 stateMachine; + + public TemplateOnPrimaryDataStoreObject(PrimaryDataStore primaryDataStore, TemplateInfo template, TemplatePrimaryDataStoreVO vo, + TemplatePrimaryDataStoreDao templateStoreDao, TemplatePrimaryDataStoreManager mgr) { + this.dataStore = primaryDataStore; + this.template = template; + this.vo = vo; + this.templateStoreDao = templateStoreDao; + this.mgr = mgr; + this.stateMachine = mgr.getStateMachine(); + } + + @Override + public String getPath() { + return vo.getInstallPath(); + } + + @Override + public void setPath(String path) { + this.vo.setInstallPath(path); + } + + @Override + public PrimaryDataStore getPrimaryDataStore() { + return this.dataStore; + } + + @Override + public TemplateInfo getTemplate() { + return this.template; + } + + public void updateStatus(Status status) { + vo.setDownloadState(status); + templateStoreDao.update(vo.getId(), vo); + vo = templateStoreDao.findById(vo.getId()); + } + + public void stateTransit(ObjectInDataStoreStateMachine.Event event) { + try { + this.stateMachine.transitTo(vo, event, null, templateStoreDao); + vo = templateStoreDao.findById(vo.getId()); + } catch (NoTransitionException e) { + throw new CloudRuntimeException("Failed change state", e); + } catch (Exception e) { + throw new CloudRuntimeException("Failed change state", e); + } + } +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStore.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplatePrimaryDataStoreManager.java similarity index 52% rename from engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStore.java rename to engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplatePrimaryDataStoreManager.java index a443f39ef33..59784249158 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStore.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplatePrimaryDataStoreManager.java @@ -16,20 +16,20 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.cloudstack.storage.image.datastore; +package org.apache.cloudstack.storage.volume; -import java.util.Set; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.datastore.PrimaryDataStore; import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; +import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.State; +import org.apache.cloudstack.storage.volume.db.TemplatePrimaryDataStoreVO; -public interface ImageDataStore extends DataStore { - TemplateInfo getTemplate(long templateId); - VolumeInfo getVolume(long volumeId); - SnapshotInfo getSnapshot(long snapshotId); - boolean exists(DataObject object); - Set listTemplates(); +import com.cloud.utils.fsm.StateMachine2; + +public interface TemplatePrimaryDataStoreManager { + public TemplateOnPrimaryDataStoreInfo createTemplateOnPrimaryDataStore(TemplateInfo template, PrimaryDataStore dataStore); + + public TemplateOnPrimaryDataStoreInfo findTemplateOnPrimaryDataStore(TemplateInfo template, PrimaryDataStore dataStore); + + public StateMachine2 getStateMachine(); } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplatePrimaryDataStoreManagerImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplatePrimaryDataStoreManagerImpl.java new file mode 100644 index 00000000000..fc4e4fd2659 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplatePrimaryDataStoreManagerImpl.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.volume; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.storage.datastore.PrimaryDataStore; +import org.apache.cloudstack.storage.image.TemplateInfo; +import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.State; +import org.apache.cloudstack.storage.volume.db.TemplatePrimaryDataStoreDao; +import org.apache.cloudstack.storage.volume.db.TemplatePrimaryDataStoreVO; +import org.springframework.stereotype.Component; + +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.utils.db.SearchCriteria2; +import com.cloud.utils.db.SearchCriteriaService; +import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.StateMachine2; + +@Component +public class TemplatePrimaryDataStoreManagerImpl implements TemplatePrimaryDataStoreManager { + @Inject + TemplatePrimaryDataStoreDao templateStoreDao; + protected long waitingTime = 1800; //half an hour + protected long waitingReties = 10; + protected StateMachine2 stateMachines; + public TemplatePrimaryDataStoreManagerImpl() { + stateMachines = new StateMachine2(); + stateMachines.addTransition(ObjectInDataStoreStateMachine.State.Allocated, Event.CreateRequested, ObjectInDataStoreStateMachine.State.Creating); + stateMachines.addTransition(ObjectInDataStoreStateMachine.State.Creating, Event.OperationSuccessed, ObjectInDataStoreStateMachine.State.Ready); + stateMachines.addTransition(ObjectInDataStoreStateMachine.State.Creating, Event.OperationFailed, ObjectInDataStoreStateMachine.State.Failed); + stateMachines.addTransition(ObjectInDataStoreStateMachine.State.Failed, Event.CreateRequested, ObjectInDataStoreStateMachine.State.Creating); + stateMachines.addTransition(ObjectInDataStoreStateMachine.State.Ready, Event.DestroyRequested, ObjectInDataStoreStateMachine.State.Destroying); + stateMachines.addTransition(ObjectInDataStoreStateMachine.State.Destroying, Event.OperationSuccessed, ObjectInDataStoreStateMachine.State.Destroyed); + stateMachines.addTransition(ObjectInDataStoreStateMachine.State.Destroying, Event.OperationFailed, ObjectInDataStoreStateMachine.State.Destroying); + stateMachines.addTransition(ObjectInDataStoreStateMachine.State.Destroying, Event.DestroyRequested, ObjectInDataStoreStateMachine.State.Destroying); + } + + private TemplatePrimaryDataStoreVO waitingForTemplateDownload(TemplateInfo template, PrimaryDataStoreInfo dataStore) { + //the naive version, polling. 
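// Editorial note, not part of the patch: this loop polls the template row up
// to waitingReties (10) times, sleeping waitingTime between polls. Note that
// Thread.sleep() takes milliseconds, so waitingTime = 1800 pauses 1.8 seconds
// per retry (about 18 seconds in total), not the "half an hour" the field
// comment suggests; half an hour per poll would be Thread.sleep(1800 * 1000L).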
+ long retries = waitingReties; + TemplatePrimaryDataStoreVO templateStoreVO = null; + do { + try { + Thread.sleep(waitingTime); + } catch (InterruptedException e) { + + } + + templateStoreVO = templateStoreDao.findByTemplateIdAndPoolIdAndReady(template.getId(), dataStore.getId()); + if (templateStoreVO != null) { + break; + } + retries--; + } while (retries > 0); + + if (templateStoreVO == null) { + throw new CloudRuntimeException("waiting too long for template downloading, marked it as failed"); + } + + return templateStoreVO; + } + @Override + public TemplateOnPrimaryDataStoreObject createTemplateOnPrimaryDataStore(TemplateInfo template, PrimaryDataStore dataStore) { + TemplatePrimaryDataStoreVO templateStoreVO = null; + boolean freshNewTemplate = false; + templateStoreVO = templateStoreDao.findByTemplateIdAndPoolId(template.getId(), dataStore.getId()); + if (templateStoreVO == null) { + try { + templateStoreVO = new TemplatePrimaryDataStoreVO(dataStore.getId(), template.getId()); + templateStoreVO = templateStoreDao.persist(templateStoreVO); + freshNewTemplate = true; + } catch (Throwable th) { + templateStoreVO = templateStoreDao.findByTemplateIdAndPoolId(template.getId(), dataStore.getId()); + if (templateStoreVO == null) { + throw new CloudRuntimeException("Failed create db entry: " + th.toString()); + } + } + } + + //If it's not a fresh template downloading, waiting for other people downloading finished. + if (!freshNewTemplate && templateStoreVO.getState() != ObjectInDataStoreStateMachine.State.Ready) { + templateStoreVO = waitingForTemplateDownload(template, dataStore); + } + + TemplateOnPrimaryDataStoreObject templateStoreObject = new TemplateOnPrimaryDataStoreObject(dataStore, template, templateStoreVO, templateStoreDao, this); + return templateStoreObject; + } + + @Override + public TemplateOnPrimaryDataStoreObject findTemplateOnPrimaryDataStore(TemplateInfo template, PrimaryDataStore dataStore) { + SearchCriteriaService sc = SearchCriteria2.create(TemplatePrimaryDataStoreVO.class); + sc.addAnd(sc.getEntity().getTemplateId(), Op.EQ, template.getId()); + sc.addAnd(sc.getEntity().getPoolId(), Op.EQ, dataStore.getId()); + sc.addAnd(sc.getEntity().getDownloadState(), Op.EQ, VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + TemplatePrimaryDataStoreVO templateStoreVO = sc.find(); + if (templateStoreVO == null) { + return null; + } + + TemplateOnPrimaryDataStoreObject templateStoreObject = new TemplateOnPrimaryDataStoreObject(dataStore, template, templateStoreVO, templateStoreDao, this); + return templateStoreObject; + } + + @Override + public StateMachine2 getStateMachine() { + return stateMachines; + } + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java deleted file mode 100644 index 38c35f811cd..00000000000 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.volume; - -import javax.inject.Inject; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.storage.datastore.DataStoreManager; -import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.datastore.VolumeDataFactory; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.volume.db.VolumeDao2; -import org.apache.cloudstack.storage.volume.db.VolumeVO; -import org.springframework.stereotype.Component; - -@Component -public class VolumeDataFactoryImpl implements VolumeDataFactory { - @Inject - VolumeDao2 volumeDao; - @Inject - ObjectInDataStoreManager objMap; - @Inject - DataStoreManager storeMgr; - @Override - public VolumeInfo getVolume(long volumeId, DataStore store) { - VolumeVO volumeVO = volumeDao.findById(volumeId); - ObjectInDataStoreVO obj = objMap.findObject(volumeId, DataObjectType.VOLUME, store.getId(), store.getRole()); - if (obj == null) { - return null; - } - VolumeObject vol = VolumeObject.getVolumeObject(store, volumeVO); - return vol; - } - -} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManagerImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManagerImpl.java index 98c6b69ee0f..77a59497e69 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManagerImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManagerImpl.java @@ -18,6 +18,8 @@ */ package org.apache.cloudstack.storage.volume; +import javax.inject.Inject; + import org.apache.cloudstack.engine.subsystem.api.storage.VolumeProfile; import org.apache.cloudstack.storage.volume.db.VolumeDao2; import org.apache.cloudstack.storage.volume.db.VolumeVO; @@ -26,7 +28,6 @@ import org.springframework.stereotype.Component; import com.cloud.storage.Volume; import com.cloud.storage.Volume.Event; import com.cloud.storage.Volume.State; -import com.cloud.utils.component.Inject; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.fsm.StateMachine2; @@ -39,6 +40,7 @@ public class VolumeManagerImpl implements VolumeManager { initStateMachine(); } + @Override public VolumeVO allocateDuplicateVolume(VolumeVO oldVol) { /* VolumeVO newVol = new VolumeVO(oldVol.getVolumeType(), oldVol.getName(), oldVol.getDataCenterId(), oldVol.getDomainId(), oldVol.getAccountId(), oldVol.getDiskOfferingId(), oldVol.getSize()); @@ -47,58 +49,62 @@ public class VolumeManagerImpl implements VolumeManager { newVol.setInstanceId(oldVol.getInstanceId()); newVol.setRecreatable(oldVol.isRecreatable()); newVol.setReservationId(oldVol.getReservationId()); - */ + */ return null; // return _volumeDao.persist(newVol); } - + private void initStateMachine() { - s_fsm.addTransition(Volume.State.Allocated, Event.CreateRequested, Volume.State.Creating); - 
s_fsm.addTransition(Volume.State.Allocated, Event.DestroyRequested, Volume.State.Destroying); - s_fsm.addTransition(Volume.State.Creating, Event.OperationRetry, Volume.State.Creating); - s_fsm.addTransition(Volume.State.Creating, Event.OperationFailed, Volume.State.Allocated); - s_fsm.addTransition(Volume.State.Creating, Event.OperationSucceeded, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Creating, Event.DestroyRequested, Volume.State.Destroying); - s_fsm.addTransition(Volume.State.Creating, Event.CreateRequested, Volume.State.Creating); - s_fsm.addTransition(Volume.State.Allocated, Event.UploadRequested, Volume.State.UploadOp); - s_fsm.addTransition(Volume.State.UploadOp, Event.CopyRequested, Volume.State.Creating);// CopyRequested for volume from sec to primary storage - s_fsm.addTransition(Volume.State.Creating, Event.CopySucceeded, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Creating, Event.CopyFailed, Volume.State.UploadOp);// Copying volume from sec to primary failed. - s_fsm.addTransition(Volume.State.UploadOp, Event.DestroyRequested, Volume.State.Destroying); - s_fsm.addTransition(Volume.State.Ready, Event.DestroyRequested, Volume.State.Destroying); - s_fsm.addTransition(Volume.State.Destroy, Event.ExpungingRequested, Volume.State.Expunging); - s_fsm.addTransition(Volume.State.Ready, Event.SnapshotRequested, Volume.State.Snapshotting); - s_fsm.addTransition(Volume.State.Snapshotting, Event.OperationSucceeded, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Snapshotting, Event.OperationFailed, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Ready, Event.MigrationRequested, Volume.State.Migrating); - s_fsm.addTransition(Volume.State.Migrating, Event.OperationSucceeded, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Migrating, Event.OperationFailed, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Destroy, Event.OperationSucceeded, Volume.State.Destroy); - s_fsm.addTransition(Volume.State.Destroying, Event.OperationSucceeded, Volume.State.Destroy); - s_fsm.addTransition(Volume.State.Destroying, Event.OperationFailed, Volume.State.Destroying); - s_fsm.addTransition(Volume.State.Destroying, Event.DestroyRequested, Volume.State.Destroying); + s_fsm.addTransition(Volume.State.Allocated, Event.CreateRequested, Volume.State.Creating); + s_fsm.addTransition(Volume.State.Allocated, Event.DestroyRequested, Volume.State.Destroying); + s_fsm.addTransition(Volume.State.Creating, Event.OperationRetry, Volume.State.Creating); + s_fsm.addTransition(Volume.State.Creating, Event.OperationFailed, Volume.State.Allocated); + s_fsm.addTransition(Volume.State.Creating, Event.OperationSucceeded, Volume.State.Ready); + s_fsm.addTransition(Volume.State.Creating, Event.DestroyRequested, Volume.State.Destroying); + s_fsm.addTransition(Volume.State.Creating, Event.CreateRequested, Volume.State.Creating); + s_fsm.addTransition(Volume.State.Allocated, Event.UploadRequested, Volume.State.UploadOp); + s_fsm.addTransition(Volume.State.UploadOp, Event.CopyRequested, Volume.State.Creating);// CopyRequested for volume from sec to primary storage + s_fsm.addTransition(Volume.State.Creating, Event.CopySucceeded, Volume.State.Ready); + s_fsm.addTransition(Volume.State.Creating, Event.CopyFailed, Volume.State.UploadOp);// Copying volume from sec to primary failed. 
+ s_fsm.addTransition(Volume.State.UploadOp, Event.DestroyRequested, Volume.State.Destroying); + s_fsm.addTransition(Volume.State.Ready, Event.DestroyRequested, Volume.State.Destroying); + s_fsm.addTransition(Volume.State.Destroy, Event.ExpungingRequested, Volume.State.Expunging); + s_fsm.addTransition(Volume.State.Ready, Event.SnapshotRequested, Volume.State.Snapshotting); + s_fsm.addTransition(Volume.State.Snapshotting, Event.OperationSucceeded, Volume.State.Ready); + s_fsm.addTransition(Volume.State.Snapshotting, Event.OperationFailed, Volume.State.Ready); + s_fsm.addTransition(Volume.State.Ready, Event.MigrationRequested, Volume.State.Migrating); + s_fsm.addTransition(Volume.State.Migrating, Event.OperationSucceeded, Volume.State.Ready); + s_fsm.addTransition(Volume.State.Migrating, Event.OperationFailed, Volume.State.Ready); + s_fsm.addTransition(Volume.State.Destroy, Event.OperationSucceeded, Volume.State.Destroy); + s_fsm.addTransition(Volume.State.Destroying, Event.OperationSucceeded, Volume.State.Destroy); + s_fsm.addTransition(Volume.State.Destroying, Event.OperationFailed, Volume.State.Destroying); + s_fsm.addTransition(Volume.State.Destroying, Event.DestroyRequested, Volume.State.Destroying); } - + @Override public StateMachine2 getStateMachine() { return s_fsm; } + @Override public VolumeVO processEvent(Volume vol, Volume.Event event) throws NoTransitionException { // _volStateMachine.transitTo(vol, event, null, _volumeDao); return _volumeDao.findById(vol.getId()); } + @Override public VolumeProfile getProfile(long volumeId) { // TODO Auto-generated method stub return null; } + @Override public VolumeVO getVolume(long volumeId) { // TODO Auto-generated method stub return null; } + @Override public VolumeVO updateVolume(VolumeVO volume) { // TODO Auto-generated method stub return null; diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java index 1faacbb9aeb..13ae35c6ec4 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -1,20 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
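For orientation between hunks: the transition table above fully determines the volume lifecycle. Below is a minimal sketch of consulting that machine, assuming StateMachine2 exposes getNextState(state, event) and throws NoTransitionException for an unregistered edge (the commented-out transitTo call in processEvent points at the same API); VolumeFsmSketch and demo are illustrative names, not part of the patch.

import org.apache.cloudstack.storage.volume.VolumeManager;
import com.cloud.storage.Volume;
import com.cloud.utils.fsm.NoTransitionException;
import com.cloud.utils.fsm.StateMachine2;

class VolumeFsmSketch {
    static void demo(VolumeManager volumeMgr) {
        // the machine built by initStateMachine() above
        StateMachine2 fsm = volumeMgr.getStateMachine();
        try {
            // Allocated --CreateRequested--> Creating, per the registered transitions
            Object next = fsm.getNextState(Volume.State.Allocated, Volume.Event.CreateRequested);
        } catch (NoTransitionException e) {
            // raised when no edge is registered for the (state, event) pair,
            // e.g. (Expunging, CreateRequested)
        }
    }
}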
package org.apache.cloudstack.storage.volume; -import java.io.File; +import java.util.Date; import javax.inject.Inject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskTypeHelper; +import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeType; import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeTypeHelper; +import org.apache.cloudstack.storage.datastore.PrimaryDataStore; import org.apache.cloudstack.storage.volume.db.VolumeDao2; import org.apache.cloudstack.storage.volume.db.VolumeVO; import org.apache.log4j.Logger; import com.cloud.storage.Volume; -import com.cloud.utils.component.ComponentInject; +import com.cloud.storage.Volume.State; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.fsm.StateMachine2; @@ -23,71 +42,84 @@ public class VolumeObject implements VolumeInfo { private static final Logger s_logger = Logger.getLogger(VolumeObject.class); protected VolumeVO volumeVO; private StateMachine2 _volStateMachine; - protected DataStore dataStore; - + protected PrimaryDataStore dataStore; + @Inject + VolumeDiskTypeHelper diskTypeHelper; @Inject VolumeTypeHelper volumeTypeHelper; @Inject VolumeDao2 volumeDao; @Inject VolumeManager volumeMgr; - private VolumeObject(DataStore dataStore, VolumeVO volumeVO) { + private VolumeObject(PrimaryDataStore dataStore, VolumeVO volumeVO) { this.volumeVO = volumeVO; this.dataStore = dataStore; } - - public static VolumeObject getVolumeObject(DataStore dataStore, VolumeVO volumeVO) { + + public static VolumeObject getVolumeObject(PrimaryDataStore dataStore, VolumeVO volumeVO) { VolumeObject vo = new VolumeObject(dataStore, volumeVO); - vo = ComponentInject.inject(vo); + vo = ComponentContext.inject(vo); return vo; } + @Override public String getUuid() { return volumeVO.getUuid(); } public void setPath(String uuid) { - volumeVO.setPath(uuid); + volumeVO.setUuid(uuid); } + @Override public String getPath() { return volumeVO.getPath(); } + @Override public String getTemplateUuid() { return null; } + @Override public String getTemplatePath() { return null; } + public PrimaryDataStoreInfo getDataStoreInfo() { + return dataStore; + } + public Volume.State getState() { return volumeVO.getState(); } - public DataStore getDataStore() { + @Override + public PrimaryDataStore getDataStore() { return dataStore; } + @Override public long getSize() { return volumeVO.getSize(); } - public DiskFormat getDiskType() { - return null; + @Override + public VolumeDiskType getDiskType() { + return diskTypeHelper.getDiskType(volumeVO.getDiskType()); } - public DataObjectType getType() { - return DataObjectType.VOLUME; + @Override + public VolumeType getType() { + return volumeTypeHelper.getType(volumeVO.getVolumeType()); } public long getVolumeId() { return volumeVO.getId(); } - public void setVolumeDiskType(DiskFormat type) { - //volumeVO.setDiskType(type.toString()); + public void setVolumeDiskType(VolumeDiskType type) { + 
volumeVO.setDiskType(type.toString()); } public boolean stateTransit(Volume.Event event) { @@ -110,22 +142,47 @@ public class VolumeObject implements VolumeInfo { @Override public long getId() { - return this.volumeVO.getId(); + // TODO Auto-generated method stub + return 0; + } + + @Override + public State getCurrentState() { + // TODO Auto-generated method stub + return null; + } + + @Override + public State getDesiredState() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Date getCreatedDate() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Date getUpdatedDate() { + // TODO Auto-generated method stub + return null; + } + + @Override + public String getOwner() { + // TODO Auto-generated method stub + return null; + } + + @Override + public String getName() { + return this.volumeVO.getName(); } @Override public boolean isAttachedVM() { return (this.volumeVO.getInstanceId() == null) ? false : true; } - - @Override - public String getUri() { - return this.dataStore.getUri() + File.separator + "?type=volume&path=" + this.volumeVO.getPath(); - } - - @Override - public DiskFormat getFormat() { - // TODO Auto-generated method stub - return null; - } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index f3da3680a7e..afe985b369c 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -21,27 +21,30 @@ package org.apache.cloudstack.storage.volume; import javax.inject.Inject; import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; -import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType; import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeType; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncRpcConext; +import org.apache.cloudstack.storage.EndPoint; +import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; import org.apache.cloudstack.storage.datastore.PrimaryDataStore; -import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; +import org.apache.cloudstack.storage.datastore.manager.PrimaryDataStoreManager; +import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; import org.apache.cloudstack.storage.image.TemplateInfo; import org.apache.cloudstack.storage.image.motion.ImageMotionService; +import org.apache.cloudstack.storage.volume.VolumeService.VolumeApiResult; import org.apache.cloudstack.storage.volume.db.VolumeDao2; import org.apache.cloudstack.storage.volume.db.VolumeVO; -import org.springframework.stereotype.Component; +import org.springframework.stereotype.Component; +import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import 
com.cloud.storage.Volume; import com.cloud.utils.db.DB; +import com.cloud.utils.exception.CloudRuntimeException; //1. change volume state //2. orchestrator of volume: controls most of a volume's information (storage pool id, volume state, scope, etc.) @@ -51,13 +54,11 @@ public class VolumeServiceImpl implements VolumeService { @Inject VolumeDao2 volDao; @Inject - PrimaryDataStoreProviderManager dataStoreMgr; + PrimaryDataStoreManager dataStoreMgr; @Inject ObjectInDataStoreManager objectInDataStoreMgr; @Inject ImageMotionService imageMotion; - @Inject - TemplateInstallStrategy templateInstallStrategy; public VolumeServiceImpl() { } @@ -86,7 +87,7 @@ public class VolumeServiceImpl implements VolumeService { } @Override - public AsyncCallFuture createVolumeAsync(VolumeInfo volume, long dataStoreId) { + public AsyncCallFuture createVolumeAsync(VolumeInfo volume, long dataStoreId, VolumeDiskType diskType) { PrimaryDataStore dataStore = dataStoreMgr.getPrimaryDataStore(dataStoreId); AsyncCallFuture future = new AsyncCallFuture(); VolumeApiResult result = new VolumeApiResult(volume); @@ -111,7 +112,7 @@ public class VolumeServiceImpl implements VolumeService { caller.setCallback(caller.getTarget().createVolumeCallback(null, null)) .setContext(context); - //dataStore.createVolumeAsync(vo, null, caller); + dataStore.createVolumeAsync(vo, diskType, caller); return future; } @@ -158,7 +159,7 @@ public class VolumeServiceImpl implements VolumeService { AsyncCallFuture future = new AsyncCallFuture(); VolumeApiResult result = new VolumeApiResult(volume); - DataStore dataStore = vo.getDataStore(); + PrimaryDataStore dataStore = vo.getDataStore(); vo.stateTransit(Volume.Event.DestroyRequested); if (dataStore == null) { vo.stateTransit(Volume.Event.OperationSucceeded); @@ -172,7 +173,7 @@ public class VolumeServiceImpl implements VolumeService { caller.setCallback(caller.getTarget().deleteVolumeCallback(null, null)) .setContext(context); - dataStore.getDriver().deleteAsync(volume, caller); + dataStore.deleteVolumeAsync(volume, caller); return future; } @@ -236,19 +237,18 @@ public class VolumeServiceImpl implements VolumeService { return null; } - class CreateBaseImageContext extends AsyncRpcConext { + private class CreateBaseImageContext extends AsyncRpcConext { private final VolumeInfo volume; private final PrimaryDataStore dataStore; - private final TemplateInfo srcTemplate; + private final TemplateInfo template; private final AsyncCallFuture future; - public CreateBaseImageContext(AsyncCompletionCallback callback, VolumeInfo volume, PrimaryDataStore datastore, - TemplateInfo srcTemplate, + public CreateBaseImageContext(AsyncCompletionCallback callback, VolumeInfo volume, PrimaryDataStore datastore, TemplateInfo template, AsyncCallFuture future) { super(callback); this.volume = volume; this.dataStore = datastore; + this.template = template; this.future = future; - this.srcTemplate = srcTemplate; } public VolumeInfo getVolume() { @@ -258,9 +258,9 @@ public class VolumeServiceImpl implements VolumeService { public PrimaryDataStore getDataStore() { return this.dataStore; } - - public TemplateInfo getSrcTemplate() { - return this.srcTemplate; + + public TemplateInfo getTemplate() { + return this.template; } public AsyncCallFuture getFuture() { @@ -268,46 +268,48 @@ public class VolumeServiceImpl implements VolumeService { } } - - static class CreateBaseImageResult extends CommandResult { - final TemplateInfo template; - public CreateBaseImageResult(TemplateInfo template) { - super(); - this.template =
template; - } - } - @DB protected void createBaseImageAsync(VolumeInfo volume, PrimaryDataStore dataStore, TemplateInfo template, AsyncCallFuture future) { - CreateBaseImageContext context = new CreateBaseImageContext(null, volume, - dataStore, - template, - future); - - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); - caller.setCallback(caller.getTarget().createBaseImageCallback(null, null)) - .setContext(context); - - templateInstallStrategy.installAsync(template, dataStore, caller); + TemplateInfo templateOnPrimaryStoreObj = objectInDataStoreMgr.create(template, dataStore); + /*templateOnPrimaryStoreObj.stateTransit(ObjectInDataStoreStateMachine.Event.CreateRequested); + templateOnPrimaryStoreObj.updateStatus(Status.CREATING); + try { + dataStore.installTemplate(templateOnPrimaryStoreObj); + templateOnPrimaryStoreObj.updateStatus(Status.CREATED); + } catch (Exception e) { + templateOnPrimaryStoreObj.updateStatus(Status.ABANDONED); + templateOnPrimaryStoreObj.stateTransit(ObjectInDataStoreStateMachine.Event.OperationFailed); + VolumeApiResult result = new VolumeApiResult(volume); + result.setResult(e.toString()); + future.complete(result); + return; + } + + templateOnPrimaryStoreObj.updateStatus(Status.DOWNLOAD_IN_PROGRESS); + */ + CreateBaseImageContext context = new CreateBaseImageContext(null, volume, dataStore, templateOnPrimaryStoreObj, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().createBaseImageCallback(null, null)) + .setContext(context); + + objectInDataStoreMgr.update(templateOnPrimaryStoreObj, ObjectInDataStoreStateMachine.Event.CreateRequested); + + imageMotion.copyTemplateAsync(templateOnPrimaryStoreObj, template, caller); } @DB - protected Void createBaseImageCallback(AsyncCallbackDispatcher callback, CreateBaseImageContext context) { - CreateBaseImageResult result = callback.getResult(); - VolumeApiResult res = new VolumeApiResult(context.getVolume()); - - AsyncCallFuture future = context.getFuture(); - if (!result.isSuccess()) { - res.setResult(result.getResult()); - future.complete(res); - return null; + protected Void createBaseImageCallback(AsyncCallbackDispatcher callback, CreateBaseImageContext context) { + CommandResult result = callback.getResult(); + TemplateInfo templateOnPrimaryStoreObj = context.getTemplate(); + if (result.isSuccess()) { + objectInDataStoreMgr.update(templateOnPrimaryStoreObj, ObjectInDataStoreStateMachine.Event.OperationSuccessed); + } else { + objectInDataStoreMgr.update(templateOnPrimaryStoreObj, ObjectInDataStoreStateMachine.Event.OperationFailed); } - //now create volume on base image - TemplateInfo templateOnPrimaryStoreObj = result.template; + AsyncCallFuture future = context.getFuture(); VolumeInfo volume = context.getVolume(); PrimaryDataStore pd = context.getDataStore(); - createVolumeFromBaseImageAsync(volume, templateOnPrimaryStoreObj, pd, future); return null; } @@ -343,22 +345,19 @@ public class VolumeServiceImpl implements VolumeService { } CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext(null, vo, future); - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().createVolumeFromBaseImageCallback(null, null)) .setContext(context); - pd.getDriver().copyAsync(volume, templateOnPrimaryStore, caller); + pd.createVoluemFromBaseImageAsync(volume, templateOnPrimaryStore, caller); } 
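The method above completes the same AsyncCallFuture that createVolumeAsync hands back to its caller. A hedged sketch of the consumer side follows; it assumes AsyncCallFuture implements java.util.concurrent.Future, so get() blocks until createVolumeCallback (or a failure path) completes it, and that VolumeApiResult inherits isSuccess()/getResult() from CommandResult, as its uses above suggest. The wrapper class is illustrative, not part of the patch.

import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.disktype.VolumeDiskType;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.storage.volume.VolumeService.VolumeApiResult;
import com.cloud.utils.exception.CloudRuntimeException;

class VolumeServiceUsageSketch {
    static void demo(VolumeService volumeService, VolumeInfo volume,
            long dataStoreId, VolumeDiskType diskType) throws Exception {
        AsyncCallFuture future = volumeService.createVolumeAsync(volume, dataStoreId, diskType);
        // blocks until the data store driver fires the callback and the future is completed
        VolumeApiResult result = (VolumeApiResult) future.get();
        if (!result.isSuccess()) {
            throw new CloudRuntimeException("volume creation failed: " + result.getResult());
        }
    }
}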
@DB - public Object createVolumeFromBaseImageCallback(AsyncCallbackDispatcher callback, CreateVolumeFromBaseImageContext context) { + public Object createVolumeFromBaseImageCallback(AsyncCallbackDispatcher callback, CreateVolumeFromBaseImageContext context) { VolumeObject vo = context.getVolumeObject(); - CopyCommandResult result = callback.getResult(); + CommandResult result = callback.getResult(); VolumeApiResult volResult = new VolumeApiResult(vo); if (result.isSuccess()) { - if (result.getPath() != null) { - vo.setPath(result.getPath()); - } vo.stateTransit(Volume.Event.OperationSucceeded); } else { vo.stateTransit(Volume.Event.OperationFailed); @@ -372,9 +371,9 @@ public class VolumeServiceImpl implements VolumeService { @DB @Override - public AsyncCallFuture createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId, TemplateInfo template) { + public AsyncCallFuture createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId, VolumeDiskType diskType, TemplateInfo template) { PrimaryDataStore pd = dataStoreMgr.getPrimaryDataStore(dataStoreId); - TemplateInfo templateOnPrimaryStore = pd.getTemplate(template.getId()); + TemplateOnPrimaryDataStoreInfo templateOnPrimaryStore = pd.getTemplate(template); AsyncCallFuture future = new AsyncCallFuture(); VolumeApiResult result = new VolumeApiResult(volume); diff --git a/engine/storage/volume/test/org/apache/cloudstack/storage/volume/test/ConfiguratorTest.java b/engine/storage/volume/test/org/apache/cloudstack/storage/volume/test/ConfiguratorTest.java index 829694bd753..4ad20d536c2 100644 --- a/engine/storage/volume/test/org/apache/cloudstack/storage/volume/test/ConfiguratorTest.java +++ b/engine/storage/volume/test/org/apache/cloudstack/storage/volume/test/ConfiguratorTest.java @@ -18,8 +18,7 @@ */ package org.apache.cloudstack.storage.volume.test; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.junit.Assert.*; import java.util.HashMap; import java.util.List; @@ -28,11 +27,14 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.cloudstack.storage.datastore.configurator.PrimaryDataStoreConfigurator; +import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProviderManager; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mockito; +import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; @@ -43,15 +45,20 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(locations="classpath:/testContext.xml") public class ConfiguratorTest { - + @Inject + @Qualifier("defaultProvider") + List configurators; + @Inject List providers; + @Inject + PrimaryDataStoreProviderManager providerMgr; @Inject ClusterDao clusterDao; @Before public void setup() { - /* ClusterVO cluster = new ClusterVO(); + ClusterVO cluster = new ClusterVO(); cluster.setHypervisorType(HypervisorType.XenServer.toString()); Mockito.when(clusterDao.findById(Mockito.anyLong())).thenReturn(cluster); try { @@ -59,13 +66,13 @@ public class ConfiguratorTest { } catch (ConfigurationException e) { // TODO Auto-generated catch block 
e.printStackTrace(); - }*/ + } } @Test public void testLoadConfigurator() { - /*for (PrimaryDataStoreConfigurator configurator : configurators) { + for (PrimaryDataStoreConfigurator configurator : configurators) { System.out.println(configurator.getClass().getName()); - }*/ + } } @Test @@ -79,16 +86,16 @@ public class ConfiguratorTest { @Test public void getProvider() { - // assertNotNull(providerMgr.getDataStoreProvider("default primary data store provider")); + assertNotNull(providerMgr.getDataStoreProvider("default primary data store provider")); } @Test public void createDataStore() { - /*PrimaryDataStoreProvider provider = providerMgr.getDataStoreProvider("default primary data store provider"); + PrimaryDataStoreProvider provider = providerMgr.getDataStoreProvider("default primary data store provider"); Map params = new HashMap(); params.put("url", "nfs://localhost/mnt"); params.put("clusterId", "1"); params.put("name", "nfsprimary"); - assertNotNull(provider.registerDataStore(params));*/ + assertNotNull(provider.registerDataStore(params)); } } diff --git a/engine/storage/volume/test/resource/testContext.xml b/engine/storage/volume/test/resource/testContext.xml index 83fe842c722..67f242273f3 100644 --- a/engine/storage/volume/test/resource/testContext.xml +++ b/engine/storage/volume/test/resource/testContext.xml @@ -1,3 +1,21 @@ +<!-- +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. +--> diff --git a/framework/rest/pom.xml b/framework/rest/pom.xml index e9009bf72d1..2f890562190 100644 --- a/framework/rest/pom.xml +++ b/framework/rest/pom.xml @@ -1,3 +1,21 @@ +<!-- +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. +--> diff --git a/framework/rest/test/org/apache/cloudstack/framework/ws/jackson/CSJacksonAnnotationTest.java b/framework/rest/test/org/apache/cloudstack/framework/ws/jackson/CSJacksonAnnotationTest.java index 52b2d7fb9c6..fef6ba28e33 100644 --- a/framework/rest/test/org/apache/cloudstack/framework/ws/jackson/CSJacksonAnnotationTest.java +++ b/framework/rest/test/org/apache/cloudstack/framework/ws/jackson/CSJacksonAnnotationTest.java @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.framework.ws.jackson; import java.io.IOException; diff --git a/plugins/acl/static-role-based/src/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java b/plugins/acl/static-role-based/src/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java index 9236fbace4e..2eaa6b03bb5 100644 --- a/plugins/acl/static-role-based/src/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java +++ b/plugins/acl/static-role-based/src/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java @@ -16,164 +16,85 @@ // under the License.
package org.apache.cloudstack.acl; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.Arrays; +import static org.apache.cloudstack.acl.RoleType.Admin; +import static org.apache.cloudstack.acl.RoleType.DomainAdmin; +import static org.apache.cloudstack.acl.RoleType.ResourceAdmin; +import static org.apache.cloudstack.acl.RoleType.User; + +import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Properties; +import java.util.Set; import javax.ejb.Local; +import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; +import com.cloud.server.ManagementServer; import com.cloud.exception.PermissionDeniedException; -import com.cloud.user.Account; import com.cloud.user.AccountManager; -import com.cloud.user.User; -import com.cloud.utils.PropertiesUtil; import com.cloud.utils.component.AdapterBase; -import com.cloud.utils.component.Inject; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.PluggableService; -/* - * This is the default API access checker that grab's the user's account - * based on the account type, access is granted referring to commands in all *.properties files. - */ - -@Local(value=APIAccessChecker.class) -public class StaticRoleBasedAPIAccessChecker extends AdapterBase implements APIAccessChecker { +// This is the default API access checker: it grabs the user's account +// based on the account type, and grants access from the role-based api map. +@Component +@Local(value=APIChecker.class) +public class StaticRoleBasedAPIAccessChecker extends AdapterBase implements APIChecker { protected static final Logger s_logger = Logger.getLogger(StaticRoleBasedAPIAccessChecker.class); - public static final short ADMIN_COMMAND = 1; - public static final short DOMAIN_ADMIN_COMMAND = 4; - public static final short RESOURCE_DOMAIN_ADMIN_COMMAND = 2; - public static final short USER_COMMAND = 8; - private static List s_userCommands = null; - private static List s_resellerCommands = null; // AKA domain-admin - private static List s_adminCommands = null; - private static List s_resourceDomainAdminCommands = null; - private static List s_allCommands = null; - protected @Inject AccountManager _accountMgr; - @Inject protected List _services; + private static Map<RoleType, Set<String>> s_roleBasedApisMap = + new HashMap<RoleType, Set<String>>(); + + @Inject List _services; protected StaticRoleBasedAPIAccessChecker() { super(); - s_allCommands = new ArrayList(); - s_userCommands = new ArrayList(); - s_resellerCommands = new ArrayList(); - s_adminCommands = new ArrayList(); - s_resourceDomainAdminCommands = new ArrayList(); + for (RoleType roleType: RoleType.values()) + s_roleBasedApisMap.put(roleType, new HashSet<String>()); } @Override - public boolean canAccessAPI(User user, String apiCommandName) - throws PermissionDeniedException{ - - boolean commandExists = s_allCommands.contains(apiCommandName); - - if(commandExists && user != null){ - Long accountId = user.getAccountId(); - Account userAccount = _accountMgr.getAccount(accountId); - short accountType = userAccount.getType(); - return isCommandAvailableForAccount(accountType, apiCommandName); + public boolean checkAccess(RoleType roleType, String commandName) + throws PermissionDeniedException { + boolean isAllowed = s_roleBasedApisMap.get(roleType).contains(commandName); + if (!isAllowed) { + throw
new PermissionDeniedException("The API does not exist or is blacklisted. Role type=" + roleType.toString() + " is not allowed to request the api: " + commandName); } - - return commandExists; - } - - private static boolean isCommandAvailableForAccount(short accountType, String commandName) { - boolean isCommandAvailable = false; - switch (accountType) { - case Account.ACCOUNT_TYPE_ADMIN: - isCommandAvailable = s_adminCommands.contains(commandName); - break; - case Account.ACCOUNT_TYPE_DOMAIN_ADMIN: - isCommandAvailable = s_resellerCommands.contains(commandName); - break; - case Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN: - isCommandAvailable = s_resourceDomainAdminCommands.contains(commandName); - break; - case Account.ACCOUNT_TYPE_NORMAL: - isCommandAvailable = s_userCommands.contains(commandName); - break; - } - return isCommandAvailable; + return isAllowed; } @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); - - List configFiles = new ArrayList(); + _services.add((PluggableService) ComponentContext.getComponent(ManagementServer.Name)); for (PluggableService service : _services) { - configFiles.addAll(Arrays.asList(service.getPropertiesFiles())); + processConfigFiles(service.getProperties(), service.getClass().toString()); + s_logger.info("Processed role based acl for: " + service.toString()); } - - processConfigFiles(configFiles); return true; } - private void processConfigFiles(List configFiles) { - Properties preProcessedCommands = new Properties(); - - for (String configFile : configFiles) { - File commandsFile = PropertiesUtil.findConfigFile(configFile); - if (commandsFile != null) { - try { - preProcessedCommands.load(new FileInputStream(commandsFile)); - } catch (FileNotFoundException fnfex) { - // in case of a file within a jar in classpath, try to open stream using url - InputStream stream = PropertiesUtil.openStreamFromURL(configFile); - if (stream != null) { - try { - preProcessedCommands.load(stream); - } catch (IOException e) { - s_logger.error("IO Exception, unable to find properties file:", fnfex); - } - } else { - s_logger.error("Unable to find properites file", fnfex); - } - } catch (IOException ioe) { - s_logger.error("IO Exception loading properties file", ioe); - } - } - } - - for (Object key : preProcessedCommands.keySet()) { - String preProcessedCommand = preProcessedCommands.getProperty((String) key); - int splitIndex = preProcessedCommand.lastIndexOf(";"); - // Backward compatible to old style, apiname=pkg;mask - String mask = preProcessedCommand.substring(splitIndex+1); - + private void processConfigFiles(Map configMap, String service) { + for (Map.Entry entry: configMap.entrySet()) { + String apiName = entry.getKey(); + String roleMask = entry.getValue(); try { - short cmdPermissions = Short.parseShort(mask); - if ((cmdPermissions & ADMIN_COMMAND) != 0) { - s_adminCommands.add((String) key); + short cmdPermissions = Short.parseShort(roleMask); + for (RoleType roleType: RoleType.values()) { + if ((cmdPermissions & roleType.getValue()) != 0) + s_roleBasedApisMap.get(roleType).add(apiName); } - if ((cmdPermissions & RESOURCE_DOMAIN_ADMIN_COMMAND) != 0) { - s_resourceDomainAdminCommands.add((String) key); - } - if ((cmdPermissions & DOMAIN_ADMIN_COMMAND) != 0) { - s_resellerCommands.add((String) key); - } - if ((cmdPermissions & USER_COMMAND) != 0) { - s_userCommands.add((String) key); - } - s_allCommands.addAll(s_adminCommands); - s_allCommands.addAll(s_resourceDomainAdminCommands); - 
s_allCommands.addAll(s_userCommands); - s_allCommands.addAll(s_resellerCommands); } catch (NumberFormatException nfe) { - s_logger.info("Malformed command.properties permissions value, key = " + key + ", value = " + preProcessedCommand); + s_logger.info("Malformed getProperties() value for service: " + service + + " for entry: " + entry.toString()); } } } - } diff --git a/plugins/api/discovery/src/org/apache/cloudstack/api/command/user/discovery/ListApisCmd.java b/plugins/api/discovery/src/org/apache/cloudstack/api/command/user/discovery/ListApisCmd.java index dcbaec1d160..ed3e1751027 100644 --- a/plugins/api/discovery/src/org/apache/cloudstack/api/command/user/discovery/ListApisCmd.java +++ b/plugins/api/discovery/src/org/apache/cloudstack/api/command/user/discovery/ListApisCmd.java @@ -16,9 +16,12 @@ // under the License. package org.apache.cloudstack.api.command.user.discovery; +import com.cloud.user.UserContext; +import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd; -import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.PlugService; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.ListResponse; @@ -27,8 +30,8 @@ import org.apache.cloudstack.api.response.ApiDiscoveryResponse; import org.apache.log4j.Logger; -@APICommand(name = "listApis", responseObject = ApiDiscoveryResponse.class, description = "lists all available apis on the server, provided by Api Discovery plugin", since = "4.1.0") -public class ListApisCmd extends BaseListCmd { +@APICommand(name = "listApis", responseObject = ApiDiscoveryResponse.class, description = "lists all available apis on the server, provided by the Api Discovery plugin", since = "4.1.0") +public class ListApisCmd extends BaseCmd { public static final Logger s_logger = Logger.getLogger(ListApisCmd.class.getName()); private static final String s_name = "listapisresponse"; @@ -36,12 +39,16 @@ public class ListApisCmd extends BaseListCmd { @PlugService ApiDiscoveryService _apiDiscoveryService; + @Parameter(name=ApiConstants.NAME, type=CommandType.STRING, description="API name") + private String name; + @Override public void execute() throws ServerApiException { if (_apiDiscoveryService != null) { - ListResponse response = (ListResponse) _apiDiscoveryService.listApis(); + RoleType roleType = _accountService.getRoleType(UserContext.current().getCaller()); + ListResponse response = (ListResponse) _apiDiscoveryService.listApis(roleType, name); if (response == null) { - throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Api Discovery plugin was unable to find and process any apis"); + throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Api Discovery plugin was unable to find an api by that name or process any apis"); } response.setResponseName(getCommandName()); this.setResponseObject(response); @@ -52,4 +59,10 @@ public class ListApisCmd extends BaseListCmd { public String getCommandName() { return s_name; } + + @Override + public long getEntityOwnerId() { + // no owner is needed for list command + return 0; + } } diff --git a/plugins/api/discovery/src/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java b/plugins/api/discovery/src/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java index dd1298bfec5..de6a9f93965 100644 --- 
a/plugins/api/discovery/src/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java +++ b/plugins/api/discovery/src/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java @@ -16,18 +16,15 @@ // under the License. package org.apache.cloudstack.api.response; -import com.cloud.user.Account; import org.apache.cloudstack.api.ApiConstants; import com.cloud.serializer.Param; import com.google.gson.annotations.SerializedName; import org.apache.cloudstack.api.BaseResponse; -import org.apache.cloudstack.api.EntityReference; import java.util.HashSet; import java.util.Set; @SuppressWarnings("unused") -@EntityReference(value = Account.class) public class ApiDiscoveryResponse extends BaseResponse { @SerializedName(ApiConstants.NAME) @Param(description="the name of the api command") private String name; @@ -41,11 +38,18 @@ public class ApiDiscoveryResponse extends BaseResponse { @SerializedName(ApiConstants.IS_ASYNC) @Param(description="true if api is asynchronous") private Boolean isAsync; + @SerializedName("related") @Param(description="comma separated related apis") + private String related; + @SerializedName(ApiConstants.PARAMS) @Param(description="the list params the api accepts", responseObject = ApiParameterResponse.class) private Set params; + @SerializedName(ApiConstants.RESPONSE) @Param(description="api response fields", responseObject = ApiResponseResponse.class) + private Set apiResponse; + public ApiDiscoveryResponse(){ params = new HashSet(); + apiResponse = new HashSet(); isAsync = false; } @@ -65,6 +69,18 @@ public class ApiDiscoveryResponse extends BaseResponse { this.isAsync = isAsync; } + public String getRelated() { + return related; + } + + public void setRelated(String related) { + this.related = related; + } + + public Set getParams() { + return params; + } + public void setParams(Set params) { this.params = params; } @@ -72,4 +88,8 @@ public class ApiDiscoveryResponse extends BaseResponse { public void addParam(ApiParameterResponse param) { this.params.add(param); } + + public void addApiResponse(ApiResponseResponse apiResponse) { + this.apiResponse.add(apiResponse); + } } diff --git a/plugins/api/discovery/src/org/apache/cloudstack/api/response/ApiParameterResponse.java b/plugins/api/discovery/src/org/apache/cloudstack/api/response/ApiParameterResponse.java index 9138288e102..fa6dc1752d2 100644 --- a/plugins/api/discovery/src/org/apache/cloudstack/api/response/ApiParameterResponse.java +++ b/plugins/api/discovery/src/org/apache/cloudstack/api/response/ApiParameterResponse.java @@ -40,6 +40,9 @@ public class ApiParameterResponse extends BaseResponse { @SerializedName(ApiConstants.SINCE) @Param(description="version of CloudStack the api was introduced in") private String since; + @SerializedName("related") @Param(description="comma separated related apis to get the parameter") + private String related; + public ApiParameterResponse(){ } @@ -67,4 +70,12 @@ public class ApiParameterResponse extends BaseResponse { this.since = since; } + public String getRelated() { + return related; + } + + public void setRelated(String related) { + this.related = related; + } + } diff --git a/plugins/api/discovery/src/org/apache/cloudstack/api/response/ApiResponseResponse.java b/plugins/api/discovery/src/org/apache/cloudstack/api/response/ApiResponseResponse.java new file mode 100644 index 00000000000..b96295e1290 --- /dev/null +++ b/plugins/api/discovery/src/org/apache/cloudstack/api/response/ApiResponseResponse.java @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) 
under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.ApiConstants; +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.api.BaseResponse; + +public class ApiResponseResponse extends BaseResponse { + @SerializedName(ApiConstants.NAME) @Param(description="the name of the api response field") + private String name; + + @SerializedName(ApiConstants.DESCRIPTION) @Param(description="description of the api response field") + private String description; + + @SerializedName(ApiConstants.TYPE) @Param(description="response field type") + private String type; + + public void setName(String name) { + this.name = name; + } + + public void setDescription(String description) { + this.description = description; + } + + public void setType(String type) { + this.type = type; + } +} diff --git a/api/src/org/apache/cloudstack/discovery/ApiDiscoveryService.java b/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryService.java similarity index 80% rename from api/src/org/apache/cloudstack/discovery/ApiDiscoveryService.java rename to plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryService.java index 12206949db3..611493bfc08 100644 --- a/api/src/org/apache/cloudstack/discovery/ApiDiscoveryService.java +++ b/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryService.java @@ -16,14 +16,11 @@ // under the License. package org.apache.cloudstack.discovery; -import com.cloud.utils.component.Adapter; import com.cloud.utils.component.PluggableService; +import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.BaseResponse; import org.apache.cloudstack.api.response.ListResponse; -import java.util.Map; - -public interface ApiDiscoveryService extends Adapter, PluggableService { - ListResponse listApis(); - Map> getApiNameCmdClassMapping(); +public interface ApiDiscoveryService extends PluggableService { + ListResponse listApis(RoleType roleType, String apiName); } diff --git a/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java b/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java index 5363e559a5f..6ff4085b047 100644 --- a/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java +++ b/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java @@ -16,8 +16,14 @@ // under the License. 
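Before the discovery service below: both StaticRoleBasedAPIAccessChecker.processConfigFiles() above and getApiNameRoleTypeListMap() below decode the same role bitmask that PluggableService.getProperties() publishes per api. A worked example follows, using the bit values of the constants this patch removes (Admin=1, ResourceAdmin=2, DomainAdmin=4, User=8), which RoleType.getValue() is assumed to mirror; RoleMaskSketch is an illustrative name, not part of the patch.

import java.util.HashSet;
import java.util.Set;
import org.apache.cloudstack.acl.RoleType;

class RoleMaskSketch {
    static Set decode(String roleMask) {
        // e.g. "15" = 1|2|4|8, the mask getProperties() below publishes for listApis
        short mask = Short.parseShort(roleMask);
        Set allowed = new HashSet();
        for (RoleType roleType : RoleType.values()) {
            if ((mask & roleType.getValue()) != 0) {
                allowed.add(roleType); // same bit test as processConfigFiles() above
            }
        }
        return allowed; // all four role types for "15"
    }
}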
package org.apache.cloudstack.discovery; +import com.cloud.serializer.Param; +import com.cloud.server.ManagementServer; import com.cloud.utils.ReflectUtil; -import com.cloud.utils.component.AdapterBase; +import com.cloud.utils.StringUtils; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.component.PluggableService; +import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.BaseAsyncCmd; @@ -26,54 +32,77 @@ import org.apache.cloudstack.api.BaseResponse; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.ApiDiscoveryResponse; import org.apache.cloudstack.api.response.ApiParameterResponse; +import org.apache.cloudstack.api.response.ApiResponseResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import javax.ejb.Local; -import javax.naming.ConfigurationException; +import javax.inject.Inject; import java.lang.reflect.Field; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +@Component @Local(value = ApiDiscoveryService.class) -public class ApiDiscoveryServiceImpl extends AdapterBase implements ApiDiscoveryService { - +public class ApiDiscoveryServiceImpl implements ApiDiscoveryService { private static final Logger s_logger = Logger.getLogger(ApiDiscoveryServiceImpl.class); - private Map<String, Class<?>> _apiNameCmdClassMap; - private ListResponse _discoveryResponse; + + private static Map<RoleType, List<ApiDiscoveryResponse>> _roleTypeDiscoveryResponseListMap; + + private static Map _apiNameDiscoveryResponseMap = + new HashMap(); + + private static Map<String, List<RoleType>> _apiNameRoleTypeListMap = null; + + @Inject List _services; protected ApiDiscoveryServiceImpl() { super(); - } - - private void generateApiNameCmdClassMapping() { - _apiNameCmdClassMap = new HashMap<String, Class<?>>(); - Set<Class<?>> cmdClasses = ReflectUtil.getClassesWithAnnotation(APICommand.class, new String[]{"org.apache.cloudstack.api", "com.cloud.api"}); - - for(Class cmdClass: cmdClasses) { - String apiName = cmdClass.getAnnotation(APICommand.class).name(); - if (_apiNameCmdClassMap.containsKey(apiName)) { - s_logger.error("API Cmd class " + cmdClass.getName() + " has non-unique apiname" + apiName); - continue; - } - _apiNameCmdClassMap.put(apiName, cmdClass); + if (_roleTypeDiscoveryResponseListMap == null) { + long startTime = System.nanoTime(); + _roleTypeDiscoveryResponseListMap = new HashMap<RoleType, List<ApiDiscoveryResponse>>(); + for (RoleType roleType: RoleType.values()) + _roleTypeDiscoveryResponseListMap.put(roleType, new ArrayList()); + cacheResponseMap(); + long endTime = System.nanoTime(); + s_logger.info("Api Discovery Service: Annotation, docstrings, api relation graph processed in " + (endTime - startTime) / 1000000.0 + " ms"); } } - private void precacheListApiResponse() { + private Map<String, List<RoleType>> getApiNameRoleTypeListMap() { + Map<String, List<RoleType>> apiNameRoleTypeMap = new HashMap<String, List<RoleType>>(); + _services.add((PluggableService) ComponentContext.getComponent(ManagementServer.Name)); + for (PluggableService service : _services) { + for (Map.Entry entry: service.getProperties().entrySet()) { + String apiName = entry.getKey(); + String roleMask = entry.getValue(); + try { + short cmdPermissions = Short.parseShort(roleMask); + if (!apiNameRoleTypeMap.containsKey(apiName)) + apiNameRoleTypeMap.put(apiName, new ArrayList()); + for (RoleType roleType:
RoleType.values()) { + if ((cmdPermissions & roleType.getValue()) != 0) + apiNameRoleTypeMap.get(apiName).add(roleType); + } + } catch (NumberFormatException nfe) { + // skip entries whose role mask is not a number + } + } + } + return apiNameRoleTypeMap; + } - if(_apiNameCmdClassMap == null) - return; + private void cacheResponseMap() { + Set<Class<?>> cmdClasses = ReflectUtil.getClassesWithAnnotation(APICommand.class, + new String[]{"org.apache.cloudstack.api", "com.cloud.api"}); - _discoveryResponse = new ListResponse(); + Map<String, List<String>> responseApiNameListMap = new HashMap<String, List<String>>(); - List apiDiscoveryResponses = new ArrayList(); - - for(String key: _apiNameCmdClassMap.keySet()) { - Class cmdClass = _apiNameCmdClassMap.get(key); + for(Class cmdClass: cmdClasses) { APICommand apiCmdAnnotation = cmdClass.getAnnotation(APICommand.class); if (apiCmdAnnotation == null) apiCmdAnnotation = cmdClass.getSuperclass().getAnnotation(APICommand.class); @@ -82,10 +111,33 @@ public class ApiDiscoveryServiceImpl extends AdapterBase implements ApiDiscovery || apiCmdAnnotation.name().isEmpty()) continue; + String apiName = apiCmdAnnotation.name(); + String responseName = apiCmdAnnotation.responseObject().getName(); + if (!responseName.contains("SuccessResponse")) { + if (!responseApiNameListMap.containsKey(responseName)) + responseApiNameListMap.put(responseName, new ArrayList()); + responseApiNameListMap.get(responseName).add(apiName); + } ApiDiscoveryResponse response = new ApiDiscoveryResponse(); - response.setName(apiCmdAnnotation.name()); + response.setName(apiName); response.setDescription(apiCmdAnnotation.description()); - response.setSince(apiCmdAnnotation.since()); + if (!apiCmdAnnotation.since().isEmpty()) + response.setSince(apiCmdAnnotation.since()); + response.setRelated(responseName); + + Field[] responseFields = apiCmdAnnotation.responseObject().getDeclaredFields(); + for(Field responseField: responseFields) { + SerializedName serializedName = responseField.getAnnotation(SerializedName.class); + if(serializedName != null) { + ApiResponseResponse responseResponse = new ApiResponseResponse(); + responseResponse.setName(serializedName.value()); + Param param = responseField.getAnnotation(Param.class); + if (param != null) + responseResponse.setDescription(param.description()); + responseResponse.setType(responseField.getType().getSimpleName().toLowerCase()); + response.addApiResponse(responseResponse); + } + } Field[] fields = ReflectUtil.getAllFieldsForClass(cmdClass, new Class[] {BaseCmd.class, BaseAsyncCmd.class, BaseAsyncCreateCmd.class}); @@ -104,41 +156,80 @@ public class ApiDiscoveryServiceImpl extends AdapterBase implements ApiDiscovery ApiParameterResponse paramResponse = new ApiParameterResponse(); paramResponse.setName(parameterAnnotation.name()); paramResponse.setDescription(parameterAnnotation.description()); - paramResponse.setType(parameterAnnotation.type().toString()); + paramResponse.setType(parameterAnnotation.type().toString().toLowerCase()); paramResponse.setLength(parameterAnnotation.length()); paramResponse.setRequired(parameterAnnotation.required()); - paramResponse.setSince(parameterAnnotation.since()); + if (!parameterAnnotation.since().isEmpty()) + paramResponse.setSince(parameterAnnotation.since()); + paramResponse.setRelated(parameterAnnotation.entityType()[0].getName()); response.addParam(paramResponse); } } - response.setObjectName("apis"); - apiDiscoveryResponses.add(response); + response.setObjectName("api"); + _apiNameDiscoveryResponseMap.put(apiName, response); + } + + for (String apiName: _apiNameDiscoveryResponseMap.keySet()) { +
ApiDiscoveryResponse response = _apiNameDiscoveryResponseMap.get(apiName); + Set processedParams = new HashSet(); + for (ApiParameterResponse param: response.getParams()) { + if (responseApiNameListMap.containsKey(param.getRelated())) { + List relatedApis = responseApiNameListMap.get(param.getRelated()); + param.setRelated(StringUtils.join(relatedApis, ",")); + } else { + param.setRelated(null); + } + processedParams.add(param); + } + response.setParams(processedParams); + + if (responseApiNameListMap.containsKey(response.getRelated())) { + List relatedApis = responseApiNameListMap.get(response.getRelated()); + relatedApis.remove(apiName); + response.setRelated(StringUtils.join(relatedApis, ",")); + } else { + response.setRelated(null); + } + _apiNameDiscoveryResponseMap.put(apiName, response); } - _discoveryResponse.setResponses(apiDiscoveryResponses); } @Override - public boolean configure(String name, Map params) - throws ConfigurationException { - super.configure(name, params); + public ListResponse listApis(RoleType roleType, String name) { + // Builds the role-based response list cache the first time listApis is called. + // Due to how adapters work, this cannot be done when the management server loads. + if (_apiNameRoleTypeListMap == null) { + long startTime = System.nanoTime(); + _apiNameRoleTypeListMap = getApiNameRoleTypeListMap(); + for (Map.Entry<String, List<RoleType>> entry: _apiNameRoleTypeListMap.entrySet()) { + String apiName = entry.getKey(); + for (RoleType roleTypeInList: entry.getValue()) { + _roleTypeDiscoveryResponseListMap.get(roleTypeInList).add( + _apiNameDiscoveryResponseMap.get(apiName)); + } + } + long endTime = System.nanoTime(); + s_logger.info("Api Discovery Service: List apis cached in " + (endTime - startTime) / 1000000.0 + " ms"); + } + ListResponse response = new ListResponse(); + if (name != null) { + if (!_apiNameDiscoveryResponseMap.containsKey(name)) + return null; - generateApiNameCmdClassMapping(); - precacheListApiResponse(); + List singleResponse = new ArrayList(); + singleResponse.add(_apiNameDiscoveryResponseMap.get(name)); + response.setResponses(singleResponse); - return true; - } - - public Map<String, Class<?>> getApiNameCmdClassMapping() { - return _apiNameCmdClassMap; + } else { + response.setResponses(_roleTypeDiscoveryResponseListMap.get(roleType)); + } + return response; } @Override - public ListResponse listApis() { - return _discoveryResponse; - } - - @Override - public String[] getPropertiesFiles() { - return new String[] { "api-discovery_commands.properties" }; + public Map getProperties() { + Map apiDiscoveryPropertyMap = new HashMap(); + apiDiscoveryPropertyMap.put("listApis", "15"); + return apiDiscoveryPropertyMap; } } diff --git 
a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/CreateLunCmd.java b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/CreateLunCmd.java index 8c89730b978..c8d8d04e6f1 100644 --- a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/CreateLunCmd.java +++ b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/CreateLunCmd.java @@ -33,7 +33,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.netapp.NetappManager; import com.cloud.server.ManagementService; import com.cloud.server.api.response.netapp.CreateLunCmdResponse; -import com.cloud.utils.component.ComponentLocator; + @APICommand(name = "createLunOnFiler", description="Create a LUN from a pool", responseObject = CreateLunCmdResponse.class) public class CreateLunCmd extends BaseCmd { diff --git a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/CreateVolumeOnFilerCmd.java b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/CreateVolumeOnFilerCmd.java index a2d4b96e6dd..72a9efae978 100644 --- a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/CreateVolumeOnFilerCmd.java +++ b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/CreateVolumeOnFilerCmd.java @@ -32,7 +32,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.netapp.NetappManager; import com.cloud.server.ManagementService; import com.cloud.server.api.response.netapp.CreateVolumeOnFilerCmdResponse; -import com.cloud.utils.component.ComponentLocator; + @APICommand(name = "createVolumeOnFiler", description="Create a volume", responseObject = CreateVolumeOnFilerCmdResponse.class) public class CreateVolumeOnFilerCmd extends BaseCmd { diff --git a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/CreateVolumePoolCmd.java b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/CreateVolumePoolCmd.java index 9e38c5fc097..f7ff567e838 100644 --- a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/CreateVolumePoolCmd.java +++ b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/CreateVolumePoolCmd.java @@ -31,7 +31,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.netapp.NetappManager; import com.cloud.server.ManagementService; import com.cloud.server.api.response.netapp.CreateVolumePoolCmdResponse; -import com.cloud.utils.component.ComponentLocator; + @APICommand(name = "createPool", description="Create a pool", responseObject = CreateVolumePoolCmdResponse.class) public class CreateVolumePoolCmd extends BaseCmd { diff --git a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DeleteVolumePoolCmd.java b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DeleteVolumePoolCmd.java index 1105ea53e9d..7106c580125 100644 --- a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DeleteVolumePoolCmd.java +++ b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DeleteVolumePoolCmd.java @@ -33,7 +33,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.netapp.NetappManager; import com.cloud.server.ManagementService; import com.cloud.server.api.response.netapp.DeleteVolumePoolCmdResponse; -import com.cloud.utils.component.ComponentLocator; + @APICommand(name = "deletePool", description="Delete a pool", responseObject = DeleteVolumePoolCmdResponse.class) public class DeleteVolumePoolCmd extends BaseCmd { diff --git a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DestroyLunCmd.java 
b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DestroyLunCmd.java index c5f7b117f5b..8afd14342d4 100644 --- a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DestroyLunCmd.java +++ b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DestroyLunCmd.java @@ -33,7 +33,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.netapp.NetappManager; import com.cloud.server.ManagementService; import com.cloud.server.api.response.netapp.DeleteLUNCmdResponse; -import com.cloud.utils.component.ComponentLocator; + @APICommand(name = "destroyLunOnFiler", description="Destroy a LUN", responseObject = DeleteLUNCmdResponse.class) public class DestroyLunCmd extends BaseCmd { diff --git a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DestroyVolumeOnFilerCmd.java b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DestroyVolumeOnFilerCmd.java index 4ddc0c9f6d0..730f1c0bb43 100644 --- a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DestroyVolumeOnFilerCmd.java +++ b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DestroyVolumeOnFilerCmd.java @@ -31,7 +31,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.netapp.NetappManager; import com.cloud.server.ManagementService; import com.cloud.server.api.response.netapp.DeleteVolumeOnFilerCmdResponse; -import com.cloud.utils.component.ComponentLocator; + @APICommand(name = "destroyVolumeOnFiler", description="Destroy a Volume", responseObject = DeleteVolumeOnFilerCmdResponse.class) public class DestroyVolumeOnFilerCmd extends BaseCmd { diff --git a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DissociateLunCmd.java b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DissociateLunCmd.java index 0a6c1a70ef1..5061f497521 100644 --- a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DissociateLunCmd.java +++ b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/DissociateLunCmd.java @@ -30,7 +30,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.netapp.NetappManager; import com.cloud.server.ManagementService; import com.cloud.server.api.response.netapp.DissociateLunCmdResponse; -import com.cloud.utils.component.ComponentLocator; + @APICommand(name = "dissociateLun", description="Dissociate a LUN", responseObject = DissociateLunCmdResponse.class) public class DissociateLunCmd extends BaseCmd { diff --git a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ListLunsCmd.java b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ListLunsCmd.java index 630b14994e7..7c2ed45ebc3 100644 --- a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ListLunsCmd.java +++ b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ListLunsCmd.java @@ -36,7 +36,7 @@ import com.cloud.netapp.LunVO; import com.cloud.netapp.NetappManager; import com.cloud.server.ManagementService; import com.cloud.server.api.response.netapp.ListLunsCmdResponse; -import com.cloud.utils.component.ComponentLocator; + @APICommand(name = "listLunsOnFiler", description="List LUN", responseObject = ListLunsCmdResponse.class) public class ListLunsCmd extends BaseCmd diff --git a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ListVolumePoolsCmd.java b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ListVolumePoolsCmd.java index d77f4fad849..5857f4340af 100644 --- 
a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ListVolumePoolsCmd.java +++ b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ListVolumePoolsCmd.java @@ -34,7 +34,7 @@ import com.cloud.netapp.NetappManager; import com.cloud.netapp.PoolVO; import com.cloud.server.ManagementService; import com.cloud.server.api.response.netapp.ListVolumePoolsCmdResponse; -import com.cloud.utils.component.ComponentLocator; + @APICommand(name = "listPools", description="List Pool", responseObject = ListVolumePoolsCmdResponse.class) public class ListVolumePoolsCmd extends BaseCmd { diff --git a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ListVolumesOnFilerCmd.java b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ListVolumesOnFilerCmd.java index 66a96f3a221..17548cd65be 100644 --- a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ListVolumesOnFilerCmd.java +++ b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ListVolumesOnFilerCmd.java @@ -33,7 +33,7 @@ import com.cloud.netapp.NetappManager; import com.cloud.netapp.NetappVolumeVO; import com.cloud.server.ManagementService; import com.cloud.server.api.response.netapp.ListVolumesOnFilerCmdResponse; -import com.cloud.utils.component.ComponentLocator; + @APICommand(name = "listVolumesOnFiler", description="List Volumes", responseObject = ListVolumesOnFilerCmdResponse.class) public class ListVolumesOnFilerCmd extends BaseCmd { diff --git a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ModifyVolumePoolCmd.java b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ModifyVolumePoolCmd.java index 3e32caebef3..6282a648a99 100644 --- a/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ModifyVolumePoolCmd.java +++ b/plugins/file-systems/netapp/src/com/cloud/api/commands/netapp/ModifyVolumePoolCmd.java @@ -31,7 +31,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.netapp.NetappManager; import com.cloud.server.ManagementService; import com.cloud.server.api.response.netapp.ModifyVolumePoolCmdResponse; -import com.cloud.utils.component.ComponentLocator; + @APICommand(name = "modifyPool", description="Modify pool", responseObject = ModifyVolumePoolCmdResponse.class) public class ModifyVolumePoolCmd extends BaseCmd { diff --git a/plugins/file-systems/netapp/src/com/cloud/netapp/NetappManagerImpl.java b/plugins/file-systems/netapp/src/com/cloud/netapp/NetappManagerImpl.java index f98a15c8d0a..7fe22dd1c1b 100644 --- a/plugins/file-systems/netapp/src/com/cloud/netapp/NetappManagerImpl.java +++ b/plugins/file-systems/netapp/src/com/cloud/netapp/NetappManagerImpl.java @@ -662,7 +662,7 @@ public class NetappManagerImpl implements NetappManager lun = _lunDao.persist(lun); //Lun id created: 6 digits right justified eg. 
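The NetappManagerImpl hunk below pads the LUN id to the six-digit, right-justified form the comment describes (45 becomes "000045") by slicing a literal "000000" prefix. The same padding fits in one formatting call; a sketch, not the committed code:

    public class LunIdPadSketch {
        // Pad a numeric LUN id to six digits, e.g. 45 -> "000045".
        static String padLunId(long lunId) {
            return String.format("%06d", lunId);
        }

        public static void main(String[] args) {
            System.out.println(padLunId(45L));     // 000045
            System.out.println(padLunId(123456L)); // 123456
        }
    }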
000045 - String lunIdStr = lun.getId().toString(); + String lunIdStr = String.valueOf(lun.getId()); String zeroStr = "000000"; int length = lunIdStr.length(); int offset = 6-length; diff --git a/plugins/host-allocators/random/src/com/cloud/agent/manager/allocator/impl/RandomAllocator.java b/plugins/host-allocators/random/src/com/cloud/agent/manager/allocator/impl/RandomAllocator.java index 241b114589a..c302cdd293f 100755 --- a/plugins/host-allocators/random/src/com/cloud/agent/manager/allocator/impl/RandomAllocator.java +++ b/plugins/host-allocators/random/src/com/cloud/agent/manager/allocator/impl/RandomAllocator.java @@ -36,7 +36,6 @@ import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.offering.ServiceOffering; import com.cloud.resource.ResourceManager; -import com.cloud.utils.component.ComponentLocator; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @@ -53,39 +52,39 @@ public class RandomAllocator implements HostAllocator { ExcludeList avoid, int returnUpTo) { return allocateTo(vmProfile, plan, type, avoid, returnUpTo, true); } - + @Override public List allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan plan, Type type, - ExcludeList avoid, int returnUpTo, boolean considerReservedCapacity) { + ExcludeList avoid, int returnUpTo, boolean considerReservedCapacity) { + + long dcId = plan.getDataCenterId(); + Long podId = plan.getPodId(); + Long clusterId = plan.getClusterId(); + ServiceOffering offering = vmProfile.getServiceOffering(); + + List suitableHosts = new ArrayList(); - long dcId = plan.getDataCenterId(); - Long podId = plan.getPodId(); - Long clusterId = plan.getClusterId(); - ServiceOffering offering = vmProfile.getServiceOffering(); - - List suitableHosts = new ArrayList(); - if (type == Host.Type.Storage) { return suitableHosts; } String hostTag = offering.getHostTag(); if(hostTag != null){ - s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + " having host tag:" + hostTag); + s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + " having host tag:" + hostTag); }else{ - s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); + s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); } // list all computing hosts, regardless of whether they support routing...it's random after all List hosts = new ArrayList(); if(hostTag != null){ - hosts = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTag); + hosts = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTag); }else{ - hosts = _resourceMgr.listAllUpAndEnabledHosts(type, clusterId, podId, dcId); + hosts = _resourceMgr.listAllUpAndEnabledHosts(type, clusterId, podId, dcId); } - + s_logger.debug("Random Allocator found " + hosts.size() + " hosts"); - + if (hosts.size() == 0) { return suitableHosts; } @@ -93,12 +92,12 @@ public class RandomAllocator implements HostAllocator { Collections.shuffle(hosts); for (Host host : hosts) { - if(suitableHosts.size() == returnUpTo){ - break; - } - + if(suitableHosts.size() == returnUpTo){ + break; + } + if (!avoid.shouldAvoid(host)) { - suitableHosts.add(host); + suitableHosts.add(host); }else{ if (s_logger.isDebugEnabled()) { s_logger.debug("Host name: " + host.getName() + ", hostId: "+ host.getId() +" is in avoid set, skipping this and trying other available hosts"); @@ -121,7 +120,7 @@ public class RandomAllocator implements HostAllocator { 
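RandomAllocator.allocateTo() above reduces to: list the candidate hosts, shuffle them, then keep the first returnUpTo hosts that are not in the avoid set. The policy in isolation, with the host type reduced to a type parameter for illustration:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.Set;

    class RandomPickSketch {
        // Shuffle candidates, skip avoided ones, stop at returnUpTo.
        static <T> List<T> pick(List<T> candidates, Set<T> avoid, int returnUpTo) {
            List<T> shuffled = new ArrayList<T>(candidates);
            Collections.shuffle(shuffled);
            List<T> suitable = new ArrayList<T>();
            for (T host : shuffled) {
                if (suitable.size() == returnUpTo) {
                    break;
                }
                if (!avoid.contains(host)) {
                    suitable.add(host);
                }
            }
            return suitable;
        }

        public static void main(String[] args) {
            List<String> hosts = Arrays.asList("h1", "h2", "h3", "h4");
            // Two random hosts, never "h2".
            System.out.println(pick(hosts, Collections.singleton("h2"), 2));
        }
    }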
@Override public boolean configure(String name, Map params) { _name=name; - + return true; } diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index b52e2d8a0b0..af5b2a4c33b 100755 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -25,9 +25,7 @@ import java.io.FileReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; -import java.lang.reflect.InvocationTargetException; import java.net.InetAddress; -import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.net.URLConnection; @@ -44,15 +42,14 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; -import java.util.regex.Pattern; -import java.util.regex.Matcher; -import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import javax.ejb.Local; import javax.naming.ConfigurationException; @@ -167,7 +164,13 @@ import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.api.to.VolumeTO; +import com.cloud.agent.resource.virtualnetwork.VirtualRoutingResource; +import com.cloud.dc.Vlan; +import com.cloud.exception.InternalErrorException; +import com.cloud.host.Host.Type; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.kvm.resource.KVMHABase.NfsStoragePool; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.ClockDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.ConsoleDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.CpuTuneDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DevicesDef; @@ -182,16 +185,10 @@ import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InterfaceDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InterfaceDef.hostNicType; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.SerialDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.TermPolicy; -import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.ClockDef; -import com.cloud.agent.resource.virtualnetwork.VirtualRoutingResource; import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk.PhysicalDiskFormat; import com.cloud.hypervisor.kvm.storage.KVMStoragePool; import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; -import com.cloud.dc.Vlan; -import com.cloud.exception.InternalErrorException; -import com.cloud.host.Host.Type; -import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.Networks.BroadcastDomainType; import com.cloud.network.Networks.IsolationType; import com.cloud.network.Networks.RouterPrivateIpStrategy; @@ -199,6 +196,7 @@ import com.cloud.network.Networks.TrafficType; import com.cloud.network.PhysicalNetworkSetupInfo; import com.cloud.resource.ServerResource; import com.cloud.resource.ServerResourceBase; +import com.cloud.storage.JavaStorageLayer; import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; 
import com.cloud.storage.Storage.StoragePoolType; @@ -212,7 +210,6 @@ import com.cloud.storage.template.TemplateLocation; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.utils.script.OutputInterpreter; @@ -246,7 +243,7 @@ import com.cloud.vm.VirtualMachineName; **/ @Local(value = { ServerResource.class }) public class LibvirtComputingResource extends ServerResourceBase implements - ServerResource { +ServerResource { private static final Logger s_logger = Logger .getLogger(LibvirtComputingResource.class); @@ -329,8 +326,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements private boolean _can_bridge_firewall; protected String _localStoragePath; protected String _localStorageUUID; - private Map _pifs = new HashMap(); - private Map> hostNetInfo = new HashMap>(); + private final Map _pifs = new HashMap(); + private final Map> hostNetInfo = new HashMap>(); private final Map _vmStats = new ConcurrentHashMap(); protected boolean _disconnected = true; @@ -375,7 +372,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements tokens[3] = Integer.toString(lastbyte); StringBuilder end = new StringBuilder(15); end.append(tokens[0]).append(".").append(tokens[1]).append(".") - .append(tokens[2]).append(".").append(tokens[3]); + .append(tokens[2]).append(".").append(tokens[3]); return end.toString(); } @@ -444,16 +441,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements return false; } - try { - Class clazz = Class - .forName("com.cloud.storage.JavaStorageLayer"); - _storage = (StorageLayer) ComponentLocator.inject(clazz); - _storage.configure("StorageLayer", params); - } catch (ClassNotFoundException e) { - throw new ConfigurationException("Unable to find class " - + "com.cloud.storage.JavaStorageLayer"); - } - + _storage = new JavaStorageLayer(); + _storage.configure("StorageLayer", params); String domrScriptsDir = (String) params.get("domr.scripts.dir"); if (domrScriptsDir == null) { @@ -685,7 +674,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements String[] isoPaths = { "/usr/lib64/cloud/agent/vms/systemvm.iso", "/usr/lib/cloud/agent/vms/systemvm.iso", "/usr/lib64/cloud/common/vms/systemvm.iso", - "/usr/lib/cloud/common/vms/systemvm.iso" }; + "/usr/lib/cloud/common/vms/systemvm.iso" }; for (String isoPath : isoPaths) { if (_storage.exists(isoPath)) { _sysvmISOPath = isoPath; @@ -723,7 +712,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements if (_mountPoint == null) { _mountPoint = "/mnt"; } - + value = (String) params.get("vm.migrate.speed"); _migrateSpeed = NumbersUtil.parseInt(value, -1); if (_migrateSpeed == -1) { @@ -736,7 +725,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements try { _migrateSpeed = Integer.parseInt(tokens[0]); } catch (Exception e) { - + } s_logger.debug("device " + _pifs.get("public") + " has speed: " + String.valueOf(_migrateSpeed)); } @@ -750,28 +739,28 @@ public class LibvirtComputingResource extends ServerResourceBase implements bridges.put("private", _privBridgeName); bridges.put("guest", _guestBridgeName); - params.put("libvirt.host.bridges", (Object) bridges); - params.put("libvirt.host.pifs", (Object) _pifs); + params.put("libvirt.host.bridges", bridges); + params.put("libvirt.host.pifs", 
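The JavaStorageLayer hunk above is the pattern this whole patch applies: a reflective ComponentLocator lookup becomes a direct constructor call. In miniature, with StorageLayer and JavaStorageLayer stubbed (the real types live in com.cloud.storage):

    import java.util.HashMap;
    import java.util.Map;

    class DirectWiringSketch {
        interface StorageLayer {
            void configure(String name, Map<String, Object> params);
        }

        // Stand-in for com.cloud.storage.JavaStorageLayer.
        static class JavaStorageLayer implements StorageLayer {
            public void configure(String name, Map<String, Object> params) {
                // no-op for the sketch
            }
        }

        static StorageLayer wire(Map<String, Object> params) {
            // Before: Class clazz = Class.forName("com.cloud.storage.JavaStorageLayer");
            //         _storage = (StorageLayer) ComponentLocator.inject(clazz);
            // After: construct directly -- no reflection, no ClassNotFoundException to handle.
            StorageLayer storage = new JavaStorageLayer();
            storage.configure("StorageLayer", params);
            return storage;
        }

        public static void main(String[] args) {
            wire(new HashMap<String, Object>());
        }
    }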
_pifs); // Load the vif driver String vifDriverName = (String) params.get("libvirt.vif.driver"); if (vifDriverName == null) { - s_logger.info("No libvirt.vif.driver specififed. Defaults to BridgeVifDriver."); - vifDriverName = "com.cloud.hypervisor.kvm.resource.BridgeVifDriver"; + s_logger.info("No libvirt.vif.driver specified. Defaults to BridgeVifDriver."); + vifDriverName = "com.cloud.hypervisor.kvm.resource.BridgeVifDriver"; } - params.put("libvirt.computing.resource", (Object) this); + params.put("libvirt.computing.resource", this); try { - Class clazz = Class.forName(vifDriverName); - _vifDriver = (VifDriver) clazz.newInstance(); - _vifDriver.configure(params); + Class clazz = Class.forName(vifDriverName); + _vifDriver = (VifDriver) clazz.newInstance(); + _vifDriver.configure(params); } catch (ClassNotFoundException e) { - throw new ConfigurationException("Unable to find class for libvirt.vif.driver " + e); + throw new ConfigurationException("Unable to find class for libvirt.vif.driver " + e); } catch (InstantiationException e) { - throw new ConfigurationException("Unable to instantiate class for libvirt.vif.driver " + e); + throw new ConfigurationException("Unable to instantiate class for libvirt.vif.driver " + e); } catch (Exception e) { - throw new ConfigurationException("Failed to initialize libvirt.vif.driver " + e); + throw new ConfigurationException("Failed to initialize libvirt.vif.driver " + e); } @@ -802,7 +791,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements String vlan = Script.runSimpleBashScript("ls /proc/net/vlan/" + pif); if (vlan != null && !vlan.isEmpty()) { - pif = Script.runSimpleBashScript("grep ^Device\: /proc/net/vlan/" + pif + " | awk {'print $2'}"); + pif = Script.runSimpleBashScript("grep ^Device\: /proc/net/vlan/" + pif + " | awk {'print $2'}"); } return pif; @@ -1105,8 +1094,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements KVMStoragePool secondaryStoragePool = null; try { KVMStoragePool primaryPool = _storagePoolMgr.getStoragePool( - pool.getType(), - pool.getUuid()); + pool.getType(), + pool.getUuid()); String volumeName = UUID.randomUUID().toString(); if (copyToSecondary) { @@ -1116,20 +1105,20 @@ String volumeDestPath = "/volumes/" + cmd.getVolumeId() + File.separator; secondaryStoragePool = _storagePoolMgr.getStoragePoolByURI( - secondaryStorageUrl); + secondaryStorageUrl); secondaryStoragePool.createFolder(volumeDestPath); secondaryStoragePool.delete(); secondaryStoragePool = _storagePoolMgr.getStoragePoolByURI( - secondaryStorageUrl - + volumeDestPath); + secondaryStorageUrl + + volumeDestPath); _storagePoolMgr.copyPhysicalDisk(volume, destVolumeName,secondaryStoragePool); return new CopyVolumeAnswer(cmd, true, null, null, volumeName); } else { volumePath = "/volumes/" + cmd.getVolumeId() + File.separator; secondaryStoragePool = _storagePoolMgr.getStoragePoolByURI( - secondaryStorageUrl - + volumePath); + secondaryStorageUrl + + volumePath); KVMPhysicalDisk volume = secondaryStoragePool .getPhysicalDisk(cmd.getVolumePath() + ".qcow2"); _storagePoolMgr.copyPhysicalDisk(volume, volumeName, @@ -1148,7 +1137,7 @@ protected Answer execute(DeleteStoragePoolCommand cmd) { try { _storagePoolMgr.deleteStoragePool(cmd.getPool().getType(), - cmd.getPool().getUuid()); + cmd.getPool().getUuid()); return new Answer(cmd); } catch 
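Unlike the storage layer, the VIF driver stays pluggable, so Class.forName() is kept above: read libvirt.vif.driver from the params, fall back to BridgeVifDriver, instantiate, configure. The load-with-default pattern in compact form (VifDriver is stubbed; the original uses clazz.newInstance(), spelled out here via the constructor):

    import java.util.HashMap;
    import java.util.Map;

    public class VifDriverLoadSketch {
        interface VifDriver {
            void configure(Map<String, Object> params) throws Exception;
        }

        // Stand-in default, analogous to com.cloud.hypervisor.kvm.resource.BridgeVifDriver.
        public static class BridgeVifDriver implements VifDriver {
            public void configure(Map<String, Object> params) {
                // no-op for the sketch
            }
        }

        static VifDriver load(Map<String, Object> params) throws Exception {
            String name = (String) params.get("libvirt.vif.driver");
            if (name == null) {
                name = BridgeVifDriver.class.getName(); // default when nothing is configured
            }
            Class<?> clazz = Class.forName(name);
            VifDriver driver = (VifDriver) clazz.getDeclaredConstructor().newInstance();
            driver.configure(params);
            return driver;
        }

        public static void main(String[] args) throws Exception {
            load(new HashMap<String, Object>()); // no key set -> BridgeVifDriver
        }
    }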
(CloudRuntimeException e) { return new Answer(cmd, false, e.toString()); @@ -1190,7 +1179,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements long disksize; try { primaryPool = _storagePoolMgr.getStoragePool(pool.getType(), - pool.getUuid()); + pool.getUuid()); disksize = dskch.getSize(); if (cmd.getTemplateUrl() != null) { @@ -1199,7 +1188,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements } else { BaseVol = primaryPool.getPhysicalDisk(cmd.getTemplateUrl()); vol = _storagePoolMgr.createDiskFromTemplate(BaseVol, UUID - .randomUUID().toString(), primaryPool); + .randomUUID().toString(), primaryPool); } if (vol == null) { return new Answer(cmd, false, @@ -1273,8 +1262,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements try { KVMStoragePool pool = _storagePoolMgr.getStoragePool( - vol.getPoolType(), - vol.getPoolUuid()); + vol.getPoolType(), + vol.getPoolUuid()); pool.deletePhysicalDisk(vol.getPath()); String vmName = cmd.getVmName(); String poolPath = pool.getLocalPath(); @@ -1289,7 +1278,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements _storagePoolMgr.deleteVbdByPath(vol.getPoolType(),patchVbd.getAbsolutePath()); } catch(CloudRuntimeException e) { s_logger.warn("unable to destroy patch disk '" + patchVbd.getAbsolutePath() + - "' while removing root disk for " + vmName + " : " + e); + "' while removing root disk for " + vmName + " : " + e); } } else { s_logger.debug("file '" +patchVbd.getAbsolutePath()+ "' not found"); @@ -1425,7 +1414,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements String dev = "eth" + nic.getDeviceId(); String netmask = NetUtils.getSubNet(routerGIP, nic.getNetmask()); String result = _virtRouterResource.assignGuestNetwork(dev, routerIP, - routerGIP, gateway, cidr, netmask, dns, domainName ); + routerGIP, gateway, cidr, netmask, dns, domainName ); if (result != null) { return new SetupGuestNetworkAnswer(cmd, false, "Creating guest network failed due to " + result); @@ -1461,7 +1450,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements String rule = sb.toString(); String result = _virtRouterResource.assignNetworkACL(routerIp, - dev, nic.getIp(), netmask, rule); + dev, nic.getIp(), netmask, rule); if (result != null) { for (int i=0; i < results.length; i++) { @@ -1492,21 +1481,21 @@ public class LibvirtComputingResource extends ServerResourceBase implements List pluggedNics = getInterfaces(conn, routerName); for (InterfaceDef pluggedNic : pluggedNics) { - String pluggedVlanBr = pluggedNic.getBrName(); - String pluggedVlanId = getVlanIdFromBridge(pluggedVlanBr); - if (pubVlan.equalsIgnoreCase(Vlan.UNTAGGED) - && pluggedVlanBr.equalsIgnoreCase(_publicBridgeName)) { - break; - } else if (pluggedVlanBr.equalsIgnoreCase(_linkLocalBridgeName)){ - /*skip over, no physical bridge device exists*/ - } else if (pluggedVlanId == null) { - /*this should only be true in the case of link local bridge*/ - return new SetSourceNatAnswer(cmd, false, "unable to find the vlan id for bridge "+pluggedVlanBr+ - " when attempting to set up" + pubVlan + " on router " + routerName); - } else if (pluggedVlanId.equals(pubVlan)) { - break; - } - devNum++; + String pluggedVlanBr = pluggedNic.getBrName(); + String pluggedVlanId = getVlanIdFromBridge(pluggedVlanBr); + if (pubVlan.equalsIgnoreCase(Vlan.UNTAGGED) + && pluggedVlanBr.equalsIgnoreCase(_publicBridgeName)) { + break; + } else if 
(pluggedVlanBr.equalsIgnoreCase(_linkLocalBridgeName)){ + /*skip over, no physical bridge device exists*/ + } else if (pluggedVlanId == null) { + /*this should only be true in the case of link local bridge*/ + return new SetSourceNatAnswer(cmd, false, "unable to find the vlan id for bridge "+pluggedVlanBr+ + " when attempting to set up" + pubVlan + " on router " + routerName); + } else if (pluggedVlanId.equals(pubVlan)) { + break; + } + devNum++; } String dev = "eth" + devNum; @@ -1544,8 +1533,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements vlanToNicNum.put("LinkLocal",devNum); } else if (pluggedVlan.equalsIgnoreCase(_publicBridgeName) - || pluggedVlan.equalsIgnoreCase(_privBridgeName) - || pluggedVlan.equalsIgnoreCase(_guestBridgeName)) { + || pluggedVlan.equalsIgnoreCase(_privBridgeName) + || pluggedVlan.equalsIgnoreCase(_guestBridgeName)) { vlanToNicNum.put(Vlan.UNTAGGED,devNum); } else { @@ -1560,7 +1549,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements String netmask = Long.toString(NetUtils.getCidrSize(ip.getVlanNetmask())); String subnet = NetUtils.getSubNet(ip.getPublicIp(), ip.getVlanNetmask()); _virtRouterResource.assignVpcIpToRouter(routerIP, ip.isAdd(), ip.getPublicIp(), - nicName, ip.getVlanGateway(), netmask, subnet); + nicName, ip.getVlanGateway(), netmask, subnet); results[i++] = ip.getPublicIp() + " - success"; } @@ -1587,14 +1576,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements if (nic.getBrName().equalsIgnoreCase(_linkLocalBridgeName)) { vlanAllocatedToVM.put("LinkLocal", nicPos); } else { - if (nic.getBrName().equalsIgnoreCase(_publicBridgeName) - || nic.getBrName().equalsIgnoreCase(_privBridgeName) - || nic.getBrName().equalsIgnoreCase(_guestBridgeName)) { - vlanAllocatedToVM.put(Vlan.UNTAGGED, nicPos); - } else { - String vlanId = getVlanIdFromBridge(nic.getBrName()); - vlanAllocatedToVM.put(vlanId, nicPos); - } + if (nic.getBrName().equalsIgnoreCase(_publicBridgeName) + || nic.getBrName().equalsIgnoreCase(_privBridgeName) + || nic.getBrName().equalsIgnoreCase(_guestBridgeName)) { + vlanAllocatedToVM.put(Vlan.UNTAGGED, nicPos); + } else { + String vlanId = getVlanIdFromBridge(nic.getBrName()); + vlanAllocatedToVM.put(vlanId, nicPos); + } } nicPos++; } @@ -1649,13 +1638,13 @@ public class LibvirtComputingResource extends ServerResourceBase implements } KVMStoragePool primaryPool = _storagePoolMgr.getStoragePool( - cmd.getPool().getType(), - cmd.getPool().getUuid()); + cmd.getPool().getType(), + cmd.getPool().getUuid()); if (primaryPool.getType() == StoragePoolType.RBD) { s_logger.debug("Snapshots are not supported on RBD volumes"); return new ManageSnapshotAnswer(cmd, false, - "Snapshots are not supported on RBD volumes"); + "Snapshots are not supported on RBD volumes"); } KVMPhysicalDisk disk = primaryPool.getPhysicalDisk(cmd @@ -1728,7 +1717,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Connect conn = LibvirtConnection.getConnection(); secondaryStoragePool = _storagePoolMgr.getStoragePoolByURI( - secondaryStoragePoolUrl); + secondaryStoragePoolUrl); String ssPmountPath = secondaryStoragePool.getLocalPath(); snapshotRelPath = File.separator + "snapshots" + File.separator @@ -1739,8 +1728,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements + File.separator + dcId + File.separator + accountId + File.separator + volumeId; KVMStoragePool primaryPool = _storagePoolMgr.getStoragePool( - cmd.getPool().getType(), - 
cmd.getPrimaryStoragePoolNameLabel()); + cmd.getPool().getType(), + cmd.getPrimaryStoragePoolNameLabel()); KVMPhysicalDisk snapshotDisk = primaryPool.getPhysicalDisk(cmd .getVolumePath()); Script command = new Script(_manageSnapshotPath, _cmdsTimeout, @@ -1768,8 +1757,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements } KVMStoragePool primaryStorage = _storagePoolMgr.getStoragePool( - cmd.getPool().getType(), - cmd.getPool().getUuid()); + cmd.getPool().getType(), + cmd.getPool().getUuid()); if (state == DomainInfo.DomainState.VIR_DOMAIN_RUNNING && !primaryStorage.isExternalSnapshot()) { String vmUuid = vm.getUUIDString(); @@ -1853,7 +1842,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements KVMStoragePool secondaryStoragePool = null; try { secondaryStoragePool = _storagePoolMgr.getStoragePoolByURI(cmd - .getSecondaryStorageUrl()); + .getSecondaryStorageUrl()); String ssPmountPath = secondaryStoragePool.getLocalPath(); String snapshotDestPath = ssPmountPath + File.separator @@ -1884,15 +1873,15 @@ public class LibvirtComputingResource extends ServerResourceBase implements int index = snapshotPath.lastIndexOf("/"); snapshotPath = snapshotPath.substring(0, index); KVMStoragePool secondaryPool = _storagePoolMgr.getStoragePoolByURI( - cmd.getSecondaryStorageUrl() - + snapshotPath); + cmd.getSecondaryStorageUrl() + + snapshotPath); KVMPhysicalDisk snapshot = secondaryPool.getPhysicalDisk(cmd .getSnapshotName()); String primaryUuid = cmd.getPrimaryStoragePoolNameLabel(); KVMStoragePool primaryPool = _storagePoolMgr .getStoragePool(cmd.getPool().getType(), - primaryUuid); + primaryUuid); String volUuid = UUID.randomUUID().toString(); KVMPhysicalDisk disk = _storagePoolMgr.copyPhysicalDisk(snapshot, volUuid, primaryPool); @@ -1928,7 +1917,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements .getSnapshotName()); secondaryPool = _storagePoolMgr.getStoragePoolByURI( - cmd.getSecondaryStorageUrl()); + cmd.getSecondaryStorageUrl()); String templatePath = secondaryPool.getLocalPath() + File.separator + templateInstallFolder; @@ -1978,8 +1967,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements protected GetStorageStatsAnswer execute(final GetStorageStatsCommand cmd) { try { KVMStoragePool sp = _storagePoolMgr.getStoragePool( - cmd.getPooltype(), - cmd.getStorageId()); + cmd.getPooltype(), + cmd.getStorageId()); return new GetStorageStatsAnswer(cmd, sp.getCapacity(), sp.getUsed()); } catch (CloudRuntimeException e) { @@ -1999,11 +1988,11 @@ public class LibvirtComputingResource extends ServerResourceBase implements String templateInstallFolder = "/template/tmpl/" + templateFolder; secondaryStorage = _storagePoolMgr.getStoragePoolByURI( - secondaryStorageURL); + secondaryStorageURL); KVMStoragePool primary = _storagePoolMgr.getStoragePool( - cmd.getPool().getType(), - cmd.getPrimaryStoragePoolNameLabel()); + cmd.getPool().getType(), + cmd.getPrimaryStoragePoolNameLabel()); KVMPhysicalDisk disk = primary.getPhysicalDisk(cmd.getVolumePath()); String tmpltPath = secondaryStorage.getLocalPath() + File.separator + templateInstallFolder; @@ -2024,12 +2013,12 @@ public class LibvirtComputingResource extends ServerResourceBase implements } else { s_logger.debug("Converting RBD disk " + disk.getPath() + " into template " + cmd.getUniqueName()); Script.runSimpleBashScript("qemu-img convert" - + " -f raw -O qcow2 " - + KVMPhysicalDisk.RBDStringBuilder(primary.getSourceHost(), - primary.getSourcePort(), - 
primary.getAuthUserName(), - primary.getAuthSecret(), - disk.getPath()) + + " -f raw -O qcow2 " + + KVMPhysicalDisk.RBDStringBuilder(primary.getSourceHost(), + primary.getSourcePort(), + primary.getAuthUserName(), + primary.getAuthSecret(), + disk.getPath()) + " " + tmpltPath + "/" + cmd.getUniqueName() + ".qcow2"); File templateProp = new File(tmpltPath + "/template.properties"); if (!templateProp.exists()) { @@ -2126,8 +2115,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements /* Copy volume to primary storage */ KVMStoragePool primaryPool = _storagePoolMgr.getStoragePool( - cmd.getPool().getType(), - cmd.getPoolUuid()); + cmd.getPool().getType(), + cmd.getPoolUuid()); KVMPhysicalDisk primaryVol = _storagePoolMgr.copyPhysicalDisk( tmplVol, UUID.randomUUID().toString(), primaryPool); @@ -2233,7 +2222,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements final StringBuffer sb = new StringBuffer(); sb.append("http://").append(proxyManagementIp).append(":" + cmdPort) - .append("/cmd/getstatus"); + .append("/cmd/getstatus"); boolean success = true; try { @@ -2291,8 +2280,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements try { Connect conn = LibvirtConnection.getConnection(); KVMStoragePool primary = _storagePoolMgr.getStoragePool( - cmd.getPooltype(), - cmd.getPoolUuid()); + cmd.getPooltype(), + cmd.getPoolUuid()); KVMPhysicalDisk disk = primary.getPhysicalDisk(cmd.getVolumePath()); attachOrDetachDisk(conn, cmd.getAttach(), cmd.getVmName(), disk, cmd.getDeviceId().intValue()); @@ -2364,10 +2353,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements private Answer execute(PingTestCommand cmd) { String result = null; final String computingHostIp = cmd.getComputingHostIp(); // TODO, split - // the - // command - // into 2 - // types + // the + // command + // into 2 + // types if (computingHostIp != null) { result = doPingTest(computingHostIp); @@ -2507,7 +2496,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements final Script cpuScript = new Script("/bin/bash", s_logger); cpuScript.add("-c"); cpuScript - .add("idle=$(top -b -n 1|grep Cpu\\(s\\):|cut -d% -f4|cut -d, -f2);echo $idle"); + .add("idle=$(top -b -n 1|grep Cpu\\(s\\):|cut -d% -f4|cut -d, -f2);echo $idle"); final OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); String result = cpuScript.execute(parser); @@ -2521,7 +2510,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements final Script memScript = new Script("/bin/bash", s_logger); memScript.add("-c"); memScript - .add("freeMem=$(free|grep cache:|awk '{print $4}');echo $freeMem"); + .add("freeMem=$(free|grep cache:|awk '{print $4}');echo $freeMem"); final OutputInterpreter.OneLineParser Memparser = new OutputInterpreter.OneLineParser(); result = memScript.execute(Memparser); if (result != null) { @@ -2721,7 +2710,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements script.add("-m","700"); script.add(_SSHKEYSPATH); script.execute(); - + if(!sshKeysDir.exists()) { s_logger.debug("failed to create directory " + _SSHKEYSPATH); } @@ -2903,7 +2892,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements for (NicTO nic : nics) { if (nic.getIsolationUri() != null && nic.getIsolationUri().getScheme() - .equalsIgnoreCase(IsolationType.Ec2.toString())) { + .equalsIgnoreCase(IsolationType.Ec2.toString())) { if (vmSpec.getType() != 
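The RBD template path above shells out to qemu-img to convert a raw RBD volume into a qcow2 template file. Roughly the command line being assembled (the rbd: source spec comes from KVMPhysicalDisk.RBDStringBuilder, whose exact output format is not reproduced here):

    class QemuImgCmdSketch {
        // Assemble the conversion command; rbdSource is whatever
        // RBDStringBuilder(host, port, user, secret, path) produces.
        static String convertCommand(String rbdSource, String tmpltPath, String uniqueName) {
            return "qemu-img convert"
                    + " -f raw -O qcow2 "
                    + rbdSource
                    + " " + tmpltPath + "/" + uniqueName + ".qcow2";
        }

        public static void main(String[] args) {
            // Illustrative arguments only; real paths come from the secondary storage mount.
            System.out.println(convertCommand("rbd:...", "/mnt/sec/template/tmpl/2/200", "routing-1"));
        }
    }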
VirtualMachine.Type.User) { default_network_rules_for_systemvm(conn, vmName); break; @@ -2940,7 +2929,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements String path = isoPath.substring(0, index); String name = isoPath.substring(index + 1); KVMStoragePool secondaryPool = _storagePoolMgr.getStoragePoolByURI( - path); + path); KVMPhysicalDisk isoVol = secondaryPool.getPhysicalDisk(name); return isoVol.getPath(); } else { @@ -2958,7 +2947,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements return arg0.getDeviceId() > arg1.getDeviceId() ? 1 : -1; } }); - + for (VolumeTO volume : disks) { KVMPhysicalDisk physicalDisk = null; KVMStoragePool pool = null; @@ -2968,12 +2957,12 @@ public class LibvirtComputingResource extends ServerResourceBase implements String volDir = volPath.substring(0, index); String volName = volPath.substring(index + 1); KVMStoragePool secondaryStorage = _storagePoolMgr. - getStoragePoolByURI(volDir); + getStoragePoolByURI(volDir); physicalDisk = secondaryStorage.getPhysicalDisk(volName); } else if (volume.getType() != Volume.Type.ISO) { pool = _storagePoolMgr.getStoragePool( - volume.getPoolType(), - volume.getPoolUuid()); + volume.getPoolType(), + volume.getPoolUuid()); physicalDisk = pool.getPhysicalDisk(volume.getPath()); } @@ -2999,23 +2988,23 @@ public class LibvirtComputingResource extends ServerResourceBase implements For RBD pools we use the secret mechanism in libvirt. We store the secret under the UUID of the pool, that's why we pass the pool's UUID as the authSecret - */ + */ disk.defNetworkBasedDisk(physicalDisk.getPath().replace("rbd:", ""), pool.getSourceHost(), pool.getSourcePort(), - pool.getAuthUserName(), pool.getUuid(), - devId, diskBusType, diskProtocol.RBD); + pool.getAuthUserName(), pool.getUuid(), + devId, diskBusType, diskProtocol.RBD); } else if (pool.getType() == StoragePoolType.CLVM) { disk.defBlockBasedDisk(physicalDisk.getPath(), devId, - diskBusType); + diskBusType); } else { if (volume.getType() == Volume.Type.DATADISK) { - disk.defFileBasedDisk(physicalDisk.getPath(), devId, - DiskDef.diskBus.VIRTIO, - DiskDef.diskFmtType.QCOW2); - } else { - disk.defFileBasedDisk(physicalDisk.getPath(), devId, diskBusType, DiskDef.diskFmtType.QCOW2); - } + disk.defFileBasedDisk(physicalDisk.getPath(), devId, + DiskDef.diskBus.VIRTIO, + DiskDef.diskFmtType.QCOW2); + } else { + disk.defFileBasedDisk(physicalDisk.getPath(), devId, diskBusType, DiskDef.diskFmtType.QCOW2); + } - } + } } @@ -3052,8 +3041,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements VolumeTO rootVol = getVolume(vmSpec, Volume.Type.ROOT); String patchName = vmName + "-patchdisk"; KVMStoragePool pool = _storagePoolMgr.getStoragePool( - rootVol.getPoolType(), - rootVol.getPoolUuid()); + rootVol.getPoolType(), + rootVol.getPoolUuid()); String patchDiskPath = pool.getLocalPath() + "/" + patchName; List phyDisks = pool.listPhysicalDisks(); @@ -3069,7 +3058,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements if (!foundDisk) { s_logger.debug("generating new patch disk for " + vmName + " since none was found"); KVMPhysicalDisk disk = pool.createPhysicalDisk(patchName, KVMPhysicalDisk.PhysicalDiskFormat.RAW, - 10L * 1024 * 1024); + 10L * 1024 * 1024); } else { s_logger.debug("found existing patch disk at " + patchDiskPath + " using it for " + vmName); } @@ -3091,9 +3080,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements 
patchDisk.defBlockBasedDisk(patchDiskPath, 1, rootDisk.getBusType()); } else { patchDisk.defFileBasedDisk(patchDiskPath, 1, rootDisk.getBusType(), - DiskDef.diskFmtType.RAW); + DiskDef.diskFmtType.RAW); } - + disks.add(patchDisk); String bootArgs = vmSpec.getBootArgs(); @@ -3162,14 +3151,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements protected synchronized String attachOrDetachISO(Connect conn, String vmName, String isoPath, boolean isAttach) - throws LibvirtException, URISyntaxException, InternalErrorException { + throws LibvirtException, URISyntaxException, InternalErrorException { String isoXml = null; if (isoPath != null && isAttach) { int index = isoPath.lastIndexOf("/"); String path = isoPath.substring(0, index); String name = isoPath.substring(index + 1); KVMStoragePool secondaryPool = _storagePoolMgr.getStoragePoolByURI( - path); + path); KVMPhysicalDisk isoVol = secondaryPool.getPhysicalDisk(name); isoPath = isoVol.getPath(); @@ -3691,9 +3680,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements info.add(ram); info.add(cap); long dom0ram = Math.min(ram / 10, 768 * 1024 * 1024L);// save a maximum - // of 10% of - // system ram or - // 768M + // of 10% of + // system ram or + // 768M dom0ram = Math.max(dom0ram, _dom0MinMem); info.add(dom0ram); s_logger.debug("cpus=" + cpus + ", speed=" + speed + ", ram=" + ram @@ -4162,7 +4151,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements NodeInfo node = conn.nodeInfo(); utilization = utilization / node.cpus; if(utilization > 0){ - stats.setCPUUtilization(utilization * 100); + stats.setCPUUtilization(utilization * 100); } } diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java index f6bc8fc7fee..0a9f93f6497 100755 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java @@ -29,6 +29,7 @@ import java.util.concurrent.TimeUnit; import java.util.regex.PatternSyntaxException; import javax.ejb.Local; +import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; @@ -43,9 +44,6 @@ import com.cloud.agent.api.GetHostStatsCommand; import com.cloud.agent.api.HostStatsEntry; import com.cloud.agent.api.MaintainAnswer; import com.cloud.agent.api.PingTestCommand; -import com.cloud.agent.api.PrepareForMigrationAnswer; -import com.cloud.agent.api.PrepareForMigrationCommand; -import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.dc.dao.HostPodDao; import com.cloud.host.Host; import com.cloud.resource.AgentResourceBase; @@ -58,7 +56,6 @@ import com.cloud.simulator.MockVMVO; import com.cloud.simulator.dao.MockHostDao; import com.cloud.simulator.dao.MockVMDao; import com.cloud.utils.Pair; -import com.cloud.utils.component.Inject; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; @@ -67,393 +64,393 @@ import com.cloud.utils.net.NetUtils; @Local(value = { MockAgentManager.class }) public class MockAgentManagerImpl implements MockAgentManager { - private static final Logger s_logger = Logger.getLogger(MockAgentManagerImpl.class); - @Inject - HostPodDao _podDao = null; - @Inject - MockHostDao _mockHostDao = null; - @Inject - MockVMDao _mockVmDao = null; - @Inject - SimulatorManager _simulatorMgr = null; - 
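The host-info hunk above sizes the dom0 reservation as the smaller of 10% of system RAM or 768 MiB, then clamps it up to the configured minimum (_dom0MinMem). The arithmetic on its own:

    class Dom0RamSketch {
        // min(ram / 10, 768 MiB), then never below the configured floor.
        static long dom0Ram(long ramBytes, long dom0MinMem) {
            long reserved = Math.min(ramBytes / 10, 768L * 1024 * 1024);
            return Math.max(reserved, dom0MinMem);
        }

        public static void main(String[] args) {
            long floor = 256L * 1024 * 1024;                 // assume a 256 MiB minimum
            long big = 16L * 1024 * 1024 * 1024;             // 16 GiB host
            System.out.println(dom0Ram(big, floor) >> 20);   // 768 (the cap wins)
            long small = 4L * 1024 * 1024 * 1024;            // 4 GiB host
            System.out.println(dom0Ram(small, floor) >> 20); // 409 (10% wins)
        }
    }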
@Inject - AgentManager _agentMgr = null; - @Inject - MockStorageManager _storageMgr = null; - @Inject - ResourceManager _resourceMgr; - private SecureRandom random; - private Map _resources = new ConcurrentHashMap(); - private ThreadPoolExecutor _executor; + private static final Logger s_logger = Logger.getLogger(MockAgentManagerImpl.class); + @Inject + HostPodDao _podDao = null; + @Inject + MockHostDao _mockHostDao = null; + @Inject + MockVMDao _mockVmDao = null; + @Inject + SimulatorManager _simulatorMgr = null; + @Inject + AgentManager _agentMgr = null; + @Inject + MockStorageManager _storageMgr = null; + @Inject + ResourceManager _resourceMgr; + private SecureRandom random; + private final Map _resources = new ConcurrentHashMap(); + private ThreadPoolExecutor _executor; - private Pair getPodCidr(long podId, long dcId) { - try { + private Pair getPodCidr(long podId, long dcId) { + try { - HashMap> podMap = _podDao.getCurrentPodCidrSubnets(dcId, 0); - List cidrPair = podMap.get(podId); - String cidrAddress = (String) cidrPair.get(0); - Long cidrSize = (Long) cidrPair.get(1); - return new Pair(cidrAddress, cidrSize); - } catch (PatternSyntaxException e) { - s_logger.error("Exception while splitting pod cidr"); - return null; - } catch (IndexOutOfBoundsException e) { - s_logger.error("Invalid pod cidr. Please check"); - return null; - } - } + HashMap> podMap = _podDao.getCurrentPodCidrSubnets(dcId, 0); + List cidrPair = podMap.get(podId); + String cidrAddress = (String) cidrPair.get(0); + Long cidrSize = (Long) cidrPair.get(1); + return new Pair(cidrAddress, cidrSize); + } catch (PatternSyntaxException e) { + s_logger.error("Exception while splitting pod cidr"); + return null; + } catch (IndexOutOfBoundsException e) { + s_logger.error("Invalid pod cidr. 
Please check"); + return null; + } + } - private String getIpAddress(long instanceId, long dcId, long podId) { - Pair cidr = this.getPodCidr(podId, dcId); - return NetUtils.long2Ip(NetUtils.ip2Long(cidr.first()) + instanceId); - } + private String getIpAddress(long instanceId, long dcId, long podId) { + Pair cidr = this.getPodCidr(podId, dcId); + return NetUtils.long2Ip(NetUtils.ip2Long(cidr.first()) + instanceId); + } - private String getMacAddress(long dcId, long podId, long clusterId, int instanceId) { - return NetUtils.long2Mac((dcId << 40 + podId << 32 + clusterId << 24 + instanceId)); - } + private String getMacAddress(long dcId, long podId, long clusterId, int instanceId) { + return NetUtils.long2Mac((dcId << 40 + podId << 32 + clusterId << 24 + instanceId)); + } - public synchronized int getNextAgentId(long cidrSize) { - return random.nextInt((int) cidrSize); - } + public synchronized int getNextAgentId(long cidrSize) { + return random.nextInt((int) cidrSize); + } - @Override - @DB - public Map> createServerResources(Map params) { + @Override + @DB + public Map> createServerResources(Map params) { - Map args = new HashMap(); - Map> newResources = new HashMap>(); - AgentResourceBase agentResource; - long cpuCore = Long.parseLong((String) params.get("cpucore")); - long cpuSpeed = Long.parseLong((String) params.get("cpuspeed")); - long memory = Long.parseLong((String) params.get("memory")); - long localStorageSize = Long.parseLong((String) params.get("localstorage")); - synchronized (this) { - long dataCenterId = Long.parseLong((String) params.get("zone")); - long podId = Long.parseLong((String) params.get("pod")); - long clusterId = Long.parseLong((String) params.get("cluster")); - long cidrSize = getPodCidr(podId, dataCenterId).second(); + Map args = new HashMap(); + Map> newResources = new HashMap>(); + AgentResourceBase agentResource; + long cpuCore = Long.parseLong((String) params.get("cpucore")); + long cpuSpeed = Long.parseLong((String) params.get("cpuspeed")); + long memory = Long.parseLong((String) params.get("memory")); + long localStorageSize = Long.parseLong((String) params.get("localstorage")); + synchronized (this) { + long dataCenterId = Long.parseLong((String) params.get("zone")); + long podId = Long.parseLong((String) params.get("pod")); + long clusterId = Long.parseLong((String) params.get("cluster")); + long cidrSize = getPodCidr(podId, dataCenterId).second(); - int agentId = getNextAgentId(cidrSize); - String ipAddress = getIpAddress(agentId, dataCenterId, podId); - String macAddress = getMacAddress(dataCenterId, podId, clusterId, agentId); - MockHostVO mockHost = new MockHostVO(); - mockHost.setDataCenterId(dataCenterId); - mockHost.setPodId(podId); - mockHost.setClusterId(clusterId); - mockHost.setCapabilities("hvm"); - mockHost.setCpuCount(cpuCore); - mockHost.setCpuSpeed(cpuSpeed); - mockHost.setMemorySize(memory); - String guid = UUID.randomUUID().toString(); - mockHost.setGuid(guid); - mockHost.setName("SimulatedAgent." 
+ guid); - mockHost.setPrivateIpAddress(ipAddress); - mockHost.setPublicIpAddress(ipAddress); - mockHost.setStorageIpAddress(ipAddress); - mockHost.setPrivateMacAddress(macAddress); - mockHost.setPublicMacAddress(macAddress); - mockHost.setStorageMacAddress(macAddress); - mockHost.setVersion(this.getClass().getPackage().getImplementationVersion()); - mockHost.setResource("com.cloud.agent.AgentRoutingResource"); + int agentId = getNextAgentId(cidrSize); + String ipAddress = getIpAddress(agentId, dataCenterId, podId); + String macAddress = getMacAddress(dataCenterId, podId, clusterId, agentId); + MockHostVO mockHost = new MockHostVO(); + mockHost.setDataCenterId(dataCenterId); + mockHost.setPodId(podId); + mockHost.setClusterId(clusterId); + mockHost.setCapabilities("hvm"); + mockHost.setCpuCount(cpuCore); + mockHost.setCpuSpeed(cpuSpeed); + mockHost.setMemorySize(memory); + String guid = UUID.randomUUID().toString(); + mockHost.setGuid(guid); + mockHost.setName("SimulatedAgent." + guid); + mockHost.setPrivateIpAddress(ipAddress); + mockHost.setPublicIpAddress(ipAddress); + mockHost.setStorageIpAddress(ipAddress); + mockHost.setPrivateMacAddress(macAddress); + mockHost.setPublicMacAddress(macAddress); + mockHost.setStorageMacAddress(macAddress); + mockHost.setVersion(this.getClass().getPackage().getImplementationVersion()); + mockHost.setResource("com.cloud.agent.AgentRoutingResource"); - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - mockHost = _mockHostDao.persist(mockHost); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - s_logger.error("Error while configuring mock agent " + ex.getMessage()); - throw new CloudRuntimeException("Error configuring agent", ex); - } finally { - txn.close(); + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + mockHost = _mockHostDao.persist(mockHost); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + s_logger.error("Error while configuring mock agent " + ex.getMessage()); + throw new CloudRuntimeException("Error configuring agent", ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - _storageMgr.getLocalStorage(guid, localStorageSize); + _storageMgr.getLocalStorage(guid, localStorageSize); - agentResource = new AgentRoutingResource(); - if (agentResource != null) { - try { - params.put("guid", mockHost.getGuid()); - agentResource.start(); - agentResource.configure(mockHost.getName(), params); + agentResource = new AgentRoutingResource(); + if (agentResource != null) { + try { + params.put("guid", mockHost.getGuid()); + agentResource.start(); + agentResource.configure(mockHost.getName(), params); - newResources.put(agentResource, args); - } catch (ConfigurationException e) { - s_logger.error("error while configuring server resource" + e.getMessage()); - } - } - } - return newResources; - } + newResources.put(agentResource, args); + } catch (ConfigurationException e) { + s_logger.error("error while configuring server resource" + e.getMessage()); + } + } + } + return newResources; + } - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - try { - random = SecureRandom.getInstance("SHA1PRNG"); - _executor = new ThreadPoolExecutor(1, 5, 1, TimeUnit.DAYS, new LinkedBlockingQueue(), - new NamedThreadFactory("Simulator-Agent-Mgr")); - // ComponentLocator locator = ComponentLocator.getCurrentLocator(); - // _simulatorMgr = (SimulatorManager) - // 
locator.getComponent(SimulatorManager.Name); - } catch (NoSuchAlgorithmException e) { - s_logger.debug("Failed to initialize random:" + e.toString()); - return false; - } - return true; - } + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + try { + random = SecureRandom.getInstance("SHA1PRNG"); + _executor = new ThreadPoolExecutor(1, 5, 1, TimeUnit.DAYS, new LinkedBlockingQueue(), + new NamedThreadFactory("Simulator-Agent-Mgr")); + // ComponentLocator locator = ComponentLocator.getCurrentLocator(); + // _simulatorMgr = (SimulatorManager) + // locator.getComponent(SimulatorManager.Name); + } catch (NoSuchAlgorithmException e) { + s_logger.debug("Failed to initialize random:" + e.toString()); + return false; + } + return true; + } - @Override - public boolean handleSystemVMStart(long vmId, String privateIpAddress, String privateMacAddress, - String privateNetMask, long dcId, long podId, String name, String vmType, String url) { - _executor.execute(new SystemVMHandler(vmId, privateIpAddress, privateMacAddress, privateNetMask, dcId, podId, - name, vmType, _simulatorMgr, url)); - return true; - } + @Override + public boolean handleSystemVMStart(long vmId, String privateIpAddress, String privateMacAddress, + String privateNetMask, long dcId, long podId, String name, String vmType, String url) { + _executor.execute(new SystemVMHandler(vmId, privateIpAddress, privateMacAddress, privateNetMask, dcId, podId, + name, vmType, _simulatorMgr, url)); + return true; + } - @Override - public boolean handleSystemVMStop(long vmId) { - _executor.execute(new SystemVMHandler(vmId)); - return true; - } + @Override + public boolean handleSystemVMStop(long vmId) { + _executor.execute(new SystemVMHandler(vmId)); + return true; + } - private class SystemVMHandler implements Runnable { - private long vmId; - private String privateIpAddress; - private String privateMacAddress; - private String privateNetMask; - private long dcId; - private long podId; - private String guid; - private String name; - private String vmType; - private SimulatorManager mgr; - private String mode; - private String url; + private class SystemVMHandler implements Runnable { + private final long vmId; + private String privateIpAddress; + private String privateMacAddress; + private String privateNetMask; + private long dcId; + private long podId; + private String guid; + private String name; + private String vmType; + private SimulatorManager mgr; + private final String mode; + private String url; - public SystemVMHandler(long vmId, String privateIpAddress, String privateMacAddress, String privateNetMask, - long dcId, long podId, String name, String vmType, SimulatorManager mgr, String url) { - this.vmId = vmId; - this.privateIpAddress = privateIpAddress; - this.privateMacAddress = privateMacAddress; - this.privateNetMask = privateNetMask; - this.dcId = dcId; - this.guid = "SystemVM-" + UUID.randomUUID().toString(); - this.name = name; - this.vmType = vmType; - this.mgr = mgr; - this.mode = "Start"; - this.url = url; - this.podId = podId; - } + public SystemVMHandler(long vmId, String privateIpAddress, String privateMacAddress, String privateNetMask, + long dcId, long podId, String name, String vmType, SimulatorManager mgr, String url) { + this.vmId = vmId; + this.privateIpAddress = privateIpAddress; + this.privateMacAddress = privateMacAddress; + this.privateNetMask = privateNetMask; + this.dcId = dcId; + this.guid = "SystemVM-" + UUID.randomUUID().toString(); + this.name = name; + this.vmType = 
vmType; + this.mgr = mgr; + this.mode = "Start"; + this.url = url; + this.podId = podId; + } - public SystemVMHandler(long vmId) { - this.vmId = vmId; - this.mode = "Stop"; - } + public SystemVMHandler(long vmId) { + this.vmId = vmId; + this.mode = "Stop"; + } - @Override - @DB - public void run() { + @Override + @DB + public void run() { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - if (this.mode.equalsIgnoreCase("Stop")) { - txn.start(); - MockHost host = _mockHostDao.findByVmId(this.vmId); - if (host != null) { - String guid = host.getGuid(); - if (guid != null) { - AgentResourceBase res = _resources.get(guid); - if (res != null) { - res.stop(); - _resources.remove(guid); - } - } - } - txn.commit(); - return; - } - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Unable to get host " + guid + " due to " + ex.getMessage(), ex); - } finally { - txn.close(); + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + if (this.mode.equalsIgnoreCase("Stop")) { + txn.start(); + MockHost host = _mockHostDao.findByVmId(this.vmId); + if (host != null) { + String guid = host.getGuid(); + if (guid != null) { + AgentResourceBase res = _resources.get(guid); + if (res != null) { + res.stop(); + _resources.remove(guid); + } + } + } + txn.commit(); + return; + } + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Unable to get host " + guid + " due to " + ex.getMessage(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - String resource = null; - if (vmType.equalsIgnoreCase("secstorage")) { - resource = "com.cloud.agent.AgentStorageResource"; - } - MockHostVO mockHost = new MockHostVO(); - mockHost.setDataCenterId(this.dcId); - mockHost.setPodId(this.podId); - mockHost.setCpuCount(DEFAULT_HOST_CPU_CORES); - mockHost.setCpuSpeed(DEFAULT_HOST_SPEED_MHZ); - mockHost.setMemorySize(DEFAULT_HOST_MEM_SIZE); - mockHost.setGuid(this.guid); - mockHost.setName(name); - mockHost.setPrivateIpAddress(this.privateIpAddress); - mockHost.setPublicIpAddress(this.privateIpAddress); - mockHost.setStorageIpAddress(this.privateIpAddress); - mockHost.setPrivateMacAddress(this.privateMacAddress); - mockHost.setPublicMacAddress(this.privateMacAddress); - mockHost.setStorageMacAddress(this.privateMacAddress); - mockHost.setVersion(this.getClass().getPackage().getImplementationVersion()); - mockHost.setResource(resource); - mockHost.setVmId(vmId); - Transaction simtxn = Transaction.open(Transaction.SIMULATOR_DB); - try { - simtxn.start(); - mockHost = _mockHostDao.persist(mockHost); - simtxn.commit(); - } catch (Exception ex) { - simtxn.rollback(); - throw new CloudRuntimeException("Unable to persist host " + mockHost.getGuid() + " due to " - + ex.getMessage(), ex); - } finally { - simtxn.close(); + String resource = null; + if (vmType.equalsIgnoreCase("secstorage")) { + resource = "com.cloud.agent.AgentStorageResource"; + } + MockHostVO mockHost = new MockHostVO(); + mockHost.setDataCenterId(this.dcId); + mockHost.setPodId(this.podId); + mockHost.setCpuCount(DEFAULT_HOST_CPU_CORES); + mockHost.setCpuSpeed(DEFAULT_HOST_SPEED_MHZ); + mockHost.setMemorySize(DEFAULT_HOST_MEM_SIZE); + mockHost.setGuid(this.guid); + mockHost.setName(name); + mockHost.setPrivateIpAddress(this.privateIpAddress); + mockHost.setPublicIpAddress(this.privateIpAddress); + mockHost.setStorageIpAddress(this.privateIpAddress); + mockHost.setPrivateMacAddress(this.privateMacAddress); + 
mockHost.setPublicMacAddress(this.privateMacAddress); + mockHost.setStorageMacAddress(this.privateMacAddress); + mockHost.setVersion(this.getClass().getPackage().getImplementationVersion()); + mockHost.setResource(resource); + mockHost.setVmId(vmId); + Transaction simtxn = Transaction.open(Transaction.SIMULATOR_DB); + try { + simtxn.start(); + mockHost = _mockHostDao.persist(mockHost); + simtxn.commit(); + } catch (Exception ex) { + simtxn.rollback(); + throw new CloudRuntimeException("Unable to persist host " + mockHost.getGuid() + " due to " + + ex.getMessage(), ex); + } finally { + simtxn.close(); simtxn = Transaction.open(Transaction.CLOUD_DB); simtxn.close(); - } + } - if (vmType.equalsIgnoreCase("secstorage")) { - AgentStorageResource storageResource = new AgentStorageResource(); - try { - Map params = new HashMap(); - Map details = new HashMap(); - params.put("guid", this.guid); - details.put("guid", this.guid); - storageResource.configure("secondaryStorage", params); - storageResource.start(); - // on the simulator the ssvm is as good as a direct - // agent - _resourceMgr.addHost(mockHost.getDataCenterId(), storageResource, Host.Type.SecondaryStorageVM, - details); - _resources.put(this.guid, storageResource); - } catch (ConfigurationException e) { - s_logger.debug("Failed to load secondary storage resource: " + e.toString()); - return; - } - } - } - } + if (vmType.equalsIgnoreCase("secstorage")) { + AgentStorageResource storageResource = new AgentStorageResource(); + try { + Map params = new HashMap(); + Map details = new HashMap(); + params.put("guid", this.guid); + details.put("guid", this.guid); + storageResource.configure("secondaryStorage", params); + storageResource.start(); + // on the simulator the ssvm is as good as a direct + // agent + _resourceMgr.addHost(mockHost.getDataCenterId(), storageResource, Host.Type.SecondaryStorageVM, + details); + _resources.put(this.guid, storageResource); + } catch (ConfigurationException e) { + s_logger.debug("Failed to load secondary storage resource: " + e.toString()); + return; + } + } + } + } - @Override - public MockHost getHost(String guid) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - MockHost _host = _mockHostDao.findByGuid(guid); - txn.commit(); - if (_host != null) { - return _host; - } else { - s_logger.error("Host with guid " + guid + " was not found"); - return null; - } - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Unable to get host " + guid + " due to " + ex.getMessage(), ex); - } finally { - txn.close(); + @Override + public MockHost getHost(String guid) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + MockHost _host = _mockHostDao.findByGuid(guid); + txn.commit(); + if (_host != null) { + return _host; + } else { + s_logger.error("Host with guid " + guid + " was not found"); + return null; + } + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Unable to get host " + guid + " due to " + ex.getMessage(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - } + } + } - @Override - public GetHostStatsAnswer getHostStatistic(GetHostStatsCommand cmd) { - String hostGuid = cmd.getHostGuid(); - MockHost host = null; - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - host = _mockHostDao.findByGuid(hostGuid); - txn.commit(); - if (host == null) { - return null; - } - } catch (Exception ex) { - 
txn.rollback(); - throw new CloudRuntimeException("Unable to get host " + hostGuid + " due to " + ex.getMessage(), ex); - } finally { - txn.close(); + @Override + public GetHostStatsAnswer getHostStatistic(GetHostStatsCommand cmd) { + String hostGuid = cmd.getHostGuid(); + MockHost host = null; + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + host = _mockHostDao.findByGuid(hostGuid); + txn.commit(); + if (host == null) { + return null; + } + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Unable to get host " + hostGuid + " due to " + ex.getMessage(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - Transaction vmtxn = Transaction.open(Transaction.SIMULATOR_DB); - try { - vmtxn.start(); - List vms = _mockVmDao.findByHostId(host.getId()); - vmtxn.commit(); - double usedMem = 0.0; - double usedCpu = 0.0; - for (MockVMVO vm : vms) { - usedMem += vm.getMemory(); - usedCpu += vm.getCpu(); - } + Transaction vmtxn = Transaction.open(Transaction.SIMULATOR_DB); + try { + vmtxn.start(); + List vms = _mockVmDao.findByHostId(host.getId()); + vmtxn.commit(); + double usedMem = 0.0; + double usedCpu = 0.0; + for (MockVMVO vm : vms) { + usedMem += vm.getMemory(); + usedCpu += vm.getCpu(); + } - HostStatsEntry hostStats = new HostStatsEntry(); - hostStats.setTotalMemoryKBs(host.getMemorySize()); - hostStats.setFreeMemoryKBs(host.getMemorySize() - usedMem); - hostStats.setNetworkReadKBs(32768); - hostStats.setNetworkWriteKBs(16384); - hostStats.setCpuUtilization(usedCpu / (host.getCpuCount() * host.getCpuSpeed())); - hostStats.setEntityType("simulator-host"); - hostStats.setHostId(cmd.getHostId()); - return new GetHostStatsAnswer(cmd, hostStats); - } catch (Exception ex) { - vmtxn.rollback(); - throw new CloudRuntimeException("Unable to get Vms on host " + host.getGuid() + " due to " - + ex.getMessage(), ex); - } finally { - vmtxn.close(); + HostStatsEntry hostStats = new HostStatsEntry(); + hostStats.setTotalMemoryKBs(host.getMemorySize()); + hostStats.setFreeMemoryKBs(host.getMemorySize() - usedMem); + hostStats.setNetworkReadKBs(32768); + hostStats.setNetworkWriteKBs(16384); + hostStats.setCpuUtilization(usedCpu / (host.getCpuCount() * host.getCpuSpeed())); + hostStats.setEntityType("simulator-host"); + hostStats.setHostId(cmd.getHostId()); + return new GetHostStatsAnswer(cmd, hostStats); + } catch (Exception ex) { + vmtxn.rollback(); + throw new CloudRuntimeException("Unable to get Vms on host " + host.getGuid() + " due to " + + ex.getMessage(), ex); + } finally { + vmtxn.close(); vmtxn = Transaction.open(Transaction.CLOUD_DB); vmtxn.close(); - } - } + } + } - @Override - public Answer checkHealth(CheckHealthCommand cmd) { - return new Answer(cmd); - } + @Override + public Answer checkHealth(CheckHealthCommand cmd) { + return new Answer(cmd); + } - @Override - public Answer pingTest(PingTestCommand cmd) { - return new Answer(cmd); - } + @Override + public Answer pingTest(PingTestCommand cmd) { + return new Answer(cmd); + } - @Override - public boolean start() { - return true; - } + @Override + public boolean start() { + return true; + } - @Override - public boolean stop() { - return true; - } + @Override + public boolean stop() { + return true; + } - @Override - public String getName() { - return this.getClass().getSimpleName(); - } + @Override + public String getName() { + return this.getClass().getSimpleName(); + } - @Override - public MaintainAnswer 
maintain(com.cloud.agent.api.MaintainCommand cmd) { - return new MaintainAnswer(cmd); - } + @Override + public MaintainAnswer maintain(com.cloud.agent.api.MaintainCommand cmd) { + return new MaintainAnswer(cmd); + } - @Override - public Answer checkNetworkCommand(CheckNetworkCommand cmd) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking if network name setup is done on the resource"); - } - return new CheckNetworkAnswer(cmd, true, "Network Setup check by names is done"); - } + @Override + public Answer checkNetworkCommand(CheckNetworkCommand cmd) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Checking if network name setup is done on the resource"); + } + return new CheckNetworkAnswer(cmd, true, "Network Setup check by names is done"); + } } diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java index 1076089dcd6..3c371bc4363 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java @@ -28,9 +28,9 @@ import java.util.Map; import java.util.UUID; import javax.ejb.Local; +import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.agent.api.storage.*; import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; @@ -57,6 +57,22 @@ import com.cloud.agent.api.SecStorageSetupAnswer; import com.cloud.agent.api.SecStorageSetupCommand; import com.cloud.agent.api.SecStorageVMSetupCommand; import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.agent.api.storage.CopyVolumeAnswer; +import com.cloud.agent.api.storage.CopyVolumeCommand; +import com.cloud.agent.api.storage.CreateAnswer; +import com.cloud.agent.api.storage.CreateCommand; +import com.cloud.agent.api.storage.CreatePrivateTemplateAnswer; +import com.cloud.agent.api.storage.DeleteTemplateCommand; +import com.cloud.agent.api.storage.DestroyCommand; +import com.cloud.agent.api.storage.DownloadAnswer; +import com.cloud.agent.api.storage.DownloadCommand; +import com.cloud.agent.api.storage.DownloadProgressCommand; +import com.cloud.agent.api.storage.ListTemplateAnswer; +import com.cloud.agent.api.storage.ListTemplateCommand; +import com.cloud.agent.api.storage.ListVolumeAnswer; +import com.cloud.agent.api.storage.ListVolumeCommand; +import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer; +import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand; import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.agent.api.to.VolumeTO; import com.cloud.simulator.MockHost; @@ -71,13 +87,11 @@ import com.cloud.simulator.dao.MockSecStorageDao; import com.cloud.simulator.dao.MockStoragePoolDao; import com.cloud.simulator.dao.MockVMDao; import com.cloud.simulator.dao.MockVolumeDao; -import com.cloud.storage.Storage; -import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.template.TemplateInfo; -import com.cloud.utils.component.Inject; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.DiskProfile; @@ -85,1244 +99,1244 @@ import com.cloud.vm.VirtualMachine.State; @Local(value = { MockStorageManager.class }) public 
class MockStorageManagerImpl implements MockStorageManager { - private static final Logger s_logger = Logger.getLogger(MockStorageManagerImpl.class); - @Inject - MockStoragePoolDao _mockStoragePoolDao = null; - @Inject - MockSecStorageDao _mockSecStorageDao = null; - @Inject - MockVolumeDao _mockVolumeDao = null; - @Inject - MockVMDao _mockVMDao = null; - @Inject - MockHostDao _mockHostDao = null; + private static final Logger s_logger = Logger.getLogger(MockStorageManagerImpl.class); + @Inject + MockStoragePoolDao _mockStoragePoolDao = null; + @Inject + MockSecStorageDao _mockSecStorageDao = null; + @Inject + MockVolumeDao _mockVolumeDao = null; + @Inject + MockVMDao _mockVMDao = null; + @Inject + MockHostDao _mockHostDao = null; - private MockVolumeVO findVolumeFromSecondary(String path, String ssUrl, MockVolumeType type) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - String volumePath = path.replaceAll(ssUrl, ""); - MockSecStorageVO secStorage = _mockSecStorageDao.findByUrl(ssUrl); - if (secStorage == null) { - return null; - } - volumePath = secStorage.getMountPoint() + volumePath; - volumePath = volumePath.replaceAll("//", "/"); - MockVolumeVO volume = _mockVolumeDao.findByStoragePathAndType(volumePath); - txn.commit(); - if (volume == null) { - return null; - } - return volume; - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Unable to find volume " + path + " on secondary " + ssUrl, ex); - } finally { - txn.close(); + private MockVolumeVO findVolumeFromSecondary(String path, String ssUrl, MockVolumeType type) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + String volumePath = path.replaceAll(ssUrl, ""); + MockSecStorageVO secStorage = _mockSecStorageDao.findByUrl(ssUrl); + if (secStorage == null) { + return null; + } + volumePath = secStorage.getMountPoint() + volumePath; + volumePath = volumePath.replaceAll("//", "/"); + MockVolumeVO volume = _mockVolumeDao.findByStoragePathAndType(volumePath); + txn.commit(); + if (volume == null) { + return null; + } + return volume; + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Unable to find volume " + path + " on secondary " + ssUrl, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - } - - @Override - public PrimaryStorageDownloadAnswer primaryStorageDownload(PrimaryStorageDownloadCommand cmd) { - MockVolumeVO template = findVolumeFromSecondary(cmd.getUrl(), cmd.getSecondaryStorageUrl(), - MockVolumeType.TEMPLATE); - if (template == null) { - return new PrimaryStorageDownloadAnswer("Can't find primary storage"); - } - - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - MockStoragePoolVO primaryStorage = null; - try { - txn.start(); - primaryStorage = _mockStoragePoolDao.findByUuid(cmd.getPoolUuid()); - txn.commit(); - if (primaryStorage == null) { - return new PrimaryStorageDownloadAnswer("Can't find primary storage"); - } - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when finding primary storagee " + cmd.getPoolUuid(), ex); - } finally { - txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); - txn.close(); - } - - String volumeName = UUID.randomUUID().toString(); - MockVolumeVO newVolume = new MockVolumeVO(); - newVolume.setName(volumeName); - newVolume.setPath(primaryStorage.getMountPoint() + volumeName); - newVolume.setPoolId(primaryStorage.getId()); - 
newVolume.setSize(template.getSize()); - newVolume.setType(MockVolumeType.VOLUME); - txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - _mockVolumeDao.persist(newVolume); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when saving volume " + newVolume, ex); - } finally { - txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); - txn.close(); - } - return new PrimaryStorageDownloadAnswer(newVolume.getPath(), newVolume.getSize()); - } - - @Override - public CreateAnswer createVolume(CreateCommand cmd) { - StorageFilerTO sf = cmd.getPool(); - DiskProfile dskch = cmd.getDiskCharacteristics(); - MockStoragePoolVO storagePool = null; - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - storagePool = _mockStoragePoolDao.findByUuid(sf.getUuid()); - txn.commit(); - if (storagePool == null) { - return new CreateAnswer(cmd, "Failed to find storage pool: " + sf.getUuid()); - } - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when finding storage " + sf.getUuid(), ex); - } finally { - txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); - txn.close(); - } - - String volumeName = UUID.randomUUID().toString(); - MockVolumeVO volume = new MockVolumeVO(); - volume.setPoolId(storagePool.getId()); - volume.setName(volumeName); - volume.setPath(storagePool.getMountPoint() + volumeName); - volume.setSize(dskch.getSize()); - volume.setType(MockVolumeType.VOLUME); - txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - volume = _mockVolumeDao.persist(volume); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when saving volume " + volume, ex); - } finally { - txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); - txn.close(); - } - - VolumeTO volumeTo = new VolumeTO(cmd.getVolumeId(), dskch.getType(), sf.getType(), sf.getUuid(), - volume.getName(), storagePool.getMountPoint(), volume.getPath(), volume.getSize(), null); - - return new CreateAnswer(cmd, volumeTo); - } - - @Override - public AttachVolumeAnswer AttachVolume(AttachVolumeCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - String poolid = cmd.getPoolUuid(); - String volumeName = cmd.getVolumeName(); - MockVolumeVO volume = _mockVolumeDao.findByStoragePathAndType(cmd.getVolumePath()); - if (volume == null) { - return new AttachVolumeAnswer(cmd, "Can't find volume:" + volumeName + "on pool:" + poolid); - } - - String vmName = cmd.getVmName(); - MockVMVO vm = _mockVMDao.findByVmName(vmName); - if (vm == null) { - return new AttachVolumeAnswer(cmd, "can't vm :" + vmName); - } - txn.commit(); - - return new AttachVolumeAnswer(cmd, cmd.getDeviceId()); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when attaching volume " + cmd.getVolumeName() + " to VM " - + cmd.getVmName(), ex); - } finally { - txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); - txn.close(); - } - } - - @Override - public Answer AttachIso(AttachIsoCommand cmd) { - MockVolumeVO iso = findVolumeFromSecondary(cmd.getIsoPath(), cmd.getStoreUrl(), MockVolumeType.ISO); - if (iso == null) { - return new Answer(cmd, false, "Failed to find the iso: " + cmd.getIsoPath() + "on secondary storage " - + cmd.getStoreUrl()); - } - - String vmName = cmd.getVmName(); - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - MockVMVO vm = null; - try { - 
txn.start(); - vm = _mockVMDao.findByVmName(vmName); - txn.commit(); - if (vm == null) { - return new Answer(cmd, false, "can't vm :" + vmName); - } - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when attaching iso to vm " + vm.getName(), ex); - } finally { - txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); - txn.close(); - } - return new Answer(cmd); - } - - @Override - public Answer DeleteStoragePool(DeleteStoragePoolCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - MockStoragePoolVO storage = _mockStoragePoolDao.findByUuid(cmd.getPool().getUuid()); - if (storage == null) { - return new Answer(cmd, false, "can't find storage pool:" + cmd.getPool().getUuid()); - } - _mockStoragePoolDao.remove(storage.getId()); - txn.commit(); - return new Answer(cmd); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when deleting storage pool " + cmd.getPool().getPath(), ex); - } finally { - txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); - txn.close(); - } - } - - @Override - public ModifyStoragePoolAnswer ModifyStoragePool(ModifyStoragePoolCommand cmd) { - StorageFilerTO sf = cmd.getPool(); - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - MockStoragePoolVO storagePool = null; - try { - txn.start(); - storagePool = _mockStoragePoolDao.findByUuid(sf.getUuid()); - if (storagePool == null) { - storagePool = new MockStoragePoolVO(); - storagePool.setUuid(sf.getUuid()); - storagePool.setMountPoint("/mnt/" + sf.getUuid() + File.separator); - - Long size = DEFAULT_HOST_STORAGE_SIZE; - String path = sf.getPath(); - int index = path.lastIndexOf("/"); - if (index != -1) { - path = path.substring(index + 1); - if (path != null) { - String values[] = path.split("="); - if (values.length > 1 && values[0].equalsIgnoreCase("size")) { - size = Long.parseLong(values[1]); - } - } - } - storagePool.setCapacity(size); - storagePool.setStorageType(sf.getType()); - storagePool = _mockStoragePoolDao.persist(storagePool); - } - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when modifying storage pool " + cmd.getPool().getPath(), ex); - } finally { - txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); - txn.close(); - } - return new ModifyStoragePoolAnswer(cmd, storagePool.getCapacity(), 0, new HashMap()); - } - - @Override - public Answer CreateStoragePool(CreateStoragePoolCommand cmd) { - StorageFilerTO sf = cmd.getPool(); - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - MockStoragePoolVO storagePool = null; - try { - txn.start(); - storagePool = _mockStoragePoolDao.findByUuid(sf.getUuid()); - if (storagePool == null) { - storagePool = new MockStoragePoolVO(); - storagePool.setUuid(sf.getUuid()); - storagePool.setMountPoint("/mnt/" + sf.getUuid() + File.separator); - - Long size = DEFAULT_HOST_STORAGE_SIZE; - String path = sf.getPath(); - int index = path.lastIndexOf("/"); - if (index != -1) { - path = path.substring(index + 1); - if (path != null) { - String values[] = path.split("="); - if (values.length > 1 && values[0].equalsIgnoreCase("size")) { - size = Long.parseLong(values[1]); - } - } - } - storagePool.setCapacity(size); - storagePool.setStorageType(sf.getType()); - storagePool = _mockStoragePoolDao.persist(storagePool); - } - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when creating storage pool " + 
cmd.getPool().getPath(), ex);
- } finally {
- txn.close();
- txn = Transaction.open(Transaction.CLOUD_DB);
- txn.close();
- }
- return new ModifyStoragePoolAnswer(cmd, storagePool.getCapacity(), 0, new HashMap());
- }
-
- @Override
- public Answer SecStorageSetup(SecStorageSetupCommand cmd) {
- Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
- MockSecStorageVO storage = null;
- try {
- txn.start();
- storage = _mockSecStorageDao.findByUrl(cmd.getSecUrl());
- if (storage == null) {
- return new Answer(cmd, false, "can't find the storage");
- }
- txn.commit();
- } catch (Exception ex) {
- txn.rollback();
- throw new CloudRuntimeException("Error when setting up sec storage" + cmd.getSecUrl(), ex);
- } finally {
- txn.close();
- txn = Transaction.open(Transaction.CLOUD_DB);
- txn.close();
- }
- return new SecStorageSetupAnswer(storage.getMountPoint());
- }
+ }
+ }

 @Override
- public Answer ListVolumes(ListVolumeCommand cmd) {
- Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
- MockSecStorageVO storage = null;
- try {
- txn.start();
- storage = _mockSecStorageDao.findByUrl(cmd.getSecUrl());
- if (storage == null) {
- return new Answer(cmd, false, "Failed to get secondary storage");
- }
- txn.commit();
- } catch (Exception ex) {
- txn.rollback();
- throw new CloudRuntimeException("Error when finding sec storage " + cmd.getSecUrl(), ex);
- } finally {
- txn.close();
+ public PrimaryStorageDownloadAnswer primaryStorageDownload(PrimaryStorageDownloadCommand cmd) {
+ MockVolumeVO template = findVolumeFromSecondary(cmd.getUrl(), cmd.getSecondaryStorageUrl(),
+ MockVolumeType.TEMPLATE);
+ if (template == null) {
+ return new PrimaryStorageDownloadAnswer("Can't find primary storage");
+ }
+
+ Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
+ MockStoragePoolVO primaryStorage = null;
+ try {
+ txn.start();
+ primaryStorage = _mockStoragePoolDao.findByUuid(cmd.getPoolUuid());
+ txn.commit();
+ if (primaryStorage == null) {
+ return new PrimaryStorageDownloadAnswer("Can't find primary storage");
+ }
+ } catch (Exception ex) {
+ txn.rollback();
+ throw new CloudRuntimeException("Error when finding primary storage " + cmd.getPoolUuid(), ex);
+ } finally {
+ txn.close();
 txn = Transaction.open(Transaction.CLOUD_DB);
 txn.close();
+ }
+
+ String volumeName = UUID.randomUUID().toString();
+ MockVolumeVO newVolume = new MockVolumeVO();
+ newVolume.setName(volumeName);
+ newVolume.setPath(primaryStorage.getMountPoint() + volumeName);
+ newVolume.setPoolId(primaryStorage.getId());
+ newVolume.setSize(template.getSize());
+ newVolume.setType(MockVolumeType.VOLUME);
+ txn = Transaction.open(Transaction.SIMULATOR_DB);
+ try {
+ txn.start();
+ _mockVolumeDao.persist(newVolume);
+ txn.commit();
+ } catch (Exception ex) {
+ txn.rollback();
+ throw new CloudRuntimeException("Error when saving 
volume " + newVolume, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - } + } + return new PrimaryStorageDownloadAnswer(newVolume.getPath(), newVolume.getSize()); + } - @Override - public Answer ListTemplates(ListTemplateCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - MockSecStorageVO storage = null; - try { - txn.start(); - storage = _mockSecStorageDao.findByUrl(cmd.getSecUrl()); - if (storage == null) { - return new Answer(cmd, false, "Failed to get secondary storage"); - } - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when finding sec storage " + cmd.getSecUrl(), ex); - } finally { - txn.close(); + @Override + public CreateAnswer createVolume(CreateCommand cmd) { + StorageFilerTO sf = cmd.getPool(); + DiskProfile dskch = cmd.getDiskCharacteristics(); + MockStoragePoolVO storagePool = null; + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + storagePool = _mockStoragePoolDao.findByUuid(sf.getUuid()); + txn.commit(); + if (storagePool == null) { + return new CreateAnswer(cmd, "Failed to find storage pool: " + sf.getUuid()); + } + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when finding storage " + sf.getUuid(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - List templates = _mockVolumeDao.findByStorageIdAndType(storage.getId(), - MockVolumeType.TEMPLATE); - - Map templateInfos = new HashMap(); - for (MockVolumeVO template : templates) { - templateInfos.put(template.getName(), new TemplateInfo(template.getName(), template.getPath() - .replaceAll(storage.getMountPoint(), ""), template.getSize(), template.getSize(), true, false)); - } - txn.commit(); - return new ListTemplateAnswer(cmd.getSecUrl(), templateInfos); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when finding template on sec storage " + storage.getId(), ex); - } finally { - txn.close(); + String volumeName = UUID.randomUUID().toString(); + MockVolumeVO volume = new MockVolumeVO(); + volume.setPoolId(storagePool.getId()); + volume.setName(volumeName); + volume.setPath(storagePool.getMountPoint() + volumeName); + volume.setSize(dskch.getSize()); + volume.setType(MockVolumeType.VOLUME); + txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + volume = _mockVolumeDao.persist(volume); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when saving volume " + volume, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - } + } - @Override - public Answer Destroy(DestroyCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - MockVolumeVO volume = _mockVolumeDao.findByStoragePathAndType(cmd.getVolume().getPath()); - if (volume != null) { - _mockVolumeDao.remove(volume.getId()); - } + VolumeTO volumeTo = new VolumeTO(cmd.getVolumeId(), dskch.getType(), sf.getType(), sf.getUuid(), + volume.getName(), storagePool.getMountPoint(), volume.getPath(), volume.getSize(), null); - if (cmd.getVmName() != null) { - MockVm vm = _mockVMDao.findByVmName(cmd.getVmName()); - vm.setState(State.Expunging); - if (vm != null) { - MockVMVO vmVo = _mockVMDao.createForUpdate(vm.getId()); - _mockVMDao.update(vm.getId(), 
vmVo);
- }
- }
- txn.commit();
- } catch (Exception ex) {
- txn.rollback();
- throw new CloudRuntimeException("Error when destroying volume " + cmd.getVolume().getPath(), ex);
- } finally {
- txn.close();
+ return new CreateAnswer(cmd, volumeTo);
+ }
+
+ @Override
+ public AttachVolumeAnswer AttachVolume(AttachVolumeCommand cmd) {
+ Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
+ try {
+ txn.start();
+ String poolid = cmd.getPoolUuid();
+ String volumeName = cmd.getVolumeName();
+ MockVolumeVO volume = _mockVolumeDao.findByStoragePathAndType(cmd.getVolumePath());
+ if (volume == null) {
+ return new AttachVolumeAnswer(cmd, "Can't find volume: " + volumeName + " on pool: " + poolid);
+ }
+
+ String vmName = cmd.getVmName();
+ MockVMVO vm = _mockVMDao.findByVmName(vmName);
+ if (vm == null) {
+ return new AttachVolumeAnswer(cmd, "can't find vm: " + vmName);
+ }
+ txn.commit();
+
+ return new AttachVolumeAnswer(cmd, cmd.getDeviceId());
+ } catch (Exception ex) {
+ txn.rollback();
+ throw new CloudRuntimeException("Error when attaching volume " + cmd.getVolumeName() + " to VM "
+ + cmd.getVmName(), ex);
+ } finally {
+ txn.close();
 txn = Transaction.open(Transaction.CLOUD_DB);
 txn.close();
- }
- return new Answer(cmd);
- }
+ }
+ }

- @Override
- public DownloadAnswer Download(DownloadCommand cmd) {
- MockSecStorageVO ssvo = null;
- Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
- try {
- txn.start();
- ssvo = _mockSecStorageDao.findByUrl(cmd.getSecUrl());
- if (ssvo == null) {
- return new DownloadAnswer("can't find secondary storage",
- VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR);
- }
- txn.commit();
- } catch (Exception ex) {
- txn.rollback();
- throw new CloudRuntimeException("Error accessing secondary storage " + cmd.getSecUrl(), ex);
- } finally {
- txn.close();
+ @Override
+ public Answer AttachIso(AttachIsoCommand cmd) {
+ MockVolumeVO iso = findVolumeFromSecondary(cmd.getIsoPath(), cmd.getStoreUrl(), MockVolumeType.ISO);
+ if (iso == null) {
+ return new Answer(cmd, false, "Failed to find the iso: " + cmd.getIsoPath() + " on secondary storage "
+ + cmd.getStoreUrl());
+ }
+
+ String vmName = cmd.getVmName();
+ Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
+ MockVMVO vm = null;
+ try {
+ txn.start();
+ vm = _mockVMDao.findByVmName(vmName);
+ txn.commit();
+ if (vm == null) {
+ return new Answer(cmd, false, "can't find vm: " + vmName);
+ }
+ } catch (Exception ex) {
+ txn.rollback();
+ throw new CloudRuntimeException("Error when attaching iso to vm " + vmName, ex);
+ } finally {
+ txn.close();
 txn = Transaction.open(Transaction.CLOUD_DB);
 txn.close();
- }
+ }
+ return new Answer(cmd);
+ }

- MockVolumeVO volume = new MockVolumeVO();
- volume.setPoolId(ssvo.getId());
- volume.setName(cmd.getName());
- volume.setPath(ssvo.getMountPoint() + cmd.getName());
- volume.setSize(0);
- volume.setType(MockVolumeType.TEMPLATE);
- volume.setStatus(Status.DOWNLOAD_IN_PROGRESS);
- txn = Transaction.open(Transaction.SIMULATOR_DB);
- try {
- txn.start();
- volume = _mockVolumeDao.persist(volume);
- txn.commit();
- } catch (Exception ex) {
- txn.rollback();
- throw new CloudRuntimeException("Error when saving volume " + volume, ex);
- } finally {
- txn.close();
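// ---------------------------------------------------------------------
// Editor's note on the recurring pattern in these hunks: every DAO call is
// wrapped in the same open/start/commit-or-rollback/close dance against
// Transaction.SIMULATOR_DB, and each finally block then opens and
// immediately closes a CLOUD_DB transaction, presumably to point the
// thread-local transaction context back at the management-server database
// before control returns. A minimal sketch of how that boilerplate could be
// factored out; the inSimulatorDb helper and SimulatorWork callback are
// illustrative names, not part of this change:
//
//     interface SimulatorWork<T> {
//         T work() throws Exception;
//     }
//
//     static <T> T inSimulatorDb(String action, SimulatorWork<T> work) {
//         Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
//         try {
//             txn.start();
//             T result = work.work();
//             txn.commit();
//             return result;
//         } catch (Exception ex) {
//             txn.rollback();
//             throw new CloudRuntimeException("Error when " + action, ex);
//         } finally {
//             txn.close();
//             // restore the thread's current database to the cloud DB
//             txn = Transaction.open(Transaction.CLOUD_DB);
//             txn.close();
//         }
//     }
// ---------------------------------------------------------------------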
+ @Override
+ public Answer DeleteStoragePool(DeleteStoragePoolCommand cmd) {
+ Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
+ try {
+ txn.start();
+ MockStoragePoolVO storage = _mockStoragePoolDao.findByUuid(cmd.getPool().getUuid());
+ if (storage == null) {
+ return new Answer(cmd, false, "can't find storage pool: " + cmd.getPool().getUuid());
+ }
+ _mockStoragePoolDao.remove(storage.getId());
+ txn.commit();
+ return new Answer(cmd);
+ } catch (Exception ex) {
+ txn.rollback();
+ throw new CloudRuntimeException("Error when deleting storage pool " + cmd.getPool().getPath(), ex);
+ } finally {
+ txn.close();
 txn = Transaction.open(Transaction.CLOUD_DB);
 txn.close();
- }
- return new DownloadAnswer(String.valueOf(volume.getId()), 0, "Downloading", Status.DOWNLOAD_IN_PROGRESS,
- cmd.getName(), cmd.getName(), volume.getSize(), volume.getSize(), null);
- }
+ }
+ }

- @Override
- public DownloadAnswer DownloadProcess(DownloadProgressCommand cmd) {
- Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
- try {
- txn.start();
- String volumeId = cmd.getJobId();
- MockVolumeVO volume = _mockVolumeDao.findById(Long.parseLong(volumeId));
- if (volume == null) {
- return new DownloadAnswer("Can't find the downloading volume", Status.ABANDONED);
- }
+ @Override
+ public ModifyStoragePoolAnswer ModifyStoragePool(ModifyStoragePoolCommand cmd) {
+ StorageFilerTO sf = cmd.getPool();
+ Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
+ MockStoragePoolVO storagePool = null;
+ try {
+ txn.start();
+ storagePool = _mockStoragePoolDao.findByUuid(sf.getUuid());
+ if (storagePool == null) {
+ storagePool = new MockStoragePoolVO();
+ storagePool.setUuid(sf.getUuid());
+ storagePool.setMountPoint("/mnt/" + sf.getUuid() + File.separator);

- long size = Math.min(volume.getSize() + DEFAULT_TEMPLATE_SIZE / 5, DEFAULT_TEMPLATE_SIZE);
- volume.setSize(size);
-
- double volumeSize = volume.getSize();
- double pct = volumeSize / DEFAULT_TEMPLATE_SIZE;
- if (pct >= 1.0) {
- volume.setStatus(Status.DOWNLOADED);
- _mockVolumeDao.update(volume.getId(), volume);
- txn.commit();
- return new DownloadAnswer(cmd.getJobId(), 100, cmd,
- com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOADED, volume.getPath(),
- volume.getName());
- } else {
- _mockVolumeDao.update(volume.getId(), volume);
- txn.commit();
- return new DownloadAnswer(cmd.getJobId(), (int) (pct * 100.0), cmd,
- com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS, volume.getPath(),
- volume.getName());
- }
- } catch (Exception ex) {
- txn.rollback();
- throw new CloudRuntimeException("Error during download job " + cmd.getJobId(), ex);
- } finally {
- txn.close();
+ Long size = DEFAULT_HOST_STORAGE_SIZE;
+ String path = sf.getPath();
+ int index = path.lastIndexOf("/");
+ if (index != -1) {
+ path = path.substring(index + 1);
+ if (path != null) {
+ String values[] = path.split("=");
+ if (values.length > 1 && values[0].equalsIgnoreCase("size")) {
+ size = Long.parseLong(values[1]);
+ }
+ }
+ }
+ storagePool.setCapacity(size);
+ storagePool.setStorageType(sf.getType());
+ storagePool = _mockStoragePoolDao.persist(storagePool);
+ }
+ txn.commit();
+ } catch (Exception ex) {
+ txn.rollback();
+ throw new CloudRuntimeException("Error when modifying storage pool " + cmd.getPool().getPath(), ex);
+ } finally {
+ txn.close();
 txn = Transaction.open(Transaction.CLOUD_DB);
 txn.close();
- }
- }
+ }
+ return new ModifyStoragePoolAnswer(cmd, storagePool.getCapacity(), 0, new HashMap());
+ }

- @Override
- public GetStorageStatsAnswer GetStorageStats(GetStorageStatsCommand cmd) {
- String uuid = cmd.getStorageId();
- Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
- try {
- txn.start();
- if (uuid == null) {
- String secUrl = cmd.getSecUrl();
- MockSecStorageVO secondary = 
_mockSecStorageDao.findByUrl(secUrl); - if (secondary == null) { - return new GetStorageStatsAnswer(cmd, "Can't find the secondary storage:" + secUrl); - } - Long totalUsed = _mockVolumeDao.findTotalStorageId(secondary.getId()); - txn.commit(); - return new GetStorageStatsAnswer(cmd, secondary.getCapacity(), totalUsed); - } else { - MockStoragePoolVO pool = _mockStoragePoolDao.findByUuid(uuid); - if (pool == null) { - return new GetStorageStatsAnswer(cmd, "Can't find the pool"); - } - Long totalUsed = _mockVolumeDao.findTotalStorageId(pool.getId()); - if (totalUsed == null) { - totalUsed = 0L; - } - txn.commit(); - return new GetStorageStatsAnswer(cmd, pool.getCapacity(), totalUsed); - } - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("DBException during storage stats collection for pool " + uuid, ex); - } finally { - txn.close(); + @Override + public Answer CreateStoragePool(CreateStoragePoolCommand cmd) { + StorageFilerTO sf = cmd.getPool(); + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + MockStoragePoolVO storagePool = null; + try { + txn.start(); + storagePool = _mockStoragePoolDao.findByUuid(sf.getUuid()); + if (storagePool == null) { + storagePool = new MockStoragePoolVO(); + storagePool.setUuid(sf.getUuid()); + storagePool.setMountPoint("/mnt/" + sf.getUuid() + File.separator); + + Long size = DEFAULT_HOST_STORAGE_SIZE; + String path = sf.getPath(); + int index = path.lastIndexOf("/"); + if (index != -1) { + path = path.substring(index + 1); + if (path != null) { + String values[] = path.split("="); + if (values.length > 1 && values[0].equalsIgnoreCase("size")) { + size = Long.parseLong(values[1]); + } + } + } + storagePool.setCapacity(size); + storagePool.setStorageType(sf.getType()); + storagePool = _mockStoragePoolDao.persist(storagePool); + } + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when creating storage pool " + cmd.getPool().getPath(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - } + } + return new ModifyStoragePoolAnswer(cmd, storagePool.getCapacity(), 0, new HashMap()); + } - @Override - public ManageSnapshotAnswer ManageSnapshot(ManageSnapshotCommand cmd) { - String volPath = cmd.getVolumePath(); - MockVolumeVO volume = null; - MockStoragePoolVO storagePool = null; - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - volume = _mockVolumeDao.findByStoragePathAndType(volPath); - if (volume == null) { - return new ManageSnapshotAnswer(cmd, false, "Can't find the volume"); - } - storagePool = _mockStoragePoolDao.findById(volume.getPoolId()); - if (storagePool == null) { - return new ManageSnapshotAnswer(cmd, false, "Can't find the storage pooll"); - } - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Unable to perform snapshot", ex); - } finally { - txn.close(); + @Override + public Answer SecStorageSetup(SecStorageSetupCommand cmd) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + MockSecStorageVO storage = null; + try { + txn.start(); + storage = _mockSecStorageDao.findByUrl(cmd.getSecUrl()); + if (storage == null) { + return new Answer(cmd, false, "can't find the storage"); + } + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when setting up sec storage" + cmd.getSecUrl(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); 
- } + } + return new SecStorageSetupAnswer(storage.getMountPoint()); + } - String mountPoint = storagePool.getMountPoint(); - MockVolumeVO snapshot = new MockVolumeVO(); - - snapshot.setName(cmd.getSnapshotName()); - snapshot.setPath(mountPoint + cmd.getSnapshotName()); - snapshot.setSize(volume.getSize()); - snapshot.setPoolId(storagePool.getId()); - snapshot.setType(MockVolumeType.SNAPSHOT); - snapshot.setStatus(Status.DOWNLOADED); - txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - snapshot = _mockVolumeDao.persist(snapshot); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when saving snapshot " + snapshot, ex); - } finally { - txn.close(); + @Override + public Answer ListVolumes(ListVolumeCommand cmd) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + MockSecStorageVO storage = null; + try { + txn.start(); + storage = _mockSecStorageDao.findByUrl(cmd.getSecUrl()); + if (storage == null) { + return new Answer(cmd, false, "Failed to get secondary storage"); + } + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when finding sec storage " + cmd.getSecUrl(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - return new ManageSnapshotAnswer(cmd, snapshot.getId(), snapshot.getPath(), true, ""); - } + txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + List volumes = _mockVolumeDao.findByStorageIdAndType(storage.getId(), + MockVolumeType.VOLUME); - @Override - public BackupSnapshotAnswer BackupSnapshot(BackupSnapshotCommand cmd, SimulatorInfo info) { - // emulate xenserver backupsnapshot, if the base volume is deleted, then - // backupsnapshot failed - MockVolumeVO volume = null; - MockVolumeVO snapshot = null; - MockSecStorageVO secStorage = null; - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - volume = _mockVolumeDao.findByStoragePathAndType(cmd.getVolumePath()); - if (volume == null) { - return new BackupSnapshotAnswer(cmd, false, "Can't find base volume: " + cmd.getVolumePath(), null, - true); - } - String snapshotPath = cmd.getSnapshotUuid(); - snapshot = _mockVolumeDao.findByStoragePathAndType(snapshotPath); - if (snapshot == null) { - return new BackupSnapshotAnswer(cmd, false, "can't find snapshot" + snapshotPath, null, true); - } - - String secStorageUrl = cmd.getSecondaryStorageUrl(); - secStorage = _mockSecStorageDao.findByUrl(secStorageUrl); - if (secStorage == null) { - return new BackupSnapshotAnswer(cmd, false, "can't find sec storage" + snapshotPath, null, true); - } - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when backing up snapshot"); - } finally { - txn.close(); + Map templateInfos = new HashMap(); + for (MockVolumeVO volume : volumes) { + templateInfos.put(volume.getId(), new TemplateInfo(volume.getName(), volume.getPath() + .replaceAll(storage.getMountPoint(), ""), volume.getSize(), volume.getSize(), true, false)); + } + txn.commit(); + return new ListVolumeAnswer(cmd.getSecUrl(), templateInfos); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when finding template on sec storage " + storage.getId(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } + } - MockVolumeVO newsnapshot = new MockVolumeVO(); - String name = UUID.randomUUID().toString(); - 
newsnapshot.setName(name); - newsnapshot.setPath(secStorage.getMountPoint() + name); - newsnapshot.setPoolId(secStorage.getId()); - newsnapshot.setSize(snapshot.getSize()); - newsnapshot.setStatus(Status.DOWNLOADED); - newsnapshot.setType(MockVolumeType.SNAPSHOT); - txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - snapshot = _mockVolumeDao.persist(snapshot); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when backing up snapshot " + newsnapshot, ex); - } finally { - txn.close(); + @Override + public Answer ListTemplates(ListTemplateCommand cmd) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + MockSecStorageVO storage = null; + try { + txn.start(); + storage = _mockSecStorageDao.findByUrl(cmd.getSecUrl()); + if (storage == null) { + return new Answer(cmd, false, "Failed to get secondary storage"); + } + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when finding sec storage " + cmd.getSecUrl(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - return new BackupSnapshotAnswer(cmd, true, null, newsnapshot.getName(), true); - } + txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + List templates = _mockVolumeDao.findByStorageIdAndType(storage.getId(), + MockVolumeType.TEMPLATE); - @Override - public Answer DeleteSnapshotBackup(DeleteSnapshotBackupCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - MockVolumeVO backSnapshot = _mockVolumeDao.findByName(cmd.getSnapshotUuid()); - if (backSnapshot == null) { - return new Answer(cmd, false, "can't find the backupsnapshot: " + cmd.getSnapshotUuid()); - } - _mockVolumeDao.remove(backSnapshot.getId()); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when deleting snapshot"); - } finally { - txn.close(); + Map templateInfos = new HashMap(); + for (MockVolumeVO template : templates) { + templateInfos.put(template.getName(), new TemplateInfo(template.getName(), template.getPath() + .replaceAll(storage.getMountPoint(), ""), template.getSize(), template.getSize(), true, false)); + } + txn.commit(); + return new ListTemplateAnswer(cmd.getSecUrl(), templateInfos); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when finding template on sec storage " + storage.getId(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - return new Answer(cmd); - } + } + } - @Override - public CreateVolumeFromSnapshotAnswer CreateVolumeFromSnapshot(CreateVolumeFromSnapshotCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - MockVolumeVO backSnapshot = null; - MockStoragePoolVO primary = null; - try { - txn.start(); - backSnapshot = _mockVolumeDao.findByName(cmd.getSnapshotUuid()); - if (backSnapshot == null) { - return new CreateVolumeFromSnapshotAnswer(cmd, false, "can't find the backupsnapshot: " - + cmd.getSnapshotUuid(), null); - } + @Override + public Answer Destroy(DestroyCommand cmd) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + MockVolumeVO volume = _mockVolumeDao.findByStoragePathAndType(cmd.getVolume().getPath()); + if (volume != null) { + _mockVolumeDao.remove(volume.getId()); + } - primary = _mockStoragePoolDao.findByUuid(cmd.getPrimaryStoragePoolNameLabel()); - if (primary == null) 
{
- return new CreateVolumeFromSnapshotAnswer(cmd, false, "can't find the primary storage: "
- + cmd.getPrimaryStoragePoolNameLabel(), null);
- }
- txn.commit();
- } catch (Exception ex) {
- txn.rollback();
- throw new CloudRuntimeException("Error when creating volume from snapshot", ex);
- } finally {
- txn.close();
+ if (cmd.getVmName() != null) {
+ MockVm vm = _mockVMDao.findByVmName(cmd.getVmName());
+ if (vm != null) {
+ vm.setState(State.Expunging);
+ MockVMVO vmVo = _mockVMDao.createForUpdate(vm.getId());
+ _mockVMDao.update(vm.getId(), vmVo);
+ }
+ }
+ txn.commit();
+ } catch (Exception ex) {
+ txn.rollback();
+ throw new CloudRuntimeException("Error when destroying volume " + cmd.getVolume().getPath(), ex);
+ } finally {
+ txn.close();
 txn = Transaction.open(Transaction.CLOUD_DB);
 txn.close();
- }
+ }
+ return new Answer(cmd);
+ }

- String uuid = UUID.randomUUID().toString();
- MockVolumeVO volume = new MockVolumeVO();
-
- volume.setName(uuid);
- volume.setPath(primary.getMountPoint() + uuid);
- volume.setPoolId(primary.getId());
- volume.setSize(backSnapshot.getSize());
- volume.setStatus(Status.DOWNLOADED);
- volume.setType(MockVolumeType.VOLUME);
- txn = Transaction.open(Transaction.SIMULATOR_DB);
- try {
- txn.start();
- _mockVolumeDao.persist(volume);
- txn.commit();
- } catch (Exception ex) {
- txn.rollback();
- throw new CloudRuntimeException("Error when creating volume from snapshot " + volume, ex);
- } finally {
- txn.close();
+ @Override
+ public DownloadAnswer Download(DownloadCommand cmd) {
+ MockSecStorageVO ssvo = null;
+ Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
+ try {
+ txn.start();
+ ssvo = _mockSecStorageDao.findByUrl(cmd.getSecUrl());
+ if (ssvo == null) {
+ return new DownloadAnswer("can't find secondary storage",
+ VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR);
+ }
+ txn.commit();
+ } catch (Exception ex) {
+ txn.rollback();
+ throw new CloudRuntimeException("Error accessing secondary storage " + cmd.getSecUrl(), ex);
+ } finally {
+ txn.close();
 txn = Transaction.open(Transaction.CLOUD_DB);
 txn.close();
- }
+ }

- return new CreateVolumeFromSnapshotAnswer(cmd, true, null, volume.getPath());
- }
-
- @Override
- public Answer DeleteTemplate(DeleteTemplateCommand cmd) {
- Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
- try {
- txn.start();
- MockVolumeVO template = _mockVolumeDao.findByStoragePathAndType(cmd.getTemplatePath());
- if (template == null) {
- return new Answer(cmd, false, "can't find template:" + cmd.getTemplatePath());
- }
- _mockVolumeDao.remove(template.getId());
- txn.commit();
- } catch (Exception ex) {
- txn.rollback();
- throw new CloudRuntimeException("Error when deleting template");
- } finally {
- txn.close();
+ MockVolumeVO volume = new MockVolumeVO();
+ volume.setPoolId(ssvo.getId());
+ volume.setName(cmd.getName());
+ volume.setPath(ssvo.getMountPoint() + cmd.getName());
+ volume.setSize(0);
+ volume.setType(MockVolumeType.TEMPLATE);
+ volume.setStatus(Status.DOWNLOAD_IN_PROGRESS);
+ txn = Transaction.open(Transaction.SIMULATOR_DB);
+ try {
+ txn.start();
+ volume = _mockVolumeDao.persist(volume);
+ txn.commit();
+ } catch (Exception ex) {
+ txn.rollback();
+ throw new CloudRuntimeException("Error when saving volume " + volume, ex);
+ } finally {
+ txn.close();
 txn = Transaction.open(Transaction.CLOUD_DB);
 txn.close();
- }
- return new Answer(cmd);
- }
+ }
+ return new DownloadAnswer(String.valueOf(volume.getId()), 0, "Downloading", Status.DOWNLOAD_IN_PROGRESS,
+ cmd.getName(), cmd.getName(), 
volume.getSize(), volume.getSize(), null); + } - @Override - public Answer SecStorageVMSetup(SecStorageVMSetupCommand cmd) { - return new Answer(cmd); - } + @Override + public DownloadAnswer DownloadProcess(DownloadProgressCommand cmd) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + String volumeId = cmd.getJobId(); + MockVolumeVO volume = _mockVolumeDao.findById(Long.parseLong(volumeId)); + if (volume == null) { + return new DownloadAnswer("Can't find the downloading volume", Status.ABANDONED); + } - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - // TODO Auto-generated method stub - return true; - } + long size = Math.min(volume.getSize() + DEFAULT_TEMPLATE_SIZE / 5, DEFAULT_TEMPLATE_SIZE); + volume.setSize(size); - @Override - public boolean start() { - // TODO Auto-generated method stub - return true; - } - - @Override - public boolean stop() { - // TODO Auto-generated method stub - return true; - } - - @Override - public String getName() { - return this.getClass().getSimpleName(); - } - - @Override - public void preinstallTemplates(String url, long zoneId) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - MockSecStorageVO storage = null; - try { - txn.start(); - storage = _mockSecStorageDao.findByUrl(url); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Unable to find sec storage at " + url, ex); - } finally { - txn.close(); + double volumeSize = volume.getSize(); + double pct = volumeSize / DEFAULT_TEMPLATE_SIZE; + if (pct >= 1.0) { + volume.setStatus(Status.DOWNLOADED); + _mockVolumeDao.update(volume.getId(), volume); + txn.commit(); + return new DownloadAnswer(cmd.getJobId(), 100, cmd, + com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOADED, volume.getPath(), + volume.getName()); + } else { + _mockVolumeDao.update(volume.getId(), volume); + txn.commit(); + return new DownloadAnswer(cmd.getJobId(), (int) (pct * 100.0), cmd, + com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS, volume.getPath(), + volume.getName()); + } + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error during download job " + cmd.getJobId(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - if (storage == null) { - storage = new MockSecStorageVO(); - URI uri; - try { - uri = new URI(url); - } catch (URISyntaxException e) { - return; - } + } + } - String nfsHost = uri.getHost(); - String nfsPath = uri.getPath(); - String path = nfsHost + ":" + nfsPath; - String dir = "/mnt/" + UUID.nameUUIDFromBytes(path.getBytes()).toString() + File.separator; + @Override + public GetStorageStatsAnswer GetStorageStats(GetStorageStatsCommand cmd) { + String uuid = cmd.getStorageId(); + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + if (uuid == null) { + String secUrl = cmd.getSecUrl(); + MockSecStorageVO secondary = _mockSecStorageDao.findByUrl(secUrl); + if (secondary == null) { + return new GetStorageStatsAnswer(cmd, "Can't find the secondary storage:" + secUrl); + } + Long totalUsed = _mockVolumeDao.findTotalStorageId(secondary.getId()); + txn.commit(); + return new GetStorageStatsAnswer(cmd, secondary.getCapacity(), totalUsed); + } else { + MockStoragePoolVO pool = _mockStoragePoolDao.findByUuid(uuid); + if (pool == null) { + return new GetStorageStatsAnswer(cmd, "Can't find the pool"); + } + Long totalUsed = 
_mockVolumeDao.findTotalStorageId(pool.getId());
+ if (totalUsed == null) {
+ totalUsed = 0L;
+ }
+ txn.commit();
+ return new GetStorageStatsAnswer(cmd, pool.getCapacity(), totalUsed);
+ }
+ } catch (Exception ex) {
+ txn.rollback();
+ throw new CloudRuntimeException("DBException during storage stats collection for pool " + uuid, ex);
+ } finally {
+ txn.close();
+ txn = Transaction.open(Transaction.CLOUD_DB);
+ txn.close();
+ }
+ }

+ @Override
+ public ManageSnapshotAnswer ManageSnapshot(ManageSnapshotCommand cmd) {
+ String volPath = cmd.getVolumePath();
+ MockVolumeVO volume = null;
+ MockStoragePoolVO storagePool = null;
+ Transaction txn = Transaction.open(Transaction.SIMULATOR_DB);
+ try {
+ txn.start();
+ volume = _mockVolumeDao.findByStoragePathAndType(volPath);
+ if (volume == null) {
+ return new ManageSnapshotAnswer(cmd, false, "Can't find the volume");
+ }
+ storagePool = _mockStoragePoolDao.findById(volume.getPoolId());
+ if (storagePool == null) {
+ return new ManageSnapshotAnswer(cmd, false, "Can't find the storage pool");
+ }
+ txn.commit();
+ } catch (Exception ex) {
+ txn.rollback();
+ throw new CloudRuntimeException("Unable to perform snapshot", ex);
+ } finally {
+ txn.close();
+ txn = Transaction.open(Transaction.CLOUD_DB);
+ txn.close();
+ }

+ String mountPoint = storagePool.getMountPoint();
+ MockVolumeVO snapshot = new MockVolumeVO();
+
+ snapshot.setName(cmd.getSnapshotName());
+ snapshot.setPath(mountPoint + cmd.getSnapshotName());
+ snapshot.setSize(volume.getSize());
+ snapshot.setPoolId(storagePool.getId());
+ snapshot.setType(MockVolumeType.SNAPSHOT);
+ snapshot.setStatus(Status.DOWNLOADED);
+ txn = Transaction.open(Transaction.SIMULATOR_DB);
+ try {
+ txn.start();
+ snapshot = _mockVolumeDao.persist(snapshot);
+ txn.commit();
+ } catch (Exception ex) {
+ txn.rollback();
+ throw new CloudRuntimeException("Error when saving snapshot " + snapshot, ex);
+ } finally {
+ txn.close();
+ txn = Transaction.open(Transaction.CLOUD_DB);
+ txn.close();
+ }

+ return new ManageSnapshotAnswer(cmd, snapshot.getId(), snapshot.getPath(), true, "");
+ }
+
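// ---------------------------------------------------------------------
// Editor's note: the simulator models the whole snapshot lifecycle as rows
// in the mock-volume table. ManageSnapshot (above) records a SNAPSHOT-typed
// copy of the volume on the same primary pool; BackupSnapshot (below) then
// records a second SNAPSHOT-typed row under the secondary storage mount
// point. A sketch of the rows a create-then-backup sequence would leave
// behind (names, paths, and pool ids are illustrative, not from the source):
//
//     after ManageSnapshot("snap-1") on /mnt/<pool-uuid>/vol-a:
//         name="snap-1", path="/mnt/<pool-uuid>/snap-1",
//         poolId=<primary pool>, type=SNAPSHOT, status=DOWNLOADED
//     after BackupSnapshot(snapshotUuid="/mnt/<pool-uuid>/snap-1"):
//         name=<random UUID>, path=<sec storage mount>/<random UUID>,
//         poolId=<sec storage>, type=SNAPSHOT, status=DOWNLOADED
// ---------------------------------------------------------------------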
sec storage" + snapshotPath, null, true); + } + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when backing up snapshot"); + } finally { + txn.close(); + txn = Transaction.open(Transaction.CLOUD_DB); + txn.close(); + } + + MockVolumeVO newsnapshot = new MockVolumeVO(); + String name = UUID.randomUUID().toString(); + newsnapshot.setName(name); + newsnapshot.setPath(secStorage.getMountPoint() + name); + newsnapshot.setPoolId(secStorage.getId()); + newsnapshot.setSize(snapshot.getSize()); + newsnapshot.setStatus(Status.DOWNLOADED); + newsnapshot.setType(MockVolumeType.SNAPSHOT); + txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + snapshot = _mockVolumeDao.persist(snapshot); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when backing up snapshot " + newsnapshot, ex); + } finally { + txn.close(); + txn = Transaction.open(Transaction.CLOUD_DB); + txn.close(); + } + + return new BackupSnapshotAnswer(cmd, true, null, newsnapshot.getName(), true); + } + + @Override + public Answer DeleteSnapshotBackup(DeleteSnapshotBackupCommand cmd) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + MockVolumeVO backSnapshot = _mockVolumeDao.findByName(cmd.getSnapshotUuid()); + if (backSnapshot == null) { + return new Answer(cmd, false, "can't find the backupsnapshot: " + cmd.getSnapshotUuid()); + } + _mockVolumeDao.remove(backSnapshot.getId()); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when deleting snapshot"); + } finally { + txn.close(); + txn = Transaction.open(Transaction.CLOUD_DB); + txn.close(); + } + return new Answer(cmd); + } + + @Override + public CreateVolumeFromSnapshotAnswer CreateVolumeFromSnapshot(CreateVolumeFromSnapshotCommand cmd) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + MockVolumeVO backSnapshot = null; + MockStoragePoolVO primary = null; + try { + txn.start(); + backSnapshot = _mockVolumeDao.findByName(cmd.getSnapshotUuid()); + if (backSnapshot == null) { + return new CreateVolumeFromSnapshotAnswer(cmd, false, "can't find the backupsnapshot: " + + cmd.getSnapshotUuid(), null); + } + + primary = _mockStoragePoolDao.findByUuid(cmd.getPrimaryStoragePoolNameLabel()); + if (primary == null) { + return new CreateVolumeFromSnapshotAnswer(cmd, false, "can't find the primary storage: " + + cmd.getPrimaryStoragePoolNameLabel(), null); + } + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when creating volume from snapshot", ex); + } finally { + txn.close(); + txn = Transaction.open(Transaction.CLOUD_DB); + txn.close(); + } + + String uuid = UUID.randomUUID().toString(); + MockVolumeVO volume = new MockVolumeVO(); + + volume.setName(uuid); + volume.setPath(primary.getMountPoint() + uuid); + volume.setPoolId(primary.getId()); + volume.setSize(backSnapshot.getSize()); + volume.setStatus(Status.DOWNLOADED); + volume.setType(MockVolumeType.VOLUME); + txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + _mockVolumeDao.persist(volume); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when creating volume from snapshot " + volume, ex); + } finally { + txn.close(); + txn = Transaction.open(Transaction.CLOUD_DB); + txn.close(); + } + + return new CreateVolumeFromSnapshotAnswer(cmd, true, null, volume.getPath()); + } + + @Override + 
public Answer DeleteTemplate(DeleteTemplateCommand cmd) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + MockVolumeVO template = _mockVolumeDao.findByStoragePathAndType(cmd.getTemplatePath()); + if (template == null) { + return new Answer(cmd, false, "can't find template: " + cmd.getTemplatePath()); + } + _mockVolumeDao.remove(template.getId()); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when deleting template", ex); + } finally { + txn.close(); + txn = Transaction.open(Transaction.CLOUD_DB); + txn.close(); + } + return new Answer(cmd); + } + + @Override + public Answer SecStorageVMSetup(SecStorageVMSetupCommand cmd) { + return new Answer(cmd); + } + + @Override + public boolean configure(String name, Map<String, Object> params) throws ConfigurationException { + // TODO Auto-generated method stub + return true; + } + + @Override + public boolean start() { + // TODO Auto-generated method stub + return true; + } + + @Override + public boolean stop() { + // TODO Auto-generated method stub + return true; + } + + @Override + public String getName() { + return this.getClass().getSimpleName(); + } + + @Override + public void preinstallTemplates(String url, long zoneId) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + MockSecStorageVO storage = null; + try { + txn.start(); + storage = _mockSecStorageDao.findByUrl(url); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Unable to find sec storage at " + url, ex); + } finally { + txn.close(); + txn = Transaction.open(Transaction.CLOUD_DB); + txn.close(); + } + if (storage == null) { + storage = new MockSecStorageVO(); + URI uri; + try { + uri = new URI(url); + } catch (URISyntaxException e) { + return; + } + + String nfsHost = uri.getHost(); + String nfsPath = uri.getPath(); + String path = nfsHost + ":" + nfsPath; + String dir = "/mnt/" + UUID.nameUUIDFromBytes(path.getBytes()).toString() + File.separator; + + storage.setUrl(url); + storage.setCapacity(DEFAULT_HOST_STORAGE_SIZE); + + storage.setMountPoint(dir); + txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + storage = _mockSecStorageDao.persist(storage); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when saving storage " + storage, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - // preinstall default templates into secondary storage - long defaultTemplateSize = 2 * 1024 * 1024 * 1024L; - MockVolumeVO template = new MockVolumeVO(); - template.setName("simulator-domR"); - template.setPath(storage.getMountPoint() + "template/tmpl/1/10/" + UUID.randomUUID().toString()); - template.setPoolId(storage.getId()); - template.setSize(defaultTemplateSize); - template.setType(MockVolumeType.TEMPLATE); - template.setStatus(Status.DOWNLOADED); - txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - template = _mockVolumeDao.persist(template); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when saving template " + template, ex); - } finally { - txn.close(); + // preinstall default templates into secondary storage + long defaultTemplateSize = 2 * 1024 * 1024 * 1024L; + MockVolumeVO template = new MockVolumeVO(); + template.setName("simulator-domR"); + template.setPath(storage.getMountPoint() + "template/tmpl/1/10/" + UUID.randomUUID().toString()); +
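// preinstalled entries are metadata only: fixed 2 GB size, random UUID path, no file on disk +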
template.setPoolId(storage.getId()); + template.setSize(defaultTemplateSize); + template.setType(MockVolumeType.TEMPLATE); + template.setStatus(Status.DOWNLOADED); + txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + template = _mockVolumeDao.persist(template); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when saving template " + template, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - template = new MockVolumeVO(); - template.setName("simulator-Centos"); - template.setPath(storage.getMountPoint() + "template/tmpl/1/11/" + UUID.randomUUID().toString()); - template.setPoolId(storage.getId()); - template.setSize(defaultTemplateSize); - template.setType(MockVolumeType.TEMPLATE); - template.setStatus(Status.DOWNLOADED); - txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - template = _mockVolumeDao.persist(template); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when saving template " + template, ex); - } finally { - txn.close(); + template = new MockVolumeVO(); + template.setName("simulator-Centos"); + template.setPath(storage.getMountPoint() + "template/tmpl/1/11/" + UUID.randomUUID().toString()); + template.setPoolId(storage.getId()); + template.setSize(defaultTemplateSize); + template.setType(MockVolumeType.TEMPLATE); + template.setStatus(Status.DOWNLOADED); + txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + template = _mockVolumeDao.persist(template); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when saving template " + template, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - } + } + } - } + } - @Override - public StoragePoolInfo getLocalStorage(String hostGuid) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - MockHost host = null; - MockStoragePoolVO storagePool = null; - try { - txn.start(); - host = _mockHostDao.findByGuid(hostGuid); - storagePool = _mockStoragePoolDao.findByHost(hostGuid); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Unable to find host " + hostGuid, ex); - } finally { - txn.close(); + @Override + public StoragePoolInfo getLocalStorage(String hostGuid) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + MockHost host = null; + MockStoragePoolVO storagePool = null; + try { + txn.start(); + host = _mockHostDao.findByGuid(hostGuid); + storagePool = _mockStoragePoolDao.findByHost(hostGuid); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Unable to find host " + hostGuid, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - if (storagePool == null) { - String uuid = UUID.randomUUID().toString(); - storagePool = new MockStoragePoolVO(); - storagePool.setUuid(uuid); - storagePool.setMountPoint("/mnt/" + uuid + File.separator); - storagePool.setCapacity(DEFAULT_HOST_STORAGE_SIZE); - storagePool.setHostGuid(hostGuid); - storagePool.setStorageType(StoragePoolType.Filesystem); - txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - storagePool = _mockStoragePoolDao.persist(storagePool); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when saving storagePool " + storagePool, ex); - 
} finally { - txn.close(); + if (storagePool == null) { + String uuid = UUID.randomUUID().toString(); + storagePool = new MockStoragePoolVO(); + storagePool.setUuid(uuid); + storagePool.setMountPoint("/mnt/" + uuid + File.separator); + storagePool.setCapacity(DEFAULT_HOST_STORAGE_SIZE); + storagePool.setHostGuid(hostGuid); + storagePool.setStorageType(StoragePoolType.Filesystem); + txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + storagePool = _mockStoragePoolDao.persist(storagePool); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when saving storagePool " + storagePool, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - } - return new StoragePoolInfo(storagePool.getUuid(), host.getPrivateIpAddress(), storagePool.getMountPoint(), - storagePool.getMountPoint(), storagePool.getPoolType(), storagePool.getCapacity(), 0); - } + } + } + return new StoragePoolInfo(storagePool.getUuid(), host.getPrivateIpAddress(), storagePool.getMountPoint(), + storagePool.getMountPoint(), storagePool.getPoolType(), storagePool.getCapacity(), 0); + } - @Override - public StoragePoolInfo getLocalStorage(String hostGuid, Long storageSize) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - MockHost host = null; - try { - txn.start(); - host = _mockHostDao.findByGuid(hostGuid); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Unable to find host " + hostGuid, ex); - } finally { - txn.close(); + @Override + public StoragePoolInfo getLocalStorage(String hostGuid, Long storageSize) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + MockHost host = null; + try { + txn.start(); + host = _mockHostDao.findByGuid(hostGuid); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Unable to find host " + hostGuid, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - if (storageSize == null) { - storageSize = DEFAULT_HOST_STORAGE_SIZE; - } - txn = Transaction.open(Transaction.SIMULATOR_DB); - MockStoragePoolVO storagePool = null; - try { - txn.start(); - storagePool = _mockStoragePoolDao.findByHost(hostGuid); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when finding storagePool " + storagePool, ex); - } finally { - txn.close(); + } + if (storageSize == null) { + storageSize = DEFAULT_HOST_STORAGE_SIZE; + } + txn = Transaction.open(Transaction.SIMULATOR_DB); + MockStoragePoolVO storagePool = null; + try { + txn.start(); + storagePool = _mockStoragePoolDao.findByHost(hostGuid); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when finding storagePool " + storagePool, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - if (storagePool == null) { - String uuid = UUID.randomUUID().toString(); - storagePool = new MockStoragePoolVO(); - storagePool.setUuid(uuid); - storagePool.setMountPoint("/mnt/" + uuid + File.separator); - storagePool.setCapacity(storageSize); - storagePool.setHostGuid(hostGuid); - storagePool.setStorageType(StoragePoolType.Filesystem); - txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - storagePool = _mockStoragePoolDao.persist(storagePool); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error 
when saving storagePool " + storagePool, ex); - } finally { - txn.close(); + } + if (storagePool == null) { + String uuid = UUID.randomUUID().toString(); + storagePool = new MockStoragePoolVO(); + storagePool.setUuid(uuid); + storagePool.setMountPoint("/mnt/" + uuid + File.separator); + storagePool.setCapacity(storageSize); + storagePool.setHostGuid(hostGuid); + storagePool.setStorageType(StoragePoolType.Filesystem); + txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + storagePool = _mockStoragePoolDao.persist(storagePool); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when saving storagePool " + storagePool, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - } - return new StoragePoolInfo(storagePool.getUuid(), host.getPrivateIpAddress(), storagePool.getMountPoint(), - storagePool.getMountPoint(), storagePool.getPoolType(), storagePool.getCapacity(), 0); - } + } + } + return new StoragePoolInfo(storagePool.getUuid(), host.getPrivateIpAddress(), storagePool.getMountPoint(), + storagePool.getMountPoint(), storagePool.getPoolType(), storagePool.getCapacity(), 0); + } - @Override - public CreatePrivateTemplateAnswer CreatePrivateTemplateFromSnapshot(CreatePrivateTemplateFromSnapshotCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - MockVolumeVO snapshot = null; - MockSecStorageVO sec = null; - try { - txn.start(); - String snapshotUUId = cmd.getSnapshotUuid(); - snapshot = _mockVolumeDao.findByName(snapshotUUId); - if (snapshot == null) { - snapshotUUId = cmd.getSnapshotName(); - snapshot = _mockVolumeDao.findByName(snapshotUUId); - if (snapshot == null) { - return new CreatePrivateTemplateAnswer(cmd, false, "can't find snapshot:" + snapshotUUId); - } - } + @Override + public CreatePrivateTemplateAnswer CreatePrivateTemplateFromSnapshot(CreatePrivateTemplateFromSnapshotCommand cmd) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + MockVolumeVO snapshot = null; + MockSecStorageVO sec = null; + try { + txn.start(); + String snapshotUUId = cmd.getSnapshotUuid(); + snapshot = _mockVolumeDao.findByName(snapshotUUId); + if (snapshot == null) { + snapshotUUId = cmd.getSnapshotName(); + snapshot = _mockVolumeDao.findByName(snapshotUUId); + if (snapshot == null) { + return new CreatePrivateTemplateAnswer(cmd, false, "can't find snapshot:" + snapshotUUId); + } + } - sec = _mockSecStorageDao.findByUrl(cmd.getSecondaryStorageUrl()); - if (sec == null) { - return new CreatePrivateTemplateAnswer(cmd, false, "can't find secondary storage"); - } - txn.commit(); - } finally { - txn.close(); + sec = _mockSecStorageDao.findByUrl(cmd.getSecondaryStorageUrl()); + if (sec == null) { + return new CreatePrivateTemplateAnswer(cmd, false, "can't find secondary storage"); + } + txn.commit(); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - MockVolumeVO template = new MockVolumeVO(); - String uuid = UUID.randomUUID().toString(); - template.setName(uuid); - template.setPath(sec.getMountPoint() + uuid); - template.setPoolId(sec.getId()); - template.setSize(snapshot.getSize()); - template.setStatus(Status.DOWNLOADED); - template.setType(MockVolumeType.TEMPLATE); - txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - template = _mockVolumeDao.persist(template); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when saving template 
" + template, ex); - } finally { - txn.close(); + MockVolumeVO template = new MockVolumeVO(); + String uuid = UUID.randomUUID().toString(); + template.setName(uuid); + template.setPath(sec.getMountPoint() + uuid); + template.setPoolId(sec.getId()); + template.setSize(snapshot.getSize()); + template.setStatus(Status.DOWNLOADED); + template.setType(MockVolumeType.TEMPLATE); + txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + template = _mockVolumeDao.persist(template); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when saving template " + template, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - return new CreatePrivateTemplateAnswer(cmd, true, "", template.getName(), template.getSize(), - template.getSize(), template.getName(), ImageFormat.QCOW2); - } + return new CreatePrivateTemplateAnswer(cmd, true, "", template.getName(), template.getSize(), + template.getSize(), template.getName(), ImageFormat.QCOW2); + } - @Override - public Answer ComputeChecksum(ComputeChecksumCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - MockVolumeVO volume = _mockVolumeDao.findByName(cmd.getTemplatePath()); - if (volume == null) { - return new Answer(cmd, false, "cant' find volume:" + cmd.getTemplatePath()); - } - String md5 = null; - try { - MessageDigest md = MessageDigest.getInstance("md5"); - md5 = String.format("%032x", new BigInteger(1, md.digest(cmd.getTemplatePath().getBytes()))); - } catch (NoSuchAlgorithmException e) { - s_logger.debug("failed to gernerate md5:" + e.toString()); - } - txn.commit(); - return new Answer(cmd, true, md5); - } finally { - txn.close(); + @Override + public Answer ComputeChecksum(ComputeChecksumCommand cmd) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + MockVolumeVO volume = _mockVolumeDao.findByName(cmd.getTemplatePath()); + if (volume == null) { + return new Answer(cmd, false, "cant' find volume:" + cmd.getTemplatePath()); + } + String md5 = null; + try { + MessageDigest md = MessageDigest.getInstance("md5"); + md5 = String.format("%032x", new BigInteger(1, md.digest(cmd.getTemplatePath().getBytes()))); + } catch (NoSuchAlgorithmException e) { + s_logger.debug("failed to gernerate md5:" + e.toString()); + } + txn.commit(); + return new Answer(cmd, true, md5); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - } + } + } - @Override - public CreatePrivateTemplateAnswer CreatePrivateTemplateFromVolume(CreatePrivateTemplateFromVolumeCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - MockVolumeVO volume = null; - MockSecStorageVO sec = null; - try { - txn.start(); - volume = _mockVolumeDao.findByStoragePathAndType(cmd.getVolumePath()); - if (volume == null) { - return new CreatePrivateTemplateAnswer(cmd, false, "cant' find volume" + cmd.getVolumePath()); - } + @Override + public CreatePrivateTemplateAnswer CreatePrivateTemplateFromVolume(CreatePrivateTemplateFromVolumeCommand cmd) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + MockVolumeVO volume = null; + MockSecStorageVO sec = null; + try { + txn.start(); + volume = _mockVolumeDao.findByStoragePathAndType(cmd.getVolumePath()); + if (volume == null) { + return new CreatePrivateTemplateAnswer(cmd, false, "cant' find volume" + cmd.getVolumePath()); + } - sec = 
_mockSecStorageDao.findByUrl(cmd.getSecondaryStorageUrl()); - if (sec == null) { - return new CreatePrivateTemplateAnswer(cmd, false, "can't find secondary storage"); - } - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Error when creating private template from volume"); - } finally { - txn.close(); + sec = _mockSecStorageDao.findByUrl(cmd.getSecondaryStorageUrl()); + if (sec == null) { + return new CreatePrivateTemplateAnswer(cmd, false, "can't find secondary storage"); + } + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Error when creating private template from volume", ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - MockVolumeVO template = new MockVolumeVO(); - String uuid = UUID.randomUUID().toString(); - template.setName(uuid); - template.setPath(sec.getMountPoint() + uuid); - template.setPoolId(sec.getId()); - template.setSize(volume.getSize()); - template.setStatus(Status.DOWNLOADED); - template.setType(MockVolumeType.TEMPLATE); - txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - template = _mockVolumeDao.persist(template); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Encountered " + ex.getMessage() + " when persisting template " - + template.getName(), ex); - } finally { - txn.close(); + MockVolumeVO template = new MockVolumeVO(); + String uuid = UUID.randomUUID().toString(); + template.setName(uuid); + template.setPath(sec.getMountPoint() + uuid); + template.setPoolId(sec.getId()); + template.setSize(volume.getSize()); + template.setStatus(Status.DOWNLOADED); + template.setType(MockVolumeType.TEMPLATE); + txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + template = _mockVolumeDao.persist(template); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Encountered " + ex.getMessage() + " when persisting template " + + template.getName(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - return new CreatePrivateTemplateAnswer(cmd, true, "", template.getName(), template.getSize(), - template.getSize(), template.getName(), ImageFormat.QCOW2); - } + return new CreatePrivateTemplateAnswer(cmd, true, "", template.getName(), template.getSize(), + template.getSize(), template.getName(), ImageFormat.QCOW2); + } - @Override - public CopyVolumeAnswer CopyVolume(CopyVolumeCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - boolean toSecondaryStorage = cmd.toSecondaryStorage(); - MockSecStorageVO sec = null; - MockStoragePoolVO primaryStorage = null; - try { - txn.start(); - sec = _mockSecStorageDao.findByUrl(cmd.getSecondaryStorageURL()); - if (sec == null) { - return new CopyVolumeAnswer(cmd, false, "can't find secondary storage", null, null); - } - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Encountered " + ex.getMessage() + " when accessing secondary at " - + cmd.getSecondaryStorageURL(), ex); - } finally { - txn.close(); + @Override + public CopyVolumeAnswer CopyVolume(CopyVolumeCommand cmd) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + boolean toSecondaryStorage = cmd.toSecondaryStorage(); + MockSecStorageVO sec = null; + MockStoragePoolVO primaryStorage = null; + try { + txn.start(); + sec = _mockSecStorageDao.findByUrl(cmd.getSecondaryStorageURL()); +
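// both the secondary storage and the primary pool must already exist in the mock db before the copy is simulated +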
if (sec == null) { + return new CopyVolumeAnswer(cmd, false, "can't find secondary storage", null, null); + } + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Encountered " + ex.getMessage() + " when accessing secondary at " + + cmd.getSecondaryStorageURL(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - primaryStorage = _mockStoragePoolDao.findByUuid(cmd.getPool().getUuid()); - if (primaryStorage == null) { - return new CopyVolumeAnswer(cmd, false, "Can't find primary storage", null, null); - } - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Encountered " + ex.getMessage() + " when accessing primary at " - + cmd.getPool(), ex); - } finally { - txn.close(); + txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + primaryStorage = _mockStoragePoolDao.findByUuid(cmd.getPool().getUuid()); + if (primaryStorage == null) { + return new CopyVolumeAnswer(cmd, false, "Can't find primary storage", null, null); + } + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Encountered " + ex.getMessage() + " when accessing primary at " + + cmd.getPool(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - MockVolumeVO volume = null; - txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - volume = _mockVolumeDao.findByStoragePathAndType(cmd.getVolumePath()); - if (volume == null) { - return new CopyVolumeAnswer(cmd, false, "cant' find volume" + cmd.getVolumePath(), null, null); - } - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Encountered " + ex.getMessage() + " when accessing volume at " - + cmd.getVolumePath(), ex); - } finally { - txn.close(); + MockVolumeVO volume = null; + txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + volume = _mockVolumeDao.findByStoragePathAndType(cmd.getVolumePath()); + if (volume == null) { + return new CopyVolumeAnswer(cmd, false, "can't find volume: " + cmd.getVolumePath(), null, null); + } + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Encountered " + ex.getMessage() + " when accessing volume at " + + cmd.getVolumePath(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } - String name = UUID.randomUUID().toString(); - if (toSecondaryStorage) { - MockVolumeVO vol = new MockVolumeVO(); - vol.setName(name); - vol.setPath(sec.getMountPoint() + name); - vol.setPoolId(sec.getId()); - vol.setSize(volume.getSize()); - vol.setStatus(Status.DOWNLOADED); - vol.setType(MockVolumeType.VOLUME); - txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - vol = _mockVolumeDao.persist(vol); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Encountered " + ex.getMessage() + " when persisting volume " - + vol.getName(), ex); - } finally { - txn.close(); + String name = UUID.randomUUID().toString(); + if (toSecondaryStorage) { + MockVolumeVO vol = new MockVolumeVO(); + vol.setName(name); + vol.setPath(sec.getMountPoint() + name); + vol.setPoolId(sec.getId()); + vol.setSize(volume.getSize()); + vol.setStatus(Status.DOWNLOADED); + vol.setType(MockVolumeType.VOLUME); + txn =
Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + vol = _mockVolumeDao.persist(vol); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Encountered " + ex.getMessage() + " when persisting volume " + + vol.getName(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - return new CopyVolumeAnswer(cmd, true, null, sec.getMountPoint(), vol.getPath()); - } else { - MockVolumeVO vol = new MockVolumeVO(); - vol.setName(name); - vol.setPath(primaryStorage.getMountPoint() + name); - vol.setPoolId(primaryStorage.getId()); - vol.setSize(volume.getSize()); - vol.setStatus(Status.DOWNLOADED); - vol.setType(MockVolumeType.VOLUME); - txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - vol = _mockVolumeDao.persist(vol); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Encountered " + ex.getMessage() + " when persisting volume " - + vol.getName(), ex); - } finally { - txn.close(); + } + return new CopyVolumeAnswer(cmd, true, null, sec.getMountPoint(), vol.getPath()); + } else { + MockVolumeVO vol = new MockVolumeVO(); + vol.setName(name); + vol.setPath(primaryStorage.getMountPoint() + name); + vol.setPoolId(primaryStorage.getId()); + vol.setSize(volume.getSize()); + vol.setStatus(Status.DOWNLOADED); + vol.setType(MockVolumeType.VOLUME); + txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + vol = _mockVolumeDao.persist(vol); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Encountered " + ex.getMessage() + " when persisting volume " + + vol.getName(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - return new CopyVolumeAnswer(cmd, true, null, primaryStorage.getMountPoint(), vol.getPath()); - } - } + } + return new CopyVolumeAnswer(cmd, true, null, primaryStorage.getMountPoint(), vol.getPath()); + } + } } diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java index 40cd80acf8e..63c04be0c81 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java @@ -23,20 +23,56 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import javax.ejb.Local; +import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.agent.api.*; -import com.cloud.agent.api.routing.*; -import com.cloud.network.router.VirtualRouter; import org.apache.log4j.Logger; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.BumpUpPriorityCommand; +import com.cloud.agent.api.CheckRouterAnswer; +import com.cloud.agent.api.CheckRouterCommand; +import com.cloud.agent.api.CheckVirtualMachineAnswer; +import com.cloud.agent.api.CheckVirtualMachineCommand; +import com.cloud.agent.api.CleanupNetworkRulesCmd; +import com.cloud.agent.api.GetDomRVersionAnswer; +import com.cloud.agent.api.GetDomRVersionCmd; +import com.cloud.agent.api.GetVmStatsAnswer; +import com.cloud.agent.api.GetVmStatsCommand; +import com.cloud.agent.api.GetVncPortAnswer; +import com.cloud.agent.api.GetVncPortCommand; +import com.cloud.agent.api.MigrateAnswer; +import com.cloud.agent.api.MigrateCommand; +import com.cloud.agent.api.NetworkUsageAnswer; +import com.cloud.agent.api.NetworkUsageCommand; 
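+// the explicit imports here replace the earlier com.cloud.agent.api.* and com.cloud.agent.api.routing.* wildcard imports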
+import com.cloud.agent.api.PrepareForMigrationAnswer; +import com.cloud.agent.api.PrepareForMigrationCommand; +import com.cloud.agent.api.RebootAnswer; +import com.cloud.agent.api.RebootCommand; +import com.cloud.agent.api.SecurityGroupRuleAnswer; +import com.cloud.agent.api.SecurityGroupRulesCmd; +import com.cloud.agent.api.StartAnswer; +import com.cloud.agent.api.StartCommand; +import com.cloud.agent.api.StopAnswer; +import com.cloud.agent.api.StopCommand; +import com.cloud.agent.api.VmStatsEntry; import com.cloud.agent.api.check.CheckSshAnswer; import com.cloud.agent.api.check.CheckSshCommand; import com.cloud.agent.api.proxy.CheckConsoleProxyLoadCommand; import com.cloud.agent.api.proxy.WatchConsoleProxyLoadCommand; +import com.cloud.agent.api.routing.DhcpEntryCommand; +import com.cloud.agent.api.routing.IpAssocCommand; +import com.cloud.agent.api.routing.LoadBalancerConfigCommand; +import com.cloud.agent.api.routing.NetworkElementCommand; +import com.cloud.agent.api.routing.SavePasswordCommand; +import com.cloud.agent.api.routing.SetFirewallRulesCommand; +import com.cloud.agent.api.routing.SetPortForwardingRulesCommand; +import com.cloud.agent.api.routing.SetStaticNatRulesCommand; +import com.cloud.agent.api.routing.VmDataCommand; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.network.Networks.TrafficType; +import com.cloud.network.router.VirtualRouter; import com.cloud.simulator.MockHost; import com.cloud.simulator.MockSecurityRulesVO; import com.cloud.simulator.MockVMVO; @@ -46,7 +82,6 @@ import com.cloud.simulator.dao.MockSecurityRulesDao; import com.cloud.simulator.dao.MockVMDao; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; -import com.cloud.utils.component.Inject; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine.State; @@ -55,46 +90,46 @@ import com.cloud.vm.VirtualMachine.State; public class MockVmManagerImpl implements MockVmManager { private static final Logger s_logger = Logger.getLogger(MockVmManagerImpl.class); - @Inject MockVMDao _mockVmDao = null; - @Inject MockAgentManager _mockAgentMgr = null; - @Inject MockHostDao _mockHostDao = null; - @Inject MockSecurityRulesDao _mockSecurityDao = null; - private Map>> _securityRules = new ConcurrentHashMap>>(); + @Inject MockVMDao _mockVmDao = null; + @Inject MockAgentManager _mockAgentMgr = null; + @Inject MockHostDao _mockHostDao = null; + @Inject MockSecurityRulesDao _mockSecurityDao = null; + private final Map>> _securityRules = new ConcurrentHashMap>>(); - public MockVmManagerImpl() { - } + public MockVmManagerImpl() { + } - @Override + @Override public boolean configure(String name, Map params) throws ConfigurationException { - return true; - } + return true; + } public String startVM(String vmName, NicTO[] nics, - int cpuHz, long ramSize, - String bootArgs, String hostGuid) { + int cpuHz, long ramSize, + String bootArgs, String hostGuid) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - MockHost host = null; - MockVm vm = null; - try { - txn.start(); - host = _mockHostDao.findByGuid(hostGuid); - if (host == null) { - return "can't find host"; - } + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + MockHost host = null; + MockVm vm = null; + try { + txn.start(); + host = _mockHostDao.findByGuid(hostGuid); + if (host == null) { + return "can't find host"; + } - vm = _mockVmDao.findByVmName(vmName); - txn.commit(); - } catch (Exception ex) { - 
txn.rollback(); - throw new CloudRuntimeException("Unable to start VM " + vmName, ex); - } finally { - txn.close(); + vm = _mockVmDao.findByVmName(vmName); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Unable to start VM " + vmName, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } if(vm == null) { int vncPort = 0; @@ -109,43 +144,43 @@ public class MockVmManagerImpl implements MockVmManager { vm.setHostId(host.getId()); vm.setBootargs(bootArgs); if(vmName.startsWith("s-")) { - vm.setType("SecondaryStorageVm"); + vm.setType("SecondaryStorageVm"); } else if (vmName.startsWith("v-")) { - vm.setType("ConsoleProxy"); + vm.setType("ConsoleProxy"); } else if (vmName.startsWith("r-")) { - vm.setType("DomainRouter"); + vm.setType("DomainRouter"); } else if (vmName.startsWith("i-")) { - vm.setType("User"); + vm.setType("User"); } txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - vm = _mockVmDao.persist((MockVMVO) vm); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("unable to save vm to db " + vm.getName(), ex); - } finally { - txn.close(); + try { + txn.start(); + vm = _mockVmDao.persist((MockVMVO) vm); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("unable to save vm to db " + vm.getName(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } } else { if(vm.getState() == State.Stopped) { vm.setState(State.Running); txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - _mockVmDao.update(vm.getId(), (MockVMVO)vm); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("unable to update vm " + vm.getName(), ex); - } finally { - txn.close(); + try { + txn.start(); + _mockVmDao.update(vm.getId(), (MockVMVO)vm); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("unable to update vm " + vm.getName(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } + } } } @@ -192,49 +227,49 @@ public class MockVmManagerImpl implements MockVmManager { return null; } - public boolean rebootVM(String vmName) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - MockVm vm = _mockVmDao.findByVmName(vmName); - if (vm != null) { - vm.setState(State.Running); - _mockVmDao.update(vm.getId(), (MockVMVO) vm); + public boolean rebootVM(String vmName) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + MockVm vm = _mockVmDao.findByVmName(vmName); + if (vm != null) { + vm.setState(State.Running); + _mockVmDao.update(vm.getId(), (MockVMVO) vm); - } - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("unable to reboot vm " + vmName, ex); - } finally { - txn.close(); + } + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("unable to reboot vm " + vmName, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - return true; - } + } + return true; + } - @Override - public Map getVms(String hostGuid) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - List vms = _mockVmDao.findByHostGuid(hostGuid); - Map vmMap = new HashMap(); - for (MockVMVO vm : vms) { - vmMap.put(vm.getName(), vm); 
- } - txn.commit(); - return vmMap; - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("unable to fetch vms from host " + hostGuid, ex); - } finally { - txn.close(); + @Override + public Map getVms(String hostGuid) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + List vms = _mockVmDao.findByHostGuid(hostGuid); + Map vmMap = new HashMap(); + for (MockVMVO vm : vms) { + vmMap.put(vm.getName(), vm); + } + txn.commit(); + return vmMap; + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("unable to fetch vms from host " + hostGuid, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - } + } + } @Override public CheckRouterAnswer checkRouter(CheckRouterCommand cmd) { @@ -267,30 +302,30 @@ public class MockVmManagerImpl implements MockVmManager { } @Override - public Map getVmStates(String hostGuid) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - Map states = new HashMap(); - List vms = _mockVmDao.findByHostGuid(hostGuid); - if (vms.isEmpty()) { - txn.commit(); - return states; - } - for (MockVm vm : vms) { - states.put(vm.getName(), vm.getState()); - } - txn.commit(); - return states; - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("unable to fetch vms from host " + hostGuid, ex); - } finally { - txn.close(); + public Map getVmStates(String hostGuid) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + Map states = new HashMap(); + List vms = _mockVmDao.findByHostGuid(hostGuid); + if (vms.isEmpty()) { + txn.commit(); + return states; + } + for (MockVm vm : vms) { + states.put(vm.getName(), vm.getState()); + } + txn.commit(); + return states; + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("unable to fetch vms from host " + hostGuid, ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - } + } + } @Override public boolean start() { @@ -323,26 +358,26 @@ public class MockVmManagerImpl implements MockVmManager { } @Override - public CheckVirtualMachineAnswer checkVmState(CheckVirtualMachineCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - MockVMVO vm = _mockVmDao.findByVmName(cmd.getVmName()); - if (vm == null) { - return new CheckVirtualMachineAnswer(cmd, "can't find vm:" + cmd.getVmName()); - } + public CheckVirtualMachineAnswer checkVmState(CheckVirtualMachineCommand cmd) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + MockVMVO vm = _mockVmDao.findByVmName(cmd.getVmName()); + if (vm == null) { + return new CheckVirtualMachineAnswer(cmd, "can't find vm:" + cmd.getVmName()); + } - txn.commit(); - return new CheckVirtualMachineAnswer(cmd, vm.getState(), vm.getVncPort()); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("unable to fetch vm state " + cmd.getVmName(), ex); - } finally { - txn.close(); + txn.commit(); + return new CheckVirtualMachineAnswer(cmd, vm.getState(), vm.getVncPort()); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("unable to fetch vm state " + cmd.getVmName(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - } + } + } @Override public Answer startVM(StartCommand cmd, SimulatorInfo info) { @@ -372,7 +407,7 @@ public class MockVmManagerImpl implements 
MockVmManager { @Override public Answer SetFirewallRules(SetFirewallRulesCommand cmd) { - return new Answer(cmd); + return new Answer(cmd); } @@ -382,38 +417,38 @@ public class MockVmManagerImpl implements MockVmManager { } @Override - public MigrateAnswer Migrate(MigrateCommand cmd, SimulatorInfo info) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - String vmName = cmd.getVmName(); - String destGuid = cmd.getHostGuid(); - MockVMVO vm = _mockVmDao.findByVmNameAndHost(vmName, info.getHostUuid()); - if (vm == null) { - return new MigrateAnswer(cmd, false, "can't find vm:" + vmName + " on host:" + info.getHostUuid(), null); - } else { + public MigrateAnswer Migrate(MigrateCommand cmd, SimulatorInfo info) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + String vmName = cmd.getVmName(); + String destGuid = cmd.getHostGuid(); + MockVMVO vm = _mockVmDao.findByVmNameAndHost(vmName, info.getHostUuid()); + if (vm == null) { + return new MigrateAnswer(cmd, false, "can't find vm:" + vmName + " on host:" + info.getHostUuid(), null); + } else { if (vm.getState() == State.Migrating) { vm.setState(State.Running); } } - MockHost destHost = _mockHostDao.findByGuid(destGuid); - if (destHost == null) { - return new MigrateAnswer(cmd, false, "can;t find host:" + info.getHostUuid(), null); - } - vm.setHostId(destHost.getId()); - _mockVmDao.update(vm.getId(), vm); - txn.commit(); - return new MigrateAnswer(cmd, true, null, 0); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("unable to migrate vm " + cmd.getVmName(), ex); - } finally { - txn.close(); + MockHost destHost = _mockHostDao.findByGuid(destGuid); + if (destHost == null) { + return new MigrateAnswer(cmd, false, "can't find host: " + destGuid, null); + } + vm.setHostId(destHost.getId()); + _mockVmDao.update(vm.getId(), vm); + txn.commit(); + return new MigrateAnswer(cmd, true, null, 0); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("unable to migrate vm " + cmd.getVmName(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - } + } + } @Override public PrepareForMigrationAnswer prepareForMigrate(PrepareForMigrationCommand cmd) { @@ -457,81 +492,81 @@ public class MockVmManagerImpl implements MockVmManager { } @Override - public Answer CleanupNetworkRules(CleanupNetworkRulesCmd cmd, SimulatorInfo info) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - List<MockSecurityRulesVO> rules = _mockSecurityDao.findByHost(info.getHostUuid()); - for (MockSecurityRulesVO rule : rules) { - MockVMVO vm = _mockVmDao.findByVmNameAndHost(rule.getVmName(), info.getHostUuid()); - if (vm == null) { - _mockSecurityDao.remove(rule.getId()); - } - } - txn.commit(); - return new Answer(cmd); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("unable to clean up rules", ex); - } finally { - txn.close(); + public Answer CleanupNetworkRules(CleanupNetworkRulesCmd cmd, SimulatorInfo info) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + List<MockSecurityRulesVO> rules = _mockSecurityDao.findByHost(info.getHostUuid()); + for (MockSecurityRulesVO rule : rules) { + MockVMVO vm = _mockVmDao.findByVmNameAndHost(rule.getVmName(), info.getHostUuid()); + if (vm == null) { + _mockSecurityDao.remove(rule.getId()); + } + } + txn.commit(); + return new Answer(cmd); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("unable to clean up rules", ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - } + } + } @Override - public Answer stopVM(StopCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - String vmName = cmd.getVmName(); - MockVm vm = _mockVmDao.findByVmName(vmName); - if (vm != null) { - vm.setState(State.Stopped); - _mockVmDao.update(vm.getId(), (MockVMVO) vm); - } + public Answer stopVM(StopCommand cmd) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + String vmName = cmd.getVmName(); + MockVm vm = _mockVmDao.findByVmName(vmName); + if (vm != null) { + vm.setState(State.Stopped); + _mockVmDao.update(vm.getId(), (MockVMVO) vm); + } - if (vmName.startsWith("s-")) { - _mockAgentMgr.handleSystemVMStop(vm.getId()); - } - txn.commit(); - return new StopAnswer(cmd, null, new Integer(0), true); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("unable to stop vm " + cmd.getVmName(), ex); - } finally { - txn.close(); + if (vm != null && vmName.startsWith("s-")) { + _mockAgentMgr.handleSystemVMStop(vm.getId()); + } + txn.commit(); + return new StopAnswer(cmd, null, new Integer(0), true); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("unable to stop vm " + cmd.getVmName(), ex); + } finally { + txn.close(); txn = Transaction.open(Transaction.CLOUD_DB); txn.close(); - } - } + } + } @Override - public Answer rebootVM(RebootCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - MockVm vm = _mockVmDao.findByVmName(cmd.getVmName()); - if (vm != null) { - vm.setState(State.Running); - _mockVmDao.update(vm.getId(), (MockVMVO) vm); - } - txn.commit(); - return new RebootAnswer(cmd, "Rebooted " + cmd.getVmName(), true); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("unable to stop vm " + cmd.getVmName(), ex); - } finally { - txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); - txn.close(); - } - } + public Answer rebootVM(RebootCommand cmd) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + MockVm vm = _mockVmDao.findByVmName(cmd.getVmName()); + if (vm != null) { + vm.setState(State.Running); + _mockVmDao.update(vm.getId(), (MockVMVO) vm); + } + txn.commit(); + return new RebootAnswer(cmd, "Rebooted " + cmd.getVmName(), true); + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("unable to reboot vm " + cmd.getVmName(), ex); + } finally { + txn.close(); + txn = Transaction.open(Transaction.CLOUD_DB); + txn.close(); + } + } @Override public Answer getVncPort(GetVncPortCommand cmd) { - return new GetVncPortAnswer(cmd, 0); + return new GetVncPortAnswer(cmd, 0); } @Override @@ -546,13 +581,13 @@ public class MockVmManagerImpl implements MockVmManager { @Override public GetDomRVersionAnswer getDomRVersion(GetDomRVersionCmd cmd) { - return new GetDomRVersionAnswer(cmd, null, null, null); + return new GetDomRVersionAnswer(cmd, null, null, null); } @Override public SecurityGroupRuleAnswer AddSecurityGroupRules(SecurityGroupRulesCmd cmd, SimulatorInfo info) { if (!info.isEnabled()) { - return new SecurityGroupRuleAnswer(cmd, false, "Disabled", SecurityGroupRuleAnswer.FailureReason.CANNOT_BRIDGE_FIREWALL); + return new SecurityGroupRuleAnswer(cmd, false, "Disabled", SecurityGroupRuleAnswer.FailureReason.CANNOT_BRIDGE_FIREWALL); } Map> rules =
_securityRules.get(info.getHostUuid()); diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java index 2bed2efec6a..b0bc7036e27 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java @@ -16,30 +16,84 @@ // under the License. package com.cloud.agent.manager; -import com.cloud.agent.api.*; +import java.util.HashMap; +import java.util.Map; + +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.log4j.Logger; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.AttachIsoCommand; +import com.cloud.agent.api.AttachVolumeCommand; +import com.cloud.agent.api.BackupSnapshotCommand; +import com.cloud.agent.api.BumpUpPriorityCommand; +import com.cloud.agent.api.CheckHealthCommand; +import com.cloud.agent.api.CheckNetworkCommand; +import com.cloud.agent.api.CheckRouterCommand; +import com.cloud.agent.api.CheckVirtualMachineCommand; +import com.cloud.agent.api.CleanupNetworkRulesCmd; +import com.cloud.agent.api.ClusterSyncCommand; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.ComputeChecksumCommand; +import com.cloud.agent.api.CreatePrivateTemplateFromSnapshotCommand; +import com.cloud.agent.api.CreatePrivateTemplateFromVolumeCommand; +import com.cloud.agent.api.CreateStoragePoolCommand; +import com.cloud.agent.api.CreateVolumeFromSnapshotCommand; +import com.cloud.agent.api.DeleteSnapshotBackupCommand; +import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.agent.api.GetDomRVersionCmd; +import com.cloud.agent.api.GetHostStatsCommand; +import com.cloud.agent.api.GetStorageStatsCommand; +import com.cloud.agent.api.GetVmStatsCommand; +import com.cloud.agent.api.GetVncPortCommand; +import com.cloud.agent.api.MaintainCommand; +import com.cloud.agent.api.ManageSnapshotCommand; +import com.cloud.agent.api.MigrateCommand; +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.agent.api.NetworkUsageCommand; +import com.cloud.agent.api.PingTestCommand; +import com.cloud.agent.api.PrepareForMigrationCommand; +import com.cloud.agent.api.RebootCommand; +import com.cloud.agent.api.SecStorageSetupCommand; +import com.cloud.agent.api.SecStorageVMSetupCommand; +import com.cloud.agent.api.SecurityGroupRulesCmd; +import com.cloud.agent.api.StartCommand; +import com.cloud.agent.api.StopCommand; +import com.cloud.agent.api.StoragePoolInfo; import com.cloud.agent.api.check.CheckSshCommand; import com.cloud.agent.api.proxy.CheckConsoleProxyLoadCommand; import com.cloud.agent.api.proxy.WatchConsoleProxyLoadCommand; -import com.cloud.agent.api.routing.*; -import com.cloud.agent.api.storage.*; +import com.cloud.agent.api.routing.DhcpEntryCommand; +import com.cloud.agent.api.routing.IpAssocCommand; +import com.cloud.agent.api.routing.LoadBalancerConfigCommand; +import com.cloud.agent.api.routing.SavePasswordCommand; +import com.cloud.agent.api.routing.SetFirewallRulesCommand; +import com.cloud.agent.api.routing.SetPortForwardingRulesCommand; +import com.cloud.agent.api.routing.SetStaticNatRulesCommand; +import com.cloud.agent.api.routing.VmDataCommand; +import com.cloud.agent.api.storage.CopyVolumeCommand; +import com.cloud.agent.api.storage.CreateCommand; +import com.cloud.agent.api.storage.DeleteTemplateCommand; +import 
com.cloud.agent.api.storage.DestroyCommand; +import com.cloud.agent.api.storage.DownloadCommand; +import com.cloud.agent.api.storage.DownloadProgressCommand; +import com.cloud.agent.api.storage.ListTemplateCommand; +import com.cloud.agent.api.storage.ListVolumeCommand; +import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand; import com.cloud.simulator.MockConfigurationVO; import com.cloud.simulator.MockHost; import com.cloud.simulator.MockVMVO; import com.cloud.simulator.dao.MockConfigurationDao; import com.cloud.simulator.dao.MockHostDao; import com.cloud.utils.Pair; -import com.cloud.utils.component.Inject; import com.cloud.utils.db.ConnectionConcierge; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine.State; -import org.apache.log4j.Logger; - -import javax.ejb.Local; -import javax.naming.ConfigurationException; -import java.util.HashMap; -import java.util.Map; @Local(value = { SimulatorManager.class }) public class SimulatorManagerImpl implements SimulatorManager { @@ -57,7 +111,7 @@ public class SimulatorManagerImpl implements SimulatorManager { private ConnectionConcierge _concierge; @Override public boolean configure(String name, Map params) throws ConfigurationException { - /* + /* try { Connection conn = Transaction.getStandaloneSimulatorConnection(); conn.setAutoCommit(true); @@ -65,7 +119,7 @@ public class SimulatorManagerImpl implements SimulatorManager { } catch (SQLException e) { throw new CloudRuntimeException("Unable to get a db connection to simulator", e); } - */ + */ return true; } @@ -146,7 +200,7 @@ public class SimulatorManagerImpl implements SimulatorManager { } else if (cmd instanceof PingTestCommand) { return _mockAgentMgr.pingTest((PingTestCommand) cmd); } else if (cmd instanceof PrepareForMigrationCommand) { - return _mockVmMgr.prepareForMigrate((PrepareForMigrationCommand) cmd); + return _mockVmMgr.prepareForMigrate((PrepareForMigrationCommand) cmd); } else if (cmd instanceof MigrateCommand) { return _mockVmMgr.Migrate((MigrateCommand) cmd, info); } else if (cmd instanceof StartCommand) { @@ -154,11 +208,11 @@ public class SimulatorManagerImpl implements SimulatorManager { } else if (cmd instanceof CheckSshCommand) { return _mockVmMgr.checkSshCommand((CheckSshCommand) cmd); } else if (cmd instanceof CheckVirtualMachineCommand) { - return _mockVmMgr.checkVmState((CheckVirtualMachineCommand) cmd); + return _mockVmMgr.checkVmState((CheckVirtualMachineCommand) cmd); } else if (cmd instanceof SetStaticNatRulesCommand) { return _mockVmMgr.SetStaticNatRules((SetStaticNatRulesCommand) cmd); } else if (cmd instanceof SetFirewallRulesCommand) { - return _mockVmMgr.SetFirewallRules((SetFirewallRulesCommand) cmd); + return _mockVmMgr.SetFirewallRules((SetFirewallRulesCommand) cmd); } else if (cmd instanceof SetPortForwardingRulesCommand) { return _mockVmMgr.SetPortForwardingRules((SetPortForwardingRulesCommand) cmd); } else if (cmd instanceof NetworkUsageCommand) { @@ -174,7 +228,7 @@ public class SimulatorManagerImpl implements SimulatorManager { } else if (cmd instanceof CleanupNetworkRulesCmd) { return _mockVmMgr.CleanupNetworkRules((CleanupNetworkRulesCmd) cmd, info); } else if (cmd instanceof CheckNetworkCommand) { - return _mockAgentMgr.checkNetworkCommand((CheckNetworkCommand) cmd); + return _mockAgentMgr.checkNetworkCommand((CheckNetworkCommand) cmd); }else if (cmd instanceof StopCommand) { return _mockVmMgr.stopVM((StopCommand)cmd); } else if (cmd 
instanceof RebootCommand) { @@ -244,11 +298,11 @@ public class SimulatorManagerImpl implements SimulatorManager { } else if (cmd instanceof BumpUpPriorityCommand) { return _mockVmMgr.bumpPriority((BumpUpPriorityCommand) cmd); } else if (cmd instanceof GetDomRVersionCmd) { - return _mockVmMgr.getDomRVersion((GetDomRVersionCmd) cmd); + return _mockVmMgr.getDomRVersion((GetDomRVersionCmd) cmd); } else if (cmd instanceof ClusterSyncCommand) { - return new Answer(cmd); + return new Answer(cmd); } else if (cmd instanceof CopyVolumeCommand) { - return _mockStorageMgr.CopyVolume((CopyVolumeCommand) cmd); + return _mockStorageMgr.CopyVolume((CopyVolumeCommand) cmd); } else { return Answer.createUnsupportedCommandAnswer(cmd); } @@ -270,49 +324,49 @@ public class SimulatorManagerImpl implements SimulatorManager { @Override public Map getVmStates(String hostGuid) { - return _mockVmMgr.getVmStates(hostGuid); + return _mockVmMgr.getVmStates(hostGuid); } @Override public Map getVms(String hostGuid) { - return _mockVmMgr.getVms(hostGuid); + return _mockVmMgr.getVms(hostGuid); } @Override public HashMap> syncNetworkGroups(String hostGuid) { - SimulatorInfo info = new SimulatorInfo(); - info.setHostUuid(hostGuid); - return _mockVmMgr.syncNetworkGroups(info); + SimulatorInfo info = new SimulatorInfo(); + info.setHostUuid(hostGuid); + return _mockVmMgr.syncNetworkGroups(info); } @Override - public boolean configureSimulator(Long zoneId, Long podId, Long clusterId, Long hostId, String command, - String values) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - MockConfigurationVO config = _mockConfigDao.findByCommand(zoneId, podId, clusterId, hostId, command); - if (config == null) { - config = new MockConfigurationVO(); - config.setClusterId(clusterId); - config.setDataCenterId(zoneId); - config.setPodId(podId); - config.setHostId(hostId); - config.setName(command); - config.setValues(values); - _mockConfigDao.persist(config); - txn.commit(); - } else { - config.setValues(values); - _mockConfigDao.update(config.getId(), config); - txn.commit(); - } - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("Unable to configure simulator because of " + ex.getMessage(), ex); - } finally { - txn.close(); - } - return true; - } + public boolean configureSimulator(Long zoneId, Long podId, Long clusterId, Long hostId, String command, + String values) { + Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + try { + txn.start(); + MockConfigurationVO config = _mockConfigDao.findByCommand(zoneId, podId, clusterId, hostId, command); + if (config == null) { + config = new MockConfigurationVO(); + config.setClusterId(clusterId); + config.setDataCenterId(zoneId); + config.setPodId(podId); + config.setHostId(hostId); + config.setName(command); + config.setValues(values); + _mockConfigDao.persist(config); + txn.commit(); + } else { + config.setValues(values); + _mockConfigDao.update(config.getId(), config); + txn.commit(); + } + } catch (Exception ex) { + txn.rollback(); + throw new CloudRuntimeException("Unable to configure simulator because of " + ex.getMessage(), ex); + } finally { + txn.close(); + } + return true; + } } diff --git a/plugins/hypervisors/simulator/src/com/cloud/api/commands/ConfigureSimulator.java b/plugins/hypervisors/simulator/src/com/cloud/api/commands/ConfigureSimulator.java index df81249538d..205484dadd7 100755 --- a/plugins/hypervisors/simulator/src/com/cloud/api/commands/ConfigureSimulator.java +++ 
b/plugins/hypervisors/simulator/src/com/cloud/api/commands/ConfigureSimulator.java @@ -16,28 +16,31 @@ // under the License. package com.cloud.api.commands; -import org.apache.log4j.Logger; +import javax.inject.Inject; -import com.cloud.agent.manager.SimulatorManager; +import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd; -import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.log4j.Logger; + +import com.cloud.agent.manager.SimulatorManager; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; -import com.cloud.server.ManagementService; import com.cloud.user.Account; -import com.cloud.utils.component.ComponentLocator; + @APICommand(name = "configureSimulator", description="configure simulator", responseObject=SuccessResponse.class) public class ConfigureSimulator extends BaseCmd { public static final Logger s_logger = Logger.getLogger(ConfigureSimulator.class.getName()); private static final String s_name = "configuresimulatorresponse"; + @Inject SimulatorManager _simMgr; + @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.LONG, description="configure range: in a zone") private Long zoneId; @@ -58,8 +61,6 @@ public class ConfigureSimulator extends BaseCmd { @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException { - ComponentLocator locator = ComponentLocator.getLocator(ManagementService.Name); - SimulatorManager _simMgr = locator.getManager(SimulatorManager.class); boolean result = _simMgr.configureSimulator(zoneId, podId, clusterId, hostId, command, values); if (!result) { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to configure simulator"); diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/AgentResourceBase.java b/plugins/hypervisors/simulator/src/com/cloud/resource/AgentResourceBase.java index 808ca070d4d..de3bfd9139a 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/AgentResourceBase.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/AgentResourceBase.java @@ -26,6 +26,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; @@ -43,242 +44,240 @@ import com.cloud.agent.manager.SimulatorManager.AgentType; import com.cloud.host.Host; import com.cloud.host.Host.Type; import com.cloud.simulator.MockHost; -import com.cloud.utils.component.ComponentLocator; + public class AgentResourceBase implements ServerResource { - private static final Logger s_logger = Logger - .getLogger(AgentResourceBase.class); + private static final Logger s_logger = Logger.getLogger(AgentResourceBase.class); - protected String _name; - private List _warnings = new LinkedList(); - private List _errors = new LinkedList(); + protected String _name; + private List _warnings = new LinkedList(); + private List _errors = new LinkedList(); - private transient IAgentControl _agentControl; + private transient IAgentControl _agentControl; - protected long _instanceId; + protected long _instanceId; - 
private Type _type; + private Type _type; - private transient ComponentLocator _locator = null; - protected transient SimulatorManager _simMgr; - protected MockHost agentHost = null; - protected boolean stopped = false; - protected String hostGuid = null; + @Inject protected SimulatorManager _simMgr; + protected MockHost agentHost = null; + protected boolean stopped = false; + protected String hostGuid = null; - public AgentResourceBase(long instanceId, AgentType agentType, SimulatorManager simMgr, String hostGuid) { - _instanceId = instanceId; + public AgentResourceBase(long instanceId, AgentType agentType, SimulatorManager simMgr, String hostGuid) { + _instanceId = instanceId; - if(s_logger.isDebugEnabled()) { - s_logger.info("New Routing host instantiated with guid:" + hostGuid); - } + if(s_logger.isDebugEnabled()) { + s_logger.info("New Routing host instantiated with guid:" + hostGuid); + } - if (agentType == AgentType.Routing) { - _type = Host.Type.Routing; - } else { - _type = Host.Type.Storage; - } + if (agentType == AgentType.Routing) { + _type = Host.Type.Routing; + } else { + _type = Host.Type.Storage; + } - this.hostGuid = hostGuid; - } + this.hostGuid = hostGuid; + } - protected MockVmManager getVmMgr() { - return _simMgr.getVmMgr(); - } + protected MockVmManager getVmMgr() { + return _simMgr.getVmMgr(); + } - protected MockStorageManager getStorageMgr() { - return _simMgr.getStorageMgr(); - } + protected MockStorageManager getStorageMgr() { + return _simMgr.getStorageMgr(); + } - protected MockAgentManager getAgentMgr() { - return _simMgr.getAgentMgr(); - } + protected MockAgentManager getAgentMgr() { + return _simMgr.getAgentMgr(); + } - protected long getInstanceId() { - return _instanceId; - } + protected long getInstanceId() { + return _instanceId; + } - public AgentResourceBase() { - if(s_logger.isDebugEnabled()) { - s_logger.debug("Deserializing simulated agent on reconnect"); - } + public AgentResourceBase() { + if(s_logger.isDebugEnabled()) { + s_logger.debug("Deserializing simulated agent on reconnect"); + } - } + } - @Override - public String getName() { - return _name; - } + @Override + public String getName() { + return _name; + } - public void setName(String name) { - _name = name; - } + public void setName(String name) { + _name = name; + } - @Override - public boolean configure(String name, Map params) - throws ConfigurationException { - hostGuid = (String)params.get("guid"); - _locator = ComponentLocator.getLocator("management-server"); - _simMgr = _locator.getManager(SimulatorManager.class); - agentHost = getAgentMgr().getHost(hostGuid); - return true; - } + @Override + public boolean configure(String name, Map params) + throws ConfigurationException { + hostGuid = (String)params.get("guid"); + agentHost = getAgentMgr().getHost(hostGuid); + return true; + } - private void reconnect(MockHost host) { - if(s_logger.isDebugEnabled()) { - s_logger.debug("Reconfiguring existing simulated host w/ name: " + host.getName() + " and guid: " + host.getGuid()); - } - this.agentHost = host; - } + private void reconnect(MockHost host) { + if(s_logger.isDebugEnabled()) { + s_logger.debug("Reconfiguring existing simulated host w/ name: " + host.getName() + " and guid: " + host.getGuid()); + } + this.agentHost = host; + } - @Override - public void disconnected() { - this.stopped = true; - } + @Override + public void disconnected() { + this.stopped = true; + } - protected void recordWarning(String msg, Throwable th) {
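// A minimal sketch (not part of the patch) of the wiring after this change,
// assuming the javax.inject container used throughout this patch instantiates
// AgentResourceBase as a managed component: the ComponentLocator lookup deleted
// above becomes field injection, so configure() only has to resolve the host.
@Inject
protected SimulatorManager _simMgr; // set by the container before configure() runs

@Override
public boolean configure(String name, Map params) throws ConfigurationException {
    hostGuid = (String) params.get("guid");
    agentHost = getAgentMgr().getHost(hostGuid); // getAgentMgr() delegates to the injected _simMgr
    return true;
}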
- String str = getLogStr(msg, th); - synchronized (_warnings) { - _warnings.add(str); - } - } + protected void recordWarning(String msg, Throwable th) { + String str = getLogStr(msg, th); + synchronized (_warnings) { + _warnings.add(str); + } + } - protected void recordWarning(String msg) { - recordWarning(msg, null); - } + protected void recordWarning(String msg) { + recordWarning(msg, null); + } - protected List getWarnings() { - synchronized (this) { - List results = _warnings; - _warnings = new ArrayList(); - return results; - } - } + protected List getWarnings() { + synchronized (this) { + List results = _warnings; + _warnings = new ArrayList(); + return results; + } + } - protected List getErrors() { - synchronized (this) { - List result = _errors; - _errors = new ArrayList(); - return result; - } - } + protected List getErrors() { + synchronized (this) { + List result = _errors; + _errors = new ArrayList(); + return result; + } + } - protected void recordError(String msg, Throwable th) { - String str = getLogStr(msg, th); - synchronized (_errors) { - _errors.add(str); - } - } + protected void recordError(String msg, Throwable th) { + String str = getLogStr(msg, th); + synchronized (_errors) { + _errors.add(str); + } + } - protected void recordError(String msg) { - recordError(msg, null); - } + protected void recordError(String msg) { + recordError(msg, null); + } - protected Answer createErrorAnswer(Command cmd, String msg, Throwable th) { - StringWriter writer = new StringWriter(); - if (msg != null) { - writer.append(msg); - } - writer.append("===>Stack<==="); - th.printStackTrace(new PrintWriter(writer)); - return new Answer(cmd, false, writer.toString()); - } + protected Answer createErrorAnswer(Command cmd, String msg, Throwable th) { + StringWriter writer = new StringWriter(); + if (msg != null) { + writer.append(msg); + } + writer.append("===>Stack<==="); + th.printStackTrace(new PrintWriter(writer)); + return new Answer(cmd, false, writer.toString()); + } - protected String createErrorDetail(String msg, Throwable th) { - StringWriter writer = new StringWriter(); - if (msg != null) { - writer.append(msg); - } - writer.append("===>Stack<==="); - th.printStackTrace(new PrintWriter(writer)); - return writer.toString(); - } + protected String createErrorDetail(String msg, Throwable th) { + StringWriter writer = new StringWriter(); + if (msg != null) { + writer.append(msg); + } + writer.append("===>Stack<==="); + th.printStackTrace(new PrintWriter(writer)); + return writer.toString(); + } - protected String getLogStr(String msg, Throwable th) { - StringWriter writer = new StringWriter(); - writer.append(new Date().toString()).append(": ").append(msg); - if (th != null) { - writer.append("\n Exception: "); - th.printStackTrace(new PrintWriter(writer)); - } - return writer.toString(); - } + protected String getLogStr(String msg, Throwable th) { + StringWriter writer = new StringWriter(); + writer.append(new Date().toString()).append(": ").append(msg); + if (th != null) { + writer.append("\n Exception: "); + th.printStackTrace(new PrintWriter(writer)); + } + return writer.toString(); + } - @Override - public boolean start() { - return true; - } + @Override + public boolean start() { + return true; + } - @Override - public boolean stop() { - this.stopped = true; - return true; - } + @Override + public boolean stop() { + this.stopped = true; + return true; + } - @Override - public IAgentControl getAgentControl() { - return _agentControl; - } + @Override + public IAgentControl 
getAgentControl() { + return _agentControl; + } - @Override - public void setAgentControl(IAgentControl agentControl) { - _agentControl = agentControl; - } + @Override + public void setAgentControl(IAgentControl agentControl) { + _agentControl = agentControl; + } - protected String findScript(String script) { - s_logger.debug("Looking for " + script + " in the classpath"); - URL url = ClassLoader.getSystemResource(script); - File file = null; - if (url == null) { - file = new File("./" + script); - s_logger.debug("Looking for " + script + " in " - + file.getAbsolutePath()); - if (!file.exists()) { - return null; - } - } else { - file = new File(url.getFile()); - } - return file.getAbsolutePath(); - } + protected String findScript(String script) { + s_logger.debug("Looking for " + script + " in the classpath"); + URL url = ClassLoader.getSystemResource(script); + File file = null; + if (url == null) { + file = new File("./" + script); + s_logger.debug("Looking for " + script + " in " + + file.getAbsolutePath()); + if (!file.exists()) { + return null; + } + } else { + file = new File(url.getFile()); + } + return file.getAbsolutePath(); + } - @Override - public Answer executeRequest(Command cmd) { - return null; - } + @Override + public Answer executeRequest(Command cmd) { + return null; + } - @Override - public PingCommand getCurrentStatus(long id) { - return null; - } + @Override + public PingCommand getCurrentStatus(long id) { + return null; + } - @Override - public Type getType() { - return _type; - } + @Override + public Type getType() { + return _type; + } - public void setType(Host.Type _type) { - this._type = _type; - } + public void setType(Host.Type _type) { + this._type = _type; + } - @Override - public StartupCommand[] initialize() { - return null; - } + @Override + public StartupCommand[] initialize() { + return null; + } - public SimulatorManager getSimulatorManager() { - return _simMgr; - } + public SimulatorManager getSimulatorManager() { + return _simMgr; + } - public void setSimulatorManager(SimulatorManager simMgr) { - _simMgr = simMgr; - } + public void setSimulatorManager(SimulatorManager simMgr) { + _simMgr = simMgr; + } - public boolean isStopped() { - return this.stopped; - } + public boolean isStopped() { + return this.stopped; + } } diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java index b6d40d49589..5cb094184ba 100755 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java @@ -25,6 +25,7 @@ import java.util.Map; import java.util.UUID; import javax.ejb.Local; +import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; @@ -43,26 +44,24 @@ import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.exception.ConnectionException; import com.cloud.exception.DiscoveryException; -import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateZoneVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateHostDao; import com.cloud.storage.dao.VMTemplateZoneDao; -import com.cloud.utils.component.Inject; + @Local(value = 
Discoverer.class) public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { - private static final Logger s_logger = Logger - .getLogger(SimulatorDiscoverer.class); + private static final Logger s_logger = Logger + .getLogger(SimulatorDiscoverer.class); - @Inject HostDao _hostDao; - @Inject VMTemplateDao _vmTemplateDao; + @Inject HostDao _hostDao; + @Inject VMTemplateDao _vmTemplateDao; @Inject VMTemplateHostDao _vmTemplateHostDao; @Inject VMTemplateZoneDao _vmTemplateZoneDao; @Inject ClusterDao _clusterDao; @@ -71,166 +70,166 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L @Inject MockStorageManager _mockStorageMgr = null; @Inject ResourceManager _resourceMgr; - /** - * Finds ServerResources of an in-process simulator - * - * @see com.cloud.resource.Discoverer#find(long, java.lang.Long, - * java.lang.Long, java.net.URI, java.lang.String, java.lang.String) - */ - @Override - public Map> find(long dcId, - Long podId, Long clusterId, URI uri, String username, - String password, List hostTags) throws DiscoveryException { - Map> resources; + /** + * Finds ServerResources of an in-process simulator + * + * @see com.cloud.resource.Discoverer#find(long, java.lang.Long, + * java.lang.Long, java.net.URI, java.lang.String, java.lang.String) + */ + @Override + public Map> find(long dcId, + Long podId, Long clusterId, URI uri, String username, + String password, List hostTags) throws DiscoveryException { + Map> resources; - try { - //http://sim/count=$count, it will add $count number of hosts into the cluster - String scheme = uri.getScheme(); - String host = uri.getAuthority(); - String commands = URLDecoder.decode(uri.getPath()); + try { + //http://sim/count=$count, it will add $count number of hosts into the cluster + String scheme = uri.getScheme(); + String host = uri.getAuthority(); + String commands = URLDecoder.decode(uri.getPath()); - long cpuSpeed = _mockAgentMgr.DEFAULT_HOST_SPEED_MHZ; - long cpuCores = _mockAgentMgr.DEFAULT_HOST_CPU_CORES; - long memory = _mockAgentMgr.DEFAULT_HOST_MEM_SIZE; - long localstorageSize = _mockStorageMgr.DEFAULT_HOST_STORAGE_SIZE; - if (scheme.equals("http")) { - if (host == null || !host.startsWith("sim")) { - String msg = "uri is not of simulator type so we're not taking care of the discovery for this: " - + uri; - if(s_logger.isDebugEnabled()) { - s_logger.debug(msg); - } - return null; - } - if (commands != null) { - int index = commands.lastIndexOf("/"); - if (index != -1) { - commands = commands.substring(index+1); + long cpuSpeed = _mockAgentMgr.DEFAULT_HOST_SPEED_MHZ; + long cpuCores = _mockAgentMgr.DEFAULT_HOST_CPU_CORES; + long memory = _mockAgentMgr.DEFAULT_HOST_MEM_SIZE; + long localstorageSize = _mockStorageMgr.DEFAULT_HOST_STORAGE_SIZE; + if (scheme.equals("http")) { + if (host == null || !host.startsWith("sim")) { + String msg = "uri is not of simulator type so we're not taking care of the discovery for this: " + + uri; + if(s_logger.isDebugEnabled()) { + s_logger.debug(msg); + } + return null; + } + if (commands != null) { + int index = commands.lastIndexOf("/"); + if (index != -1) { + commands = commands.substring(index+1); - String[] cmds = commands.split("&"); - for (String cmd : cmds) { - String[] parameter = cmd.split("="); - if (parameter[0].equalsIgnoreCase("cpuspeed") && parameter[1] != null) { - cpuSpeed = Long.parseLong(parameter[1]); - } else if (parameter[0].equalsIgnoreCase("cpucore") && parameter[1] != null) { - cpuCores = 
Long.parseLong(parameter[1]); - } else if (parameter[0].equalsIgnoreCase("memory") && parameter[1] != null) { - memory = Long.parseLong(parameter[1]); - } else if (parameter[0].equalsIgnoreCase("localstorage") && parameter[1] != null) { - localstorageSize = Long.parseLong(parameter[1]); - } - } - } - } - } else { - String msg = "uriString is not http so we're not taking care of the discovery for this: " - + uri; - if(s_logger.isDebugEnabled()) { - s_logger.debug(msg); - } - return null; - } + String[] cmds = commands.split("&"); + for (String cmd : cmds) { + String[] parameter = cmd.split("="); + if (parameter[0].equalsIgnoreCase("cpuspeed") && parameter[1] != null) { + cpuSpeed = Long.parseLong(parameter[1]); + } else if (parameter[0].equalsIgnoreCase("cpucore") && parameter[1] != null) { + cpuCores = Long.parseLong(parameter[1]); + } else if (parameter[0].equalsIgnoreCase("memory") && parameter[1] != null) { + memory = Long.parseLong(parameter[1]); + } else if (parameter[0].equalsIgnoreCase("localstorage") && parameter[1] != null) { + localstorageSize = Long.parseLong(parameter[1]); + } + } + } + } + } else { + String msg = "uriString is not http so we're not taking care of the discovery for this: " + + uri; + if(s_logger.isDebugEnabled()) { + s_logger.debug(msg); + } + return null; + } - String cluster = null; - if (clusterId == null) { - String msg = "must specify cluster Id when adding host"; - if(s_logger.isDebugEnabled()) { - s_logger.debug(msg); - } - throw new RuntimeException(msg); - } else { - ClusterVO clu = _clusterDao.findById(clusterId); - if (clu == null - || (clu.getHypervisorType() != HypervisorType.Simulator)) { - if (s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for Simulator hypervisors"); - return null; - } - cluster = Long.toString(clusterId); - if(clu.getGuid() == null) { - clu.setGuid(UUID.randomUUID().toString()); - } - _clusterDao.update(clusterId, clu); - } + String cluster = null; + if (clusterId == null) { + String msg = "must specify cluster Id when adding host"; + if(s_logger.isDebugEnabled()) { + s_logger.debug(msg); + } + throw new RuntimeException(msg); + } else { + ClusterVO clu = _clusterDao.findById(clusterId); + if (clu == null + || (clu.getHypervisorType() != HypervisorType.Simulator)) { + if (s_logger.isInfoEnabled()) + s_logger.info("invalid cluster id or cluster is not for Simulator hypervisors"); + return null; + } + cluster = Long.toString(clusterId); + if(clu.getGuid() == null) { + clu.setGuid(UUID.randomUUID().toString()); + } + _clusterDao.update(clusterId, clu); + } - String pod; - if (podId == null) { - String msg = "must specify pod Id when adding host"; - if(s_logger.isDebugEnabled()) { - s_logger.debug(msg); - } - throw new RuntimeException(msg); - } else { - pod = Long.toString(podId); - } + String pod; + if (podId == null) { + String msg = "must specify pod Id when adding host"; + if(s_logger.isDebugEnabled()) { + s_logger.debug(msg); + } + throw new RuntimeException(msg); + } else { + pod = Long.toString(podId); + } - Map details = new HashMap(); - Map params = new HashMap(); - details.put("username", username); - params.put("username", username); - details.put("password", password); - params.put("password", password); - params.put("zone", Long.toString(dcId)); - params.put("pod", pod); - params.put("cluster", cluster); - params.put("cpuspeed", Long.toString(cpuSpeed)); - params.put("cpucore", Long.toString(cpuCores)); - params.put("memory", Long.toString(memory)); - params.put("localstorage", 
Long.toString(localstorageSize)); + Map details = new HashMap(); + Map params = new HashMap(); + details.put("username", username); + params.put("username", username); + details.put("password", password); + params.put("password", password); + params.put("zone", Long.toString(dcId)); + params.put("pod", pod); + params.put("cluster", cluster); + params.put("cpuspeed", Long.toString(cpuSpeed)); + params.put("cpucore", Long.toString(cpuCores)); + params.put("memory", Long.toString(memory)); + params.put("localstorage", Long.toString(localstorageSize)); - resources = createAgentResources(params); - return resources; - } catch (Exception ex) { - s_logger.error("Exception when discovering simulator hosts: " - + ex.getMessage()); - } - return null; - } - - private Map> createAgentResources( - Map params) { - try { - s_logger.info("Creating Simulator Resources"); - return _mockAgentMgr.createServerResources(params); - } catch (Exception ex) { - s_logger.warn("Caught exception at agent resource creation: " - + ex.getMessage(), ex); - } - return null; - } - - @Override - public void postDiscovery(List hosts, long msId) { - - for (HostVO h : hosts) { - associateTemplatesToZone(h.getId(), h.getDataCenterId()); - } - } - - private void associateTemplatesToZone(long hostId, long dcId){ - VMTemplateZoneVO tmpltZone; - - List allTemplates = _vmTemplateDao.listAll(); - for (VMTemplateVO vt: allTemplates){ - if (vt.isCrossZones()) { - tmpltZone = _vmTemplateZoneDao.findByZoneTemplate(dcId, vt.getId()); - if (tmpltZone == null) { - VMTemplateZoneVO vmTemplateZone = new VMTemplateZoneVO(dcId, vt.getId(), new Date()); - _vmTemplateZoneDao.persist(vmTemplateZone); - } - } - } + resources = createAgentResources(params); + return resources; + } catch (Exception ex) { + s_logger.error("Exception when discovering simulator hosts: " + + ex.getMessage()); + } + return null; } - @Override - public HypervisorType getHypervisorType() { - return HypervisorType.Simulator; - } + private Map> createAgentResources( + Map params) { + try { + s_logger.info("Creating Simulator Resources"); + return _mockAgentMgr.createServerResources(params); + } catch (Exception ex) { + s_logger.warn("Caught exception at agent resource creation: " + + ex.getMessage(), ex); + } + return null; + } - @Override - public boolean matchHypervisor(String hypervisor) { - return hypervisor.equalsIgnoreCase(HypervisorType.Simulator.toString()); - } + @Override + public void postDiscovery(List hosts, long msId) { + + for (HostVO h : hosts) { + associateTemplatesToZone(h.getId(), h.getDataCenterId()); + } + } + + private void associateTemplatesToZone(long hostId, long dcId){ + VMTemplateZoneVO tmpltZone; + + List allTemplates = _vmTemplateDao.listAll(); + for (VMTemplateVO vt: allTemplates){ + if (vt.isCrossZones()) { + tmpltZone = _vmTemplateZoneDao.findByZoneTemplate(dcId, vt.getId()); + if (tmpltZone == null) { + VMTemplateZoneVO vmTemplateZone = new VMTemplateZoneVO(dcId, vt.getId(), new Date()); + _vmTemplateZoneDao.persist(vmTemplateZone); + } + } + } + } + + @Override + public HypervisorType getHypervisorType() { + return HypervisorType.Simulator; + } + + @Override + public boolean matchHypervisor(String hypervisor) { + return hypervisor.equalsIgnoreCase(HypervisorType.Simulator.toString()); + } @Override public boolean configure(String name, Map params) throws ConfigurationException { @@ -298,38 +297,38 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L return false; } - @Override - public HostVO 
createHostVOForConnectedAgent(HostVO host, - StartupCommand[] cmd) { - return null; - } + @Override + public HostVO createHostVOForConnectedAgent(HostVO host, + StartupCommand[] cmd) { + return null; + } - @Override - public HostVO createHostVOForDirectConnectAgent(HostVO host, - StartupCommand[] startup, ServerResource resource, - Map details, List hostTags) { - StartupCommand firstCmd = startup[0]; - if (!(firstCmd instanceof StartupRoutingCommand)) { - return null; - } + @Override + public HostVO createHostVOForDirectConnectAgent(HostVO host, + StartupCommand[] startup, ServerResource resource, + Map details, List hostTags) { + StartupCommand firstCmd = startup[0]; + if (!(firstCmd instanceof StartupRoutingCommand)) { + return null; + } - StartupRoutingCommand ssCmd = ((StartupRoutingCommand) firstCmd); - if (ssCmd.getHypervisorType() != HypervisorType.Simulator) { - return null; - } + StartupRoutingCommand ssCmd = ((StartupRoutingCommand) firstCmd); + if (ssCmd.getHypervisorType() != HypervisorType.Simulator) { + return null; + } - return _resourceMgr.fillRoutingHostVO(host, ssCmd, HypervisorType.Simulator, details, hostTags); - } + return _resourceMgr.fillRoutingHostVO(host, ssCmd, HypervisorType.Simulator, details, hostTags); + } - @Override - public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, - boolean isForceDeleteStorage) throws UnableDeleteHostException { - return null; - } + @Override + public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, + boolean isForceDeleteStorage) throws UnableDeleteHostException { + return null; + } @Override public boolean stop() { - _resourceMgr.unregisterResourceStateAdapter(this.getClass().getSimpleName()); + _resourceMgr.unregisterResourceStateAdapter(this.getClass().getSimpleName()); return super.stop(); } diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java index 3f7cea5b6b1..cd0cd2725c9 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java @@ -21,8 +21,11 @@ import java.util.List; import java.util.Map; import javax.ejb.Local; +import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.log4j.Logger; + import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; @@ -38,9 +41,7 @@ import com.cloud.host.Status; import com.cloud.storage.SnapshotVO; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.secondary.SecondaryStorageDiscoverer; -import com.cloud.utils.component.Inject; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.log4j.Logger; @Local(value=Discoverer.class) public class SimulatorSecondaryDiscoverer extends SecondaryStorageDiscoverer implements ResourceStateAdapter, Listener { @@ -52,7 +53,7 @@ public class SimulatorSecondaryDiscoverer extends SecondaryStorageDiscoverer imp @Override public boolean configure(String name, Map params) throws ConfigurationException { - _agentMgr.registerForHostEvents(this, true, false, false); + _agentMgr.registerForHostEvents(this, true, false, false); _resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); return super.configure(name, params); } @@ -88,40 +89,40 @@ public class SimulatorSecondaryDiscoverer extends SecondaryStorageDiscoverer imp } } - 
@Override - public HostVO createHostVOForConnectedAgent(HostVO host, - StartupCommand[] cmd) { - return null; - } + @Override + public HostVO createHostVOForConnectedAgent(HostVO host, + StartupCommand[] cmd) { + return null; + } - @Override - public HostVO createHostVOForDirectConnectAgent(HostVO host, - StartupCommand[] startup, ServerResource resource, - Map details, List hostTags) { - //for detecting SSVM dispatch - StartupCommand firstCmd = startup[0]; - if (!(firstCmd instanceof StartupSecondaryStorageCommand)) { - return null; - } + @Override + public HostVO createHostVOForDirectConnectAgent(HostVO host, + StartupCommand[] startup, ServerResource resource, + Map details, List hostTags) { + //for detecting SSVM dispatch + StartupCommand firstCmd = startup[0]; + if (!(firstCmd instanceof StartupSecondaryStorageCommand)) { + return null; + } - host.setType(com.cloud.host.Host.Type.SecondaryStorageVM); - return host; - } + host.setType(com.cloud.host.Host.Type.SecondaryStorageVM); + return host; + } - @Override - public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, - boolean isForceDeleteStorage) throws UnableDeleteHostException { - long hostId = host.getId(); - List snapshots = _snapshotDao.listByHostId(hostId); - if (snapshots != null && !snapshots.isEmpty()) { - throw new CloudRuntimeException("Cannot delete this secondary storage because there are still snapshots on it "); - } - _vmTemplateHostDao.deleteByHost(hostId); - host.setGuid(null); - _hostDao.update(hostId, host); - _hostDao.remove(hostId); - return new DeleteHostAnswer(true); - } + @Override + public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, + boolean isForceDeleteStorage) throws UnableDeleteHostException { + long hostId = host.getId(); + List snapshots = _snapshotDao.listByHostId(hostId); + if (snapshots != null && !snapshots.isEmpty()) { + throw new CloudRuntimeException("Cannot delete this secondary storage because there are still snapshots on it "); + } + _vmTemplateHostDao.deleteByHost(hostId); + host.setGuid(null); + _hostDao.update(hostId, host); + _hostDao.remove(hostId); + return new DeleteHostAnswer(true); + } @Override public boolean start() { @@ -130,49 +131,49 @@ public class SimulatorSecondaryDiscoverer extends SecondaryStorageDiscoverer imp @Override public boolean stop() { - _resourceMgr.unregisterResourceStateAdapter(this.getClass().getSimpleName()); + _resourceMgr.unregisterResourceStateAdapter(this.getClass().getSimpleName()); return true; } - @Override - public int getTimeout() { - return 0; - } + @Override + public int getTimeout() { + return 0; + } - @Override - public boolean isRecurring() { - return false; - } + @Override + public boolean isRecurring() { + return false; + } - @Override - public boolean processAnswers(long agentId, long seq, Answer[] answers) { - return false; - } + @Override + public boolean processAnswers(long agentId, long seq, Answer[] answers) { + return false; + } - @Override - public boolean processCommands(long agentId, long seq, Command[] commands) { - return false; - } + @Override + public boolean processCommands(long agentId, long seq, Command[] commands) { + return false; + } - @Override - public void processConnect(HostVO host, StartupCommand cmd, - boolean forRebalance) throws ConnectionException { + @Override + public void processConnect(HostVO host, StartupCommand cmd, + boolean forRebalance) throws ConnectionException { - } + } - @Override - public AgentControlAnswer processControlCommand(long agentId, - AgentControlCommand cmd) { - 
return null; - } + @Override + public AgentControlAnswer processControlCommand(long agentId, + AgentControlCommand cmd) { + return null; + } - @Override - public boolean processDisconnect(long agentId, Status state) { - return false; - } + @Override + public boolean processDisconnect(long agentId, Status state) { + return false; + } - @Override - public boolean processTimeout(long agentId, long seq) { - return false; - } + @Override + public boolean processTimeout(long agentId, long seq) { + return false; + } } diff --git a/plugins/hypervisors/simulator/src/com/cloud/server/ManagementServerSimulatorImpl.java b/plugins/hypervisors/simulator/src/com/cloud/server/ManagementServerSimulatorImpl.java index ad42c23380e..44ab26a020a 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/server/ManagementServerSimulatorImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/server/ManagementServerSimulatorImpl.java @@ -17,16 +17,16 @@ package com.cloud.server; +import com.cloud.utils.PropertiesUtil; + +import java.util.Map; + public class ManagementServerSimulatorImpl extends ManagementServerExtImpl { @Override - public String[] getPropertiesFiles() { - String[] apis = super.getPropertiesFiles(); - String[] newapis = new String[apis.length + 1]; - for (int i = 0; i < apis.length; i++) { - newapis[i] = apis[i]; - } - - newapis[apis.length] = "commands-simulator.properties"; - return newapis; + public Map getProperties() { + Map apiNameRoleMaskMapping = super.getProperties(); + apiNameRoleMaskMapping.putAll(PropertiesUtil.processConfigFile(new String[] + {"commands-simulator.properties"})); + return apiNameRoleMaskMapping; } } diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/SimulatorGuru.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/SimulatorGuru.java index b9c404b66a1..c9d308023ed 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/SimulatorGuru.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/SimulatorGuru.java @@ -17,14 +17,14 @@ package com.cloud.simulator; import javax.ejb.Local; +import javax.inject.Inject; import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorGuru; import com.cloud.hypervisor.HypervisorGuruBase; -import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.GuestOSVO; import com.cloud.storage.dao.GuestOSDao; -import com.cloud.utils.component.Inject; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @@ -52,8 +52,8 @@ public class SimulatorGuru extends HypervisorGuruBase implements HypervisorGuru return to; } - @Override - public boolean trackVmHostChange() { - return false; - } + @Override + public boolean trackVmHostChange() { + return false; + } } diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVMDaoImpl.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVMDaoImpl.java index 86264f2c039..be7a98859e2 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVMDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVMDaoImpl.java @@ -21,11 +21,11 @@ import java.util.List; import java.util.Map; import javax.ejb.Local; +import javax.inject.Inject; import javax.naming.ConfigurationException; import com.cloud.simulator.MockHostVO; import com.cloud.simulator.MockVMVO; -import com.cloud.utils.component.Inject; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.JoinBuilder; import 
com.cloud.utils.db.SearchBuilder; diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java index b5d58dc2619..7286ada5798 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java @@ -66,7 +66,7 @@ import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.user.Account; import com.cloud.utils.UriUtils; -import com.cloud.utils.component.ComponentLocator; + import com.vmware.vim25.ClusterDasConfigInfo; import com.vmware.vim25.ManagedObjectReference; diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index 75e10c9f36a..618d996f955 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@ -86,7 +86,7 @@ import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.utils.FileUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.component.Manager; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java index 053ed6eaf46..7ec06575208 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java @@ -21,7 +21,7 @@ import org.apache.log4j.Logger; import com.cloud.hypervisor.vmware.manager.VmwareManager; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.utils.StringUtils; -import com.cloud.utils.component.ComponentLocator; + import com.vmware.apputils.version.ExtendedAppUtil; public class VmwareContextFactory { diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index a444cfec197..47fcb86b5c9 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -200,7 +200,7 @@ import com.cloud.storage.template.TemplateInfo; import com.cloud.utils.DateUtil; import com.cloud.utils.Pair; import com.cloud.utils.StringUtils; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DB; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.ExceptionUtil; diff --git a/plugins/hypervisors/vmware/src/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java index c28f25987c3..e17d99d3184 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java @@ -11,7 +11,7 @@ // Unless required by applicable 
law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package com.cloud.network; @@ -49,7 +49,7 @@ import com.cloud.utils.cisco.n1kv.vsm.NetconfHelper; public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { @Inject - CiscoNexusVSMDeviceDao _ciscoNexusVSMDeviceDao; + CiscoNexusVSMDeviceDao _ciscoNexusVSMDeviceDao; @Inject ClusterDao _clusterDao; @Inject @@ -66,9 +66,9 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { PortProfileDao _ppDao; @Inject ConfigurationDao _configDao; - + private static final org.apache.log4j.Logger s_logger = Logger.getLogger(ExternalLoadBalancerDeviceManagerImpl.class); - + @DB //public CiscoNexusVSMDeviceVO addCiscoNexusVSM(long clusterId, String ipaddress, String username, String password, ServerResource resource, String vsmName) { public CiscoNexusVSMDeviceVO addCiscoNexusVSM(long clusterId, String ipaddress, String username, String password, String vCenterIpaddr, String vCenterDcName) { @@ -78,7 +78,7 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { // First check if the cluster is of type vmware. If not, // throw an exception. VSMs are tightly integrated with vmware clusters. - + ClusterVO cluster = _clusterDao.findById(clusterId); if (cluster == null) { throw new InvalidParameterValueException("Cluster with specified ID not found!"); @@ -91,21 +91,21 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { // Next, check if the cluster already has a VSM associated with it. // If so, throw an exception disallowing this operation. The user must first // delete the current VSM and then only attempt to add the new one. - + if (_clusterVSMDao.findByClusterId(clusterId) != null) { // We can't have two VSMs for the same cluster. Throw exception. throw new InvalidParameterValueException("Cluster with specified id already has a VSM tied to it. Please remove that first and retry the operation."); } // TODO: Confirm whether we should be checking for VSM reachability here. - + // Next, check if this VSM is reachable. Use the XML-RPC VSM API Java bindings to talk to // the VSM. //NetconfHelper (String ip, String username, String password) NetconfHelper netconfClient; try { - netconfClient = new NetconfHelper(ipaddress, username, password); + netconfClient = new NetconfHelper(ipaddress, username, password); } catch(CloudRuntimeException e) { String msg = "Failed to connect to Nexus VSM " + ipaddress + " with credentials of user " + username; s_logger.error(msg); @@ -119,7 +119,7 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { // First, check if VSM already exists in the table "virtual_supervisor_module". // If it's not there already, create it. // If it's there already, return success. - + // TODO - Right now, we only check if the ipaddress matches for both requests. // We must really check whether every field of the VSM matches. Anyway, the // advantage of our approach for now is that existing infrastructure using @@ -132,7 +132,7 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { throw new CloudRuntimeException(e.getMessage()); } - if (VSMObj == null) { + if (VSMObj == null) { // Create the VSM record. 
For now, we aren't using the vsmName field. VSMObj = new CiscoNexusVSMDeviceVO(ipaddress, username, password); Transaction txn = Transaction.currentTxn(); @@ -145,7 +145,7 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { throw new CloudRuntimeException(e.getMessage()); } } - + // At this stage, we have a VSM record for sure. Connect the VSM to the cluster Id. long vsmId = _ciscoNexusVSMDeviceDao.getVSMbyIpaddress(ipaddress).getId(); ClusterVSMMapVO connectorObj = new ClusterVSMMapVO(clusterId, vsmId); @@ -158,22 +158,22 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { txn.rollback(); throw new CloudRuntimeException(e.getMessage()); } - + // Now, get a list of all the ESXi servers in this cluster. // This is effectively a select * from host where cluster_id=clusterId; // All ESXi servers are stored in the host table, and their resource // type is vmwareresource. - + //List hosts = _resourceMgr.listAllHostsInCluster(clusterId); - + //TODO: Activate the code below if we make the Nexus VSM a separate resource. // Iterate through each of the hosts in this list. Each host has a host id. // Given this host id, we can reconfigure the in-memory resource representing // the host via the agent manager. Thus we inject VSM related information // into each host's resource. Also, we first configure each resource's // entries in the database to contain this VSM information before the injection. - - //for (HostVO host : hosts) { + + //for (HostVO host : hosts) { // Create a host details VO object and write it out for this hostid. //Long hostid = new Long(vsmId); //DetailVO vsmDetail = new DetailVO(host.getId(), "vsmId", hostid.toString()); @@ -194,28 +194,28 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { //hostDetails.put(ApiConstants.USERNAME, username); //hostDetails.put(ApiConstants.PASSWORD, password); //_agentMrg.send(host.getId(), ) - + return VSMObj; - + } - + @DB - public boolean deleteCiscoNexusVSM(long vsmId) throws ResourceInUseException { + public boolean deleteCiscoNexusVSM(long vsmId) throws ResourceInUseException { CiscoNexusVSMDeviceVO cisconexusvsm = _ciscoNexusVSMDeviceDao.findById(vsmId); if (cisconexusvsm == null) { // This entry is already not present. Return success. return true; } - + // First, check whether this VSM is part of any non-empty cluster. // Search ClusterVSMMap's table for a list of clusters using this vsmId. - + List clusterList = _clusterVSMDao.listByVSMId(vsmId); - - if (clusterList != null) { + + if (clusterList != null) { for (ClusterVSMMapVO record : clusterList) { // If this cluster id has any hosts in it, fail this operation. - Long clusterId = record.getClusterId(); + Long clusterId = record.getClusterId(); List hosts = _resourceMgr.listAllHostsInCluster(clusterId); if (hosts != null && hosts.size() > 0) { for (Host host: hosts) { @@ -223,26 +223,26 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { s_logger.info("Non-empty cluster with id" + clusterId + "still has a host that uses this VSM. Please empty the cluster first"); throw new ResourceInUseException("Non-empty cluster with id" + clusterId + "still has a host that uses this VSM. Please empty the cluster first"); } - } + } } } } - + // Iterate through the cluster list again, this time, delete the VSM. Transaction txn = Transaction.currentTxn(); try { txn.start(); - // Remove the VSM entry in CiscoNexusVSMDeviceVO's table. + // Remove the VSM entry in CiscoNexusVSMDeviceVO's table. 
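// Note on the two DAO removals below: both execute inside the transaction opened
// by txn.start() above, so the VSM device row and its cluster mapping are meant
// to be removed atomically; if either call throws, the catch block surfaces a
// CloudRuntimeException and the transaction is never committed.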
_ciscoNexusVSMDeviceDao.remove(vsmId); - // Remove the current record as well from ClusterVSMMapVO's table. + // Remove the current record as well from ClusterVSMMapVO's table. _clusterVSMDao.removeByVsmId(vsmId); // There are no hosts at this stage in the cluster, so we don't need - // to notify any resources or remove host details. - txn.commit(); + // to notify any resources or remove host details. + txn.commit(); } catch (Exception e) { - s_logger.info("Caught exception when trying to delete VSM record.." + e.getMessage()); + s_logger.info("Caught exception when trying to delete VSM record.." + e.getMessage()); throw new CloudRuntimeException("Failed to delete VSM"); - } + } return true; } @@ -250,10 +250,10 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { public CiscoNexusVSMDeviceVO enableCiscoNexusVSM(long vsmId) { CiscoNexusVSMDeviceVO cisconexusvsm = _ciscoNexusVSMDeviceDao.findById(vsmId); if (cisconexusvsm == null) { - throw new InvalidParameterValueException("Invalid vsm Id specified"); + throw new InvalidParameterValueException("Invalid vsm Id specified"); } // Else, check if this db record shows that this VSM is enabled or not. - if (cisconexusvsm.getvsmDeviceState() == CiscoNexusVSMDeviceVO.VSMDeviceState.Disabled) { + if (cisconexusvsm.getvsmDeviceState() == CiscoNexusVSMDeviceVO.VSMDeviceState.Disabled) { // it's currently disabled. So change it to enabled and write it out to the db. cisconexusvsm.setVsmDeviceState(CiscoNexusVSMDeviceVO.VSMDeviceState.Enabled); Transaction txn = Transaction.currentTxn(); @@ -266,18 +266,18 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { throw new CloudRuntimeException(e.getMessage()); } } - + return cisconexusvsm; } - - @DB + + @DB public CiscoNexusVSMDeviceVO disableCiscoNexusVSM(long vsmId) { CiscoNexusVSMDeviceVO cisconexusvsm = _ciscoNexusVSMDeviceDao.findById(vsmId); if (cisconexusvsm == null) { - throw new InvalidParameterValueException("Invalid vsm Id specified"); + throw new InvalidParameterValueException("Invalid vsm Id specified"); } // Else, check if this db record shows that this VSM is enabled or not. - if (cisconexusvsm.getvsmDeviceState() == CiscoNexusVSMDeviceVO.VSMDeviceState.Enabled) { + if (cisconexusvsm.getvsmDeviceState() == CiscoNexusVSMDeviceVO.VSMDeviceState.Enabled) { // it's currently disabled. So change it to enabled and write it out to the db. cisconexusvsm.setVsmDeviceState(CiscoNexusVSMDeviceVO.VSMDeviceState.Disabled); Transaction txn = Transaction.currentTxn(); @@ -290,15 +290,15 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { throw new CloudRuntimeException(e.getMessage()); } } - + return cisconexusvsm; } - + @DB public CiscoNexusVSMDeviceVO getCiscoVSMbyVSMId(long vsmId) { return _ciscoNexusVSMDeviceDao.findById(vsmId); } - + @DB public CiscoNexusVSMDeviceVO getCiscoVSMbyClusId(long clusterId) { ClusterVSMMapVO mapVO = _clusterVSMDao.findByClusterId(clusterId); @@ -310,12 +310,12 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { CiscoNexusVSMDeviceVO result = _ciscoNexusVSMDeviceDao.findById(mapVO.getVsmId()); return result; } - + public HostVO createHostVOForConnectedAgent(HostVO host, StartupCommand[] cmd) { // TODO Auto-generated method stub return null; } - + @DB public boolean vliadateVsmCluster(String vsmIp, String vsmUser, String vsmPassword, long clusterId, String clusterName) throws ResourceInUseException { // Check if we're associating a Cisco Nexus VSM with a vmware cluster. 
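// The enableCiscoNexusVSM/disableCiscoNexusVSM methods above share one
// persistence idiom; a condensed sketch follows, with the DAO write between
// start() and commit() assumed from the surrounding code since the hunks elide it:
Transaction txn = Transaction.currentTxn();
try {
    txn.start();
    cisconexusvsm.setVsmDeviceState(CiscoNexusVSMDeviceVO.VSMDeviceState.Enabled);
    _ciscoNexusVSMDeviceDao.persist(cisconexusvsm); // assumed persist/update call
    txn.commit();
} catch (Exception e) {
    txn.rollback();
    throw new CloudRuntimeException(e.getMessage());
}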
@@ -343,7 +343,9 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { s_logger.error("Failed to add cluster: specified Nexus VSM is already associated with another cluster"); _clusterDao.remove(clusterId); ResourceInUseException ex = new ResourceInUseException("Failed to add cluster: specified Nexus VSM is already associated with another cluster with specified Id"); - ex.addProxyObject("cluster", clusterList.get(0).getClusterId(), "clusterId"); + // get clusterUuid to report error + ClusterVO cluster = _clusterDao.findById(clusterList.get(0).getClusterId()); + ex.addProxyObject(cluster.getUuid()); throw ex; } } diff --git a/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java b/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java index 71683dc306a..f6a812deca4 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java +++ b/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java @@ -17,6 +17,7 @@ package com.cloud.network.element; +import java.lang.String; import java.util.List; import java.util.Map; import java.util.ArrayList; @@ -25,6 +26,7 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; +import com.cloud.utils.PropertiesUtil; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -239,7 +241,8 @@ public class CiscoNexusVSMElement extends CiscoNexusVSMDeviceManagerImpl impleme } @Override - public String[] getPropertiesFiles() { - return new String[] { "cisconexusvsm_commands.properties" }; + public Map getProperties() { + return PropertiesUtil.processConfigFile(new String[] + { "cisconexusvsm_commands.properties" }); } } diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageResource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageResource.java index d47da9b0183..009ed7b1dc9 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageResource.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageResource.java @@ -22,6 +22,7 @@ import java.io.BufferedOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; +import java.net.URI; import java.util.HashMap; import java.util.Map; import java.util.Set; @@ -30,37 +31,50 @@ import java.util.UUID; import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreAnswer; import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd; import org.apache.cloudstack.storage.command.CopyCmd; -import org.apache.cloudstack.storage.command.CopyCmdAnswer; +import org.apache.cloudstack.storage.command.CopyTemplateToPrimaryStorageAnswer; import org.apache.cloudstack.storage.command.CreatePrimaryDataStoreCmd; import org.apache.cloudstack.storage.command.CreateVolumeAnswer; import org.apache.cloudstack.storage.command.CreateVolumeCommand; import org.apache.cloudstack.storage.command.CreateVolumeFromBaseImageCommand; +import org.apache.cloudstack.storage.command.DeleteVolumeCommand; import org.apache.cloudstack.storage.command.StorageSubSystemCommand; import org.apache.cloudstack.storage.datastore.protocol.DataStoreProtocol; +import org.apache.cloudstack.storage.to.ImageDataStoreTO; import org.apache.cloudstack.storage.to.ImageOnPrimayDataStoreTO; import org.apache.cloudstack.storage.to.NfsPrimaryDataStoreTO; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import 
org.apache.cloudstack.storage.to.TemplateTO; import org.apache.cloudstack.storage.to.VolumeTO; +import org.apache.commons.httpclient.HttpClient; +import org.apache.commons.httpclient.HttpException; +import org.apache.commons.httpclient.HttpStatus; +import org.apache.commons.httpclient.methods.GetMethod; +import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpResponse; import org.apache.http.client.ClientProtocolException; import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpHead; import org.apache.http.impl.client.DefaultHttpClient; import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; -import com.cloud.agent.api.storage.DeleteVolumeCommand; +import com.cloud.agent.api.ModifyStoragePoolAnswer; +import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer; +import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.hypervisor.xen.resource.CitrixResourceBase.SRType; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.template.TemplateInfo; import com.cloud.utils.exception.CloudRuntimeException; import com.xensource.xenapi.Connection; import com.xensource.xenapi.Host; import com.xensource.xenapi.PBD; +import com.xensource.xenapi.Pool; import com.xensource.xenapi.SR; import com.xensource.xenapi.Types; +import com.xensource.xenapi.VBD; import com.xensource.xenapi.Types.BadServerResponse; import com.xensource.xenapi.Types.XenAPIException; import com.xensource.xenapi.VDI; @@ -115,7 +129,7 @@ public class XenServerStorageResource { } protected CreateVolumeAnswer execute(CreateVolumeCommand cmd) { - VolumeTO volume = null; + VolumeTO volume = cmd.getVolume(); PrimaryDataStoreTO primaryDataStore = volume.getDataStore(); Connection conn = hypervisorResource.getConnection(); VDI vdi = null; @@ -150,7 +164,7 @@ public class XenServerStorageResource { } protected Answer execute(DeleteVolumeCommand cmd) { - VolumeTO volume = null; + VolumeTO volume = cmd.getVolume(); Connection conn = hypervisorResource.getConnection(); String errorMsg = null; try { @@ -502,7 +516,7 @@ public class XenServerStorageResource { //downloadHttpToLocalFile(vdiPath, template.getPath()); hypervisorResource.callHostPlugin(conn, "storagePlugin", "downloadTemplateFromUrl", "destPath", vdiPath, "srcUrl", template.getPath()); result = true; - return new CopyCmdAnswer(cmd, vdi.getUuid(conn)); + return new CopyTemplateToPrimaryStorageAnswer(cmd, vdi.getUuid(conn)); } catch (BadServerResponse e) { s_logger.debug("Failed to download template", e); } catch (XenAPIException e) { @@ -528,7 +542,7 @@ public class XenServerStorageResource { } protected Answer execute(AttachPrimaryDataStoreCmd cmd) { - PrimaryDataStoreTO dataStore = null; + PrimaryDataStoreTO dataStore = cmd.getDataStore(); Connection conn = hypervisorResource.getConnection(); try { SR sr = hypervisorResource.getStorageRepository(conn, dataStore.getUuid()); diff --git a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/dao/ElasticLbVmMapDaoImpl.java b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/dao/ElasticLbVmMapDaoImpl.java index ba8c82d01c2..be61f580f75 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/dao/ElasticLbVmMapDaoImpl.java +++ b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/dao/ElasticLbVmMapDaoImpl.java @@ -30,7 +30,7 @@ import com.cloud.network.dao.LoadBalancerDao; 
import com.cloud.network.dao.LoadBalancerDaoImpl; import com.cloud.network.router.VirtualRouter.Role; import com.cloud.network.router.VirtualRouter.Role; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchBuilder; diff --git a/plugins/network-elements/f5/src/com/cloud/api/commands/ListExternalLoadBalancersCmd.java b/plugins/network-elements/f5/src/com/cloud/api/commands/ListExternalLoadBalancersCmd.java index 3ee8d4865a5..72313aa0c0c 100644 --- a/plugins/network-elements/f5/src/com/cloud/api/commands/ListExternalLoadBalancersCmd.java +++ b/plugins/network-elements/f5/src/com/cloud/api/commands/ListExternalLoadBalancersCmd.java @@ -26,6 +26,7 @@ import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.response.HostResponse; import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.ZoneResponse; import com.cloud.host.Host; import com.cloud.network.element.F5ExternalLoadBalancerElementService; import org.apache.cloudstack.api.response.ExternalLoadBalancerResponse; @@ -40,7 +41,7 @@ public class ListExternalLoadBalancersCmd extends BaseListCmd { //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType = ZoneRespones.class, + @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType = ZoneResponse.class, description="zone Id") private long zoneId; diff --git a/plugins/network-elements/f5/src/com/cloud/api/commands/ListF5LoadBalancerNetworksCmd.java b/plugins/network-elements/f5/src/com/cloud/api/commands/ListF5LoadBalancerNetworksCmd.java index 4ec98f02d58..bf1164b4d05 100644 --- a/plugins/network-elements/f5/src/com/cloud/api/commands/ListF5LoadBalancerNetworksCmd.java +++ b/plugins/network-elements/f5/src/com/cloud/api/commands/ListF5LoadBalancerNetworksCmd.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.api.PlugService; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.NetworkResponse; +import com.cloud.api.response.F5LoadBalancerResponse; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InvalidParameterValueException; diff --git a/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java b/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java index 33fae86591b..177302660b5 100644 --- a/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java +++ b/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java @@ -16,6 +16,7 @@ // under the License. 
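// Every command-property hunk in this patch (simulator, Cisco Nexus VSM, F5,
// Juniper SRX, NetScaler) makes the same replacement, sketched here: the old
// getPropertiesFile()/getPropertiesFiles() override gives way to getProperties(),
// which folds the same .properties resources into the API-name-to-role-mask map
// via PropertiesUtil.processConfigFile (raw Map type kept as in the patch):
@Override
public Map getProperties() {
    return PropertiesUtil.processConfigFile(new String[] { "f5bigip_commands.properties" });
}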
package com.cloud.network.element; +import java.lang.String; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -25,6 +26,7 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; +import com.cloud.utils.PropertiesUtil; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -262,8 +264,9 @@ public class F5ExternalLoadBalancerElement extends ExternalLoadBalancerDeviceMan } @Override - public String getPropertiesFile() { - return "f5bigip_commands.properties"; + public Map getProperties() { + return PropertiesUtil.processConfigFile(new String[] + { "f5bigip_commands.properties" }); } @Override diff --git a/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java b/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java index ba0d7a845f9..63781bb2339 100644 --- a/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java +++ b/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.network.element; +import java.lang.String; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -25,6 +26,7 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; +import com.cloud.utils.PropertiesUtil; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -404,8 +406,9 @@ public class JuniperSRXExternalFirewallElement extends ExternalFirewallDeviceMan } @Override - public String getPropertiesFile() { - return "junipersrx_commands.properties"; + public Map getProperties() { + return PropertiesUtil.processConfigFile(new String[] + { "junipersrx_commands.properties"}); } @Override diff --git a/plugins/network-elements/netscaler/src/com/cloud/api/commands/ListNetscalerLoadBalancerNetworksCmd.java b/plugins/network-elements/netscaler/src/com/cloud/api/commands/ListNetscalerLoadBalancerNetworksCmd.java index b5935f34fda..52476df8316 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/api/commands/ListNetscalerLoadBalancerNetworksCmd.java +++ b/plugins/network-elements/netscaler/src/com/cloud/api/commands/ListNetscalerLoadBalancerNetworksCmd.java @@ -49,7 +49,7 @@ public class ListNetscalerLoadBalancerNetworksCmd extends BaseListCmd { //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - @Parameter(name=ApiConstants.LOAD_BALANCER_DEVICE_ID, type=CommandType.UUID, entityType = NetscalerLoadBalancerResponse.class, , + @Parameter(name=ApiConstants.LOAD_BALANCER_DEVICE_ID, type=CommandType.UUID, entityType = NetscalerLoadBalancerResponse.class, required = true, description="netscaler load balancer device ID") private Long lbDeviceId; diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java index 30c51ee5ce1..faff153084c 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java +++ b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java @@ -27,6 +27,7 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; +import com.cloud.utils.PropertiesUtil; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ 
-466,8 +467,9 @@ StaticNatServiceProvider { } @Override - public String getPropertiesFile() { - return "netscalerloadbalancer_commands.properties"; + public Map getProperties() { + return PropertiesUtil.processConfigFile(new String[] + { "netscalerloadbalancer_commands.properties" }); } @Override diff --git a/plugins/network-elements/nicira-nvp/src/com/cloud/network/element/NiciraNvpElement.java b/plugins/network-elements/nicira-nvp/src/com/cloud/network/element/NiciraNvpElement.java index 78fa0832518..cf8d43a70bc 100644 --- a/plugins/network-elements/nicira-nvp/src/com/cloud/network/element/NiciraNvpElement.java +++ b/plugins/network-elements/nicira-nvp/src/com/cloud/network/element/NiciraNvpElement.java @@ -28,8 +28,8 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; import com.cloud.agent.api.ConfigurePortForwardingRulesOnLogicalRouterAnswer; @@ -72,16 +72,15 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.host.dao.HostDetailsDao; +import com.cloud.network.IpAddress; import com.cloud.network.Network; -import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; import com.cloud.network.Network.Capability; import com.cloud.network.Network.Provider; import com.cloud.network.Network.Service; +import com.cloud.network.NetworkManager; import com.cloud.network.NetworkVO; import com.cloud.network.Networks; import com.cloud.network.Networks.BroadcastDomainType; -import com.cloud.network.IpAddress; -import com.cloud.network.NetworkManager; import com.cloud.network.NiciraNvpDeviceVO; import com.cloud.network.NiciraNvpNicMappingVO; import com.cloud.network.NiciraNvpRouterMappingVO; @@ -108,6 +107,7 @@ import com.cloud.resource.ResourceStateAdapter; import com.cloud.resource.ServerResource; import com.cloud.resource.UnableDeleteHostException; import com.cloud.user.Account; +import com.cloud.utils.PropertiesUtil; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; @@ -120,17 +120,16 @@ import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.NicDao; -@Component @Local(value = NetworkElement.class) public class NiciraNvpElement extends AdapterBase implements - ConnectivityProvider, SourceNatServiceProvider, - PortForwardingServiceProvider, StaticNatServiceProvider, - NiciraNvpElementService, ResourceStateAdapter, IpDeployer { - private static final Logger s_logger = Logger - .getLogger(NiciraNvpElement.class); - +ConnectivityProvider, SourceNatServiceProvider, +PortForwardingServiceProvider, StaticNatServiceProvider, +NiciraNvpElementService, ResourceStateAdapter, IpDeployer { + private static final Logger s_logger = Logger + .getLogger(NiciraNvpElement.class); + private static final Map> capabilities = setCapabilities(); - + @Inject NicDao _nicDao; @Inject @@ -150,18 +149,18 @@ public class NiciraNvpElement extends AdapterBase implements @Inject NiciraNvpNicMappingDao _niciraNvpNicMappingDao; @Inject - NiciraNvpRouterMappingDao _niciraNvpRouterMappingDao; - @Inject + NiciraNvpRouterMappingDao _niciraNvpRouterMappingDao; + @Inject NetworkDao _networkDao; - @Inject - NetworkManager _networkManager; - @Inject - ConfigurationManager _configMgr; - @Inject - 
NetworkServiceMapDao _ntwkSrvcDao; - @Inject - VlanDao _vlanDao; - + @Inject + NetworkManager _networkManager; + @Inject + ConfigurationManager _configMgr; + @Inject + NetworkServiceMapDao _ntwkSrvcDao; + @Inject + VlanDao _vlanDao; + @Override public Map> getCapabilities() { return capabilities; @@ -171,130 +170,130 @@ public class NiciraNvpElement extends AdapterBase implements public Provider getProvider() { return Provider.NiciraNvp; } - - protected boolean canHandle(Network network, Service service) { - s_logger.debug("Checking if NiciraNvpElement can handle service " - + service.getName() + " on network " + network.getDisplayText()); + + protected boolean canHandle(Network network, Service service) { + s_logger.debug("Checking if NiciraNvpElement can handle service " + + service.getName() + " on network " + network.getDisplayText()); if (network.getBroadcastDomainType() != BroadcastDomainType.Lswitch) { return false; } - - if (!_networkManager.isProviderForNetwork(getProvider(), - network.getId())) { - s_logger.debug("NiciraNvpElement is not a provider for network " - + network.getDisplayText()); - return false; - } - if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), - service, Network.Provider.NiciraNvp)) { - s_logger.debug("NiciraNvpElement can't provide the " - + service.getName() + " service on network " - + network.getDisplayText()); - return false; - } + if (!_networkManager.isProviderForNetwork(getProvider(), + network.getId())) { + s_logger.debug("NiciraNvpElement is not a provider for network " + + network.getDisplayText()); + return false; + } + + if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), + service, Network.Provider.NiciraNvp)) { + s_logger.debug("NiciraNvpElement can't provide the " + + service.getName() + " service on network " + + network.getDisplayText()); + return false; + } return true; } - + @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); - _resourceMgr.registerResourceStateAdapter(this.getClass() - .getSimpleName(), this); + _resourceMgr.registerResourceStateAdapter(this.getClass() + .getSimpleName(), this); return true; } @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) - throws ConcurrentOperationException, ResourceUnavailableException, - InsufficientCapacityException { - s_logger.debug("entering NiciraNvpElement implement function for network " - + network.getDisplayText() - + " (state " - + network.getState() - + ")"); + throws ConcurrentOperationException, ResourceUnavailableException, + InsufficientCapacityException { + s_logger.debug("entering NiciraNvpElement implement function for network " + + network.getDisplayText() + + " (state " + + network.getState() + + ")"); - if (!canHandle(network, Service.Connectivity)) { - return false; - } - - if (network.getBroadcastUri() == null) { - s_logger.error("Nic has no broadcast Uri with the LSwitch Uuid"); + if (!canHandle(network, Service.Connectivity)) { return false; } - - List devices = _niciraNvpDao - .listByPhysicalNetwork(network.getPhysicalNetworkId()); - if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " - + network.getPhysicalNetworkId()); - return false; - } - NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); - HostVO niciraNvpHost = _hostDao.findById(niciraNvpDevice.getHostId()); - _hostDao.loadDetails(niciraNvpHost); - Account owner = context.getAccount(); + if 
(network.getBroadcastUri() == null) { + s_logger.error("Nic has no broadcast Uri with the LSwitch Uuid"); + return false; + } - /** - * Lock the network as we might need to do multiple operations that - * should be done only once. - */ - Network lock = _networkDao.acquireInLockTable(network.getId(), - _networkManager.getNetworkLockTimeout()); - if (lock == null) { - throw new ConcurrentOperationException("Unable to lock network " - + network.getId()); - } - try { - // Implement SourceNat immediately as we have al the info already - if (_networkManager.isProviderSupportServiceInNetwork( - network.getId(), Service.SourceNat, Provider.NiciraNvp)) { - s_logger.debug("Apparently we are supposed to provide SourceNat on this network"); + List devices = _niciraNvpDao + .listByPhysicalNetwork(network.getPhysicalNetworkId()); + if (devices.isEmpty()) { + s_logger.error("No NiciraNvp Controller on physical network " + + network.getPhysicalNetworkId()); + return false; + } + NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); + HostVO niciraNvpHost = _hostDao.findById(niciraNvpDevice.getHostId()); + _hostDao.loadDetails(niciraNvpHost); - PublicIp sourceNatIp = _networkManager - .assignSourceNatIpAddressToGuestNetwork(owner, network); - String publicCidr = sourceNatIp.getAddress().addr() + "/" - + NetUtils.getCidrSize(sourceNatIp.getVlanNetmask()); - String internalCidr = network.getGateway() + "/" - + network.getCidr().split("/")[1]; - long vlanid = (Vlan.UNTAGGED.equals(sourceNatIp.getVlanTag())) ? 0 - : Long.parseLong(sourceNatIp.getVlanTag()); + Account owner = context.getAccount(); - CreateLogicalRouterCommand cmd = new CreateLogicalRouterCommand( - niciraNvpHost.getDetail("l3gatewayserviceuuid"), vlanid, - network.getBroadcastUri().getSchemeSpecificPart(), - "router-" + network.getDisplayText(), publicCidr, - sourceNatIp.getGateway(), internalCidr, context - .getDomain().getName() - + "-" - + context.getAccount().getAccountName()); - CreateLogicalRouterAnswer answer = (CreateLogicalRouterAnswer) _agentMgr - .easySend(niciraNvpHost.getId(), cmd); - if (answer.getResult() == false) { - s_logger.error("Failed to create Logical Router for network " - + network.getDisplayText()); - return false; - } + /** + * Lock the network as we might need to do multiple operations that + * should be done only once. 
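+         * (Without the lock, two concurrent implement() calls for the same
+         * network could each try to create a logical router.)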
+         */
+        Network lock = _networkDao.acquireInLockTable(network.getId(),
+                _networkManager.getNetworkLockTimeout());
+        if (lock == null) {
+            throw new ConcurrentOperationException("Unable to lock network "
+                    + network.getId());
+        }
+        try {
+            // Implement SourceNat immediately as we have all the info already
+            if (_networkManager.isProviderSupportServiceInNetwork(
+                    network.getId(), Service.SourceNat, Provider.NiciraNvp)) {
+                s_logger.debug("Apparently we are supposed to provide SourceNat on this network");
-            // Store the uuid so we can easily find it during cleanup
-            NiciraNvpRouterMappingVO routermapping =
-                    new NiciraNvpRouterMappingVO(answer.getLogicalRouterUuid(), network.getId());
-            _niciraNvpRouterMappingDao.persist(routermapping);
-        }
-        } finally {
-            if (lock != null) {
-                _networkDao.releaseFromLockTable(lock.getId());
-                if (s_logger.isDebugEnabled()) {
-                    s_logger.debug("Lock is released for network id "
-                            + lock.getId() + " as a part of router startup in "
-                            + dest);
-                }
-            }
-        }
+                PublicIp sourceNatIp = _networkManager
+                        .assignSourceNatIpAddressToGuestNetwork(owner, network);
+                String publicCidr = sourceNatIp.getAddress().addr() + "/"
+                        + NetUtils.getCidrSize(sourceNatIp.getVlanNetmask());
+                String internalCidr = network.getGateway() + "/"
+                        + network.getCidr().split("/")[1];
+                long vlanid = (Vlan.UNTAGGED.equals(sourceNatIp.getVlanTag())) ? 0
+                        : Long.parseLong(sourceNatIp.getVlanTag());
+
+                CreateLogicalRouterCommand cmd = new CreateLogicalRouterCommand(
+                        niciraNvpHost.getDetail("l3gatewayserviceuuid"), vlanid,
+                        network.getBroadcastUri().getSchemeSpecificPart(),
+                        "router-" + network.getDisplayText(), publicCidr,
+                        sourceNatIp.getGateway(), internalCidr, context
+                                .getDomain().getName()
+                                + "-"
+                                + context.getAccount().getAccountName());
+                CreateLogicalRouterAnswer answer = (CreateLogicalRouterAnswer) _agentMgr
+                        .easySend(niciraNvpHost.getId(), cmd);
+                if (answer.getResult() == false) {
+                    s_logger.error("Failed to create Logical Router for network "
+                            + network.getDisplayText());
+                    return false;
+                }
+
+                // Store the uuid so we can easily find it during cleanup
+                NiciraNvpRouterMappingVO routermapping =
+                        new NiciraNvpRouterMappingVO(answer.getLogicalRouterUuid(), network.getId());
+                _niciraNvpRouterMappingDao.persist(routermapping);
+            }
+        } finally {
+            if (lock != null) {
+                _networkDao.releaseFromLockTable(lock.getId());
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("Lock is released for network id "
+                            + lock.getId() + " as a part of router startup in "
+                            + dest);
+                }
+            }
+        }
        return true;
    }
@@ -302,10 +301,10 @@ public class NiciraNvpElement extends AdapterBase implements
    public boolean prepare(Network network, NicProfile nic,
            VirtualMachineProfile vm,
            DeployDestination dest, ReservationContext context)
-            throws ConcurrentOperationException, ResourceUnavailableException,
-            InsufficientCapacityException {
-
-        if (!canHandle(network, Service.Connectivity)) {
+                    throws ConcurrentOperationException, ResourceUnavailableException,
+                    InsufficientCapacityException {
+
+        if (!canHandle(network, Service.Connectivity)) {
            return false;
        }
@@ -316,60 +315,60 @@ public class NiciraNvpElement extends AdapterBase implements
        NicVO nicVO = _nicDao.findById(nic.getId());
-        List devices = _niciraNvpDao
-                .listByPhysicalNetwork(network.getPhysicalNetworkId());
+        List devices = _niciraNvpDao
+                .listByPhysicalNetwork(network.getPhysicalNetworkId());
        if (devices.isEmpty()) {
-            s_logger.error("No NiciraNvp Controller on physical network "
-                    + network.getPhysicalNetworkId());
+            s_logger.error("No 
NiciraNvp Controller on physical network " + + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); HostVO niciraNvpHost = _hostDao.findById(niciraNvpDevice.getHostId()); - NiciraNvpNicMappingVO existingNicMap = _niciraNvpNicMappingDao - .findByNicUuid(nicVO.getUuid()); + NiciraNvpNicMappingVO existingNicMap = _niciraNvpNicMappingDao + .findByNicUuid(nicVO.getUuid()); if (existingNicMap != null) { - FindLogicalSwitchPortCommand findCmd = new FindLogicalSwitchPortCommand( - existingNicMap.getLogicalSwitchUuid(), - existingNicMap.getLogicalSwitchPortUuid()); - FindLogicalSwitchPortAnswer answer = (FindLogicalSwitchPortAnswer) _agentMgr - .easySend(niciraNvpHost.getId(), findCmd); - + FindLogicalSwitchPortCommand findCmd = new FindLogicalSwitchPortCommand( + existingNicMap.getLogicalSwitchUuid(), + existingNicMap.getLogicalSwitchPortUuid()); + FindLogicalSwitchPortAnswer answer = (FindLogicalSwitchPortAnswer) _agentMgr + .easySend(niciraNvpHost.getId(), findCmd); + if (answer.getResult()) { - s_logger.warn("Existing Logical Switchport found for nic " - + nic.getName() + " with uuid " - + existingNicMap.getLogicalSwitchPortUuid()); - UpdateLogicalSwitchPortCommand cmd = new UpdateLogicalSwitchPortCommand( - existingNicMap.getLogicalSwitchPortUuid(), network - .getBroadcastUri().getSchemeSpecificPart(), - nicVO.getUuid(), context.getDomain().getName() + "-" - + context.getAccount().getAccountName(), - nic.getName()); - _agentMgr.easySend(niciraNvpHost.getId(), cmd); - return true; - } else { - s_logger.error("Stale entry found for nic " + nic.getName() - + " with logical switchport uuid " - + existingNicMap.getLogicalSwitchPortUuid()); - _niciraNvpNicMappingDao.remove(existingNicMap.getId()); + s_logger.warn("Existing Logical Switchport found for nic " + + nic.getName() + " with uuid " + + existingNicMap.getLogicalSwitchPortUuid()); + UpdateLogicalSwitchPortCommand cmd = new UpdateLogicalSwitchPortCommand( + existingNicMap.getLogicalSwitchPortUuid(), network + .getBroadcastUri().getSchemeSpecificPart(), + nicVO.getUuid(), context.getDomain().getName() + "-" + + context.getAccount().getAccountName(), + nic.getName()); + _agentMgr.easySend(niciraNvpHost.getId(), cmd); + return true; + } else { + s_logger.error("Stale entry found for nic " + nic.getName() + + " with logical switchport uuid " + + existingNicMap.getLogicalSwitchPortUuid()); + _niciraNvpNicMappingDao.remove(existingNicMap.getId()); } } - - CreateLogicalSwitchPortCommand cmd = new CreateLogicalSwitchPortCommand( - network.getBroadcastUri().getSchemeSpecificPart(), - nicVO.getUuid(), context.getDomain().getName() + "-" - + context.getAccount().getAccountName(), nic.getName()); - CreateLogicalSwitchPortAnswer answer = (CreateLogicalSwitchPortAnswer) _agentMgr - .easySend(niciraNvpHost.getId(), cmd); - + + CreateLogicalSwitchPortCommand cmd = new CreateLogicalSwitchPortCommand( + network.getBroadcastUri().getSchemeSpecificPart(), + nicVO.getUuid(), context.getDomain().getName() + "-" + + context.getAccount().getAccountName(), nic.getName()); + CreateLogicalSwitchPortAnswer answer = (CreateLogicalSwitchPortAnswer) _agentMgr + .easySend(niciraNvpHost.getId(), cmd); + if (answer == null || !answer.getResult()) { - s_logger.error("CreateLogicalSwitchPortCommand failed"); + s_logger.error("CreateLogicalSwitchPortCommand failed"); return false; } - - NiciraNvpNicMappingVO nicMap = new NiciraNvpNicMappingVO(network - .getBroadcastUri().getSchemeSpecificPart(), - answer.getLogicalSwitchPortUuid(), 
nicVO.getUuid()); + + NiciraNvpNicMappingVO nicMap = new NiciraNvpNicMappingVO(network + .getBroadcastUri().getSchemeSpecificPart(), + answer.getLogicalSwitchPortUuid(), nicVO.getUuid()); _niciraNvpNicMappingDao.persist(nicMap); return true; @@ -381,7 +380,7 @@ public class NiciraNvpElement extends AdapterBase implements ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - if (!canHandle(network, Service.Connectivity)) { + if (!canHandle(network, Service.Connectivity)) { return false; } @@ -389,39 +388,39 @@ public class NiciraNvpElement extends AdapterBase implements s_logger.error("Nic has no broadcast Uri with the LSwitch Uuid"); return false; } - + NicVO nicVO = _nicDao.findById(nic.getId()); - List devices = _niciraNvpDao - .listByPhysicalNetwork(network.getPhysicalNetworkId()); + List devices = _niciraNvpDao + .listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " - + network.getPhysicalNetworkId()); + s_logger.error("No NiciraNvp Controller on physical network " + + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); HostVO niciraNvpHost = _hostDao.findById(niciraNvpDevice.getHostId()); - - NiciraNvpNicMappingVO nicMap = _niciraNvpNicMappingDao - .findByNicUuid(nicVO.getUuid()); + + NiciraNvpNicMappingVO nicMap = _niciraNvpNicMappingDao + .findByNicUuid(nicVO.getUuid()); if (nicMap == null) { s_logger.error("No mapping for nic " + nic.getName()); return false; } - - DeleteLogicalSwitchPortCommand cmd = new DeleteLogicalSwitchPortCommand( - nicMap.getLogicalSwitchUuid(), - nicMap.getLogicalSwitchPortUuid()); - DeleteLogicalSwitchPortAnswer answer = (DeleteLogicalSwitchPortAnswer) _agentMgr - .easySend(niciraNvpHost.getId(), cmd); - + + DeleteLogicalSwitchPortCommand cmd = new DeleteLogicalSwitchPortCommand( + nicMap.getLogicalSwitchUuid(), + nicMap.getLogicalSwitchPortUuid()); + DeleteLogicalSwitchPortAnswer answer = (DeleteLogicalSwitchPortAnswer) _agentMgr + .easySend(niciraNvpHost.getId(), cmd); + if (answer == null || !answer.getResult()) { - s_logger.error("DeleteLogicalSwitchPortCommand failed"); + s_logger.error("DeleteLogicalSwitchPortCommand failed"); return false; } - + _niciraNvpNicMappingDao.remove(nicMap.getId()); - + return true; } @@ -429,54 +428,54 @@ public class NiciraNvpElement extends AdapterBase implements public boolean shutdown(Network network, ReservationContext context, boolean cleanup) throws ConcurrentOperationException, ResourceUnavailableException { - if (!canHandle(network, Service.Connectivity)) { - return false; - } - - List devices = _niciraNvpDao - .listByPhysicalNetwork(network.getPhysicalNetworkId()); - if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " - + network.getPhysicalNetworkId()); - return false; - } - NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); - HostVO niciraNvpHost = _hostDao.findById(niciraNvpDevice.getHostId()); - - if (_networkManager.isProviderSupportServiceInNetwork(network.getId(), - Service.SourceNat, Provider.NiciraNvp)) { - s_logger.debug("Apparently we were providing SourceNat on this network"); - - // Deleting the LogicalRouter will also take care of all provisioned - // nat rules. 
-            NiciraNvpRouterMappingVO routermapping = _niciraNvpRouterMappingDao
-                    .findByNetworkId(network.getId());
-            if (routermapping == null) {
-                s_logger.warn("No logical router uuid found for network "
-                        + network.getDisplayText());
-                // This might be cause by a failed deployment, so don't make shutdown fail as well.
-                return true;
-            }
-
-            DeleteLogicalRouterCommand cmd = new DeleteLogicalRouterCommand(routermapping.getLogicalRouterUuid());
-            DeleteLogicalRouterAnswer answer =
-                    (DeleteLogicalRouterAnswer) _agentMgr.easySend(niciraNvpHost.getId(), cmd);
-            if (answer.getResult() == false) {
-                s_logger.error("Failed to delete LogicalRouter for network "
-                        + network.getDisplayText());
+        if (!canHandle(network, Service.Connectivity)) {
            return false;
        }
-            _niciraNvpRouterMappingDao.remove(routermapping.getId());
-        }
+        List devices = _niciraNvpDao
+                .listByPhysicalNetwork(network.getPhysicalNetworkId());
+        if (devices.isEmpty()) {
+            s_logger.error("No NiciraNvp Controller on physical network "
+                    + network.getPhysicalNetworkId());
+            return false;
+        }
+        NiciraNvpDeviceVO niciraNvpDevice = devices.get(0);
+        HostVO niciraNvpHost = _hostDao.findById(niciraNvpDevice.getHostId());
+
+        if (_networkManager.isProviderSupportServiceInNetwork(network.getId(),
+                Service.SourceNat, Provider.NiciraNvp)) {
+            s_logger.debug("Apparently we were providing SourceNat on this network");
+
+            // Deleting the LogicalRouter will also take care of all provisioned
+            // nat rules.
+            NiciraNvpRouterMappingVO routermapping = _niciraNvpRouterMappingDao
+                    .findByNetworkId(network.getId());
+            if (routermapping == null) {
+                s_logger.warn("No logical router uuid found for network "
+                        + network.getDisplayText());
+                // This might be caused by a failed deployment, so don't make shutdown fail as well.
+                return true;
+            }
+
+            DeleteLogicalRouterCommand cmd = new DeleteLogicalRouterCommand(routermapping.getLogicalRouterUuid());
+            DeleteLogicalRouterAnswer answer =
+                    (DeleteLogicalRouterAnswer) _agentMgr.easySend(niciraNvpHost.getId(), cmd);
+            if (answer.getResult() == false) {
+                s_logger.error("Failed to delete LogicalRouter for network "
+                        + network.getDisplayText());
+                return false;
+            }
+
+            _niciraNvpRouterMappingDao.remove(routermapping.getId());
+        }
        return true;
    }

    @Override
-    public boolean destroy(Network network, ReservationContext context)
+    public boolean destroy(Network network, ReservationContext context)
            throws ConcurrentOperationException, ResourceUnavailableException {
-        if (!canHandle(network, Service.Connectivity)) {
+        if (!canHandle(network, Service.Connectivity)) {
            return false;
        }
@@ -491,59 +490,60 @@ public class NiciraNvpElement extends AdapterBase implements
    @Override
    public boolean shutdownProviderInstances(
            PhysicalNetworkServiceProvider provider, ReservationContext context)
-            throws ConcurrentOperationException, ResourceUnavailableException {
+                    throws ConcurrentOperationException, ResourceUnavailableException {
        // Nothing to do here.
        return true;
    }

    @Override
    public boolean canEnableIndividualServices() {
-        return true;
+        return true;
    }

    @Override
    public boolean verifyServicesCombination(Set services) {
-        // This element can only function in a Nicra Nvp based
-        // SDN network, so Connectivity needs to be present here
-        if (!services.contains(Service.Connectivity)) {
-            s_logger.warn("Unable to provide services without Connectivity service enabled for this element");
-            return false;
-        }
-        if ((services.contains(Service.PortForwarding) || services.contains(Service.StaticNat)) && !services.contains(Service.SourceNat)) {
-            s_logger.warn("Unable to provide StaticNat and/or PortForwarding without the SourceNat service");
-            return false;
-        }
+        // This element can only function in a Nicira Nvp based
+        // SDN network, so Connectivity needs to be present here
+        if (!services.contains(Service.Connectivity)) {
+            s_logger.warn("Unable to provide services without Connectivity service enabled for this element");
+            return false;
+        }
+        if ((services.contains(Service.PortForwarding) || services.contains(Service.StaticNat)) && !services.contains(Service.SourceNat)) {
+            s_logger.warn("Unable to provide StaticNat and/or PortForwarding without the SourceNat service");
+            return false;
+        }
        return true;
    }

    private static Map> setCapabilities() {
        Map> capabilities = new HashMap>();
-        // L2 Support : SDN provisioning
+        // L2 Support : SDN provisioning
        capabilities.put(Service.Connectivity, null);

-        // L3 Support : Generic?
-        capabilities.put(Service.Gateway, null);
+        // L3 Support : Generic?
+        capabilities.put(Service.Gateway, null);

-        // L3 Support : SourceNat
-        Map sourceNatCapabilities = new HashMap();
-        sourceNatCapabilities.put(Capability.SupportedSourceNatTypes,
-                "peraccount");
-        sourceNatCapabilities.put(Capability.RedundantRouter, "false");
-        capabilities.put(Service.SourceNat, sourceNatCapabilities);
+        // L3 Support : SourceNat
+        Map sourceNatCapabilities = new HashMap();
+        sourceNatCapabilities.put(Capability.SupportedSourceNatTypes,
+                "peraccount");
+        sourceNatCapabilities.put(Capability.RedundantRouter, "false");
+        capabilities.put(Service.SourceNat, sourceNatCapabilities);

-        // L3 Support : Port Forwarding
-        capabilities.put(Service.PortForwarding, null);
+        // L3 Support : Port Forwarding
+        capabilities.put(Service.PortForwarding, null);

-        // L3 support : StaticNat
-        capabilities.put(Service.StaticNat, null);
+        // L3 support : StaticNat
+        capabilities.put(Service.StaticNat, null);

        return capabilities;
    }

    @Override
-    public String[] getPropertiesFiles() {
-        return new String[] { "nicira-nvp_commands.properties" };
+    public Map getProperties() {
+        return PropertiesUtil.processConfigFile(new String[]
+                { "nicira-nvp_commands.properties" });
    }

    @Override
@@ -551,41 +551,41 @@ public class NiciraNvpElement extends AdapterBase implements
    public NiciraNvpDeviceVO addNiciraNvpDevice(AddNiciraNvpDeviceCmd cmd) {
        ServerResource resource = new NiciraNvpResource();
        String deviceName = Network.Provider.NiciraNvp.getName();
-        NetworkDevice networkDevice = NetworkDevice
-                .getNetworkDevice(deviceName);
+        NetworkDevice networkDevice = NetworkDevice
+                .getNetworkDevice(deviceName);
        Long physicalNetworkId = cmd.getPhysicalNetworkId();
        NiciraNvpDeviceVO niciraNvpDevice = null;
-
-        PhysicalNetworkVO physicalNetwork = _physicalNetworkDao
-                .findById(physicalNetworkId);
+
+        PhysicalNetworkVO physicalNetwork = _physicalNetworkDao
+                .findById(physicalNetworkId);
        if (physicalNetwork == null) {
-            throw new InvalidParameterValueException(
-                    "Could not find phyical 
network with ID: " - + physicalNetworkId); + throw new InvalidParameterValueException( + "Could not find phyical network with ID: " + + physicalNetworkId); } long zoneId = physicalNetwork.getDataCenterId(); - PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao - .findByServiceProvider(physicalNetwork.getId(), - networkDevice.getNetworkServiceProvder()); + PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao + .findByServiceProvider(physicalNetwork.getId(), + networkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException("Network Service Provider: " - + networkDevice.getNetworkServiceProvder() - + " is not enabled in the physical network: " - + physicalNetworkId + "to add this device"); + throw new CloudRuntimeException("Network Service Provider: " + + networkDevice.getNetworkServiceProvder() + + " is not enabled in the physical network: " + + physicalNetworkId + "to add this device"); } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException("Network Service Provider: " - + ntwkSvcProvider.getProviderName() - + " is in shutdown state in the physical network: " - + physicalNetworkId + "to add this device"); + throw new CloudRuntimeException("Network Service Provider: " + + ntwkSvcProvider.getProviderName() + + " is in shutdown state in the physical network: " + + physicalNetworkId + "to add this device"); } - + if (_niciraNvpDao.listByPhysicalNetwork(physicalNetworkId).size() != 0) { - throw new CloudRuntimeException( - "A NiciraNvp device is already configured on this physical network"); + throw new CloudRuntimeException( + "A NiciraNvp device is already configured on this physical network"); } - - Map params = new HashMap(); + + Map params = new HashMap(); params.put("guid", UUID.randomUUID().toString()); params.put("zoneId", String.valueOf(physicalNetwork.getDataCenterId())); params.put("physicalNetworkId", String.valueOf(physicalNetwork.getId())); @@ -594,40 +594,40 @@ public class NiciraNvpElement extends AdapterBase implements params.put("adminuser", cmd.getUsername()); params.put("adminpass", cmd.getPassword()); params.put("transportzoneuuid", cmd.getTransportzoneUuid()); - // FIXME What to do with multiple isolation types - params.put("transportzoneisotype", - physicalNetwork.getIsolationMethods().get(0).toLowerCase()); - if (cmd.getL3GatewayServiceUuid() != null) { - params.put("l3gatewayserviceuuid", cmd.getL3GatewayServiceUuid()); - } + // FIXME What to do with multiple isolation types + params.put("transportzoneisotype", + physicalNetwork.getIsolationMethods().get(0).toLowerCase()); + if (cmd.getL3GatewayServiceUuid() != null) { + params.put("l3gatewayserviceuuid", cmd.getL3GatewayServiceUuid()); + } - Map hostdetails = new HashMap(); + Map hostdetails = new HashMap(); hostdetails.putAll(params); - + Transaction txn = Transaction.currentTxn(); try { resource.configure(cmd.getHost(), hostdetails); - - Host host = _resourceMgr.addHost(zoneId, resource, - Host.Type.L2Networking, params); + + Host host = _resourceMgr.addHost(zoneId, resource, + Host.Type.L2Networking, params); if (host != null) { txn.start(); - - niciraNvpDevice = new NiciraNvpDeviceVO(host.getId(), - physicalNetworkId, ntwkSvcProvider.getProviderName(), - deviceName); + + niciraNvpDevice = new NiciraNvpDeviceVO(host.getId(), + physicalNetworkId, ntwkSvcProvider.getProviderName(), + deviceName); _niciraNvpDao.persist(niciraNvpDevice); - - 
DetailVO detail = new DetailVO(host.getId(), - "niciranvpdeviceid", String.valueOf(niciraNvpDevice - .getId())); + + DetailVO detail = new DetailVO(host.getId(), + "niciranvpdeviceid", String.valueOf(niciraNvpDevice + .getId())); _hostDetailsDao.persist(detail); txn.commit(); return niciraNvpDevice; } else { - throw new CloudRuntimeException( - "Failed to add Nicira Nvp Device due to internal error."); + throw new CloudRuntimeException( + "Failed to add Nicira Nvp Device due to internal error."); } } catch (ConfigurationException e) { txn.rollback(); @@ -638,8 +638,8 @@ public class NiciraNvpElement extends AdapterBase implements @Override public NiciraNvpDeviceResponse createNiciraNvpDeviceResponse( NiciraNvpDeviceVO niciraNvpDeviceVO) { - HostVO niciraNvpHost = _hostDao.findById(niciraNvpDeviceVO.getHostId()); - _hostDao.loadDetails(niciraNvpHost); + HostVO niciraNvpHost = _hostDao.findById(niciraNvpDeviceVO.getHostId()); + _hostDao.loadDetails(niciraNvpHost); NiciraNvpDeviceResponse response = new NiciraNvpDeviceResponse(); response.setDeviceName(niciraNvpDeviceVO.getDeviceName()); @@ -647,118 +647,118 @@ public class NiciraNvpElement extends AdapterBase implements if (pnw != null) { response.setPhysicalNetworkId(pnw.getUuid()); } - response.setId(niciraNvpDeviceVO.getUuid()); + response.setId(niciraNvpDeviceVO.getUuid()); response.setProviderName(niciraNvpDeviceVO.getProviderName()); - response.setHostName(niciraNvpHost.getDetail("ip")); - response.setTransportZoneUuid(niciraNvpHost.getDetail("transportzoneuuid")); - response.setL3GatewayServiceUuid(niciraNvpHost.getDetail("l3gatewayserviceuuid")); - response.setObjectName("niciranvpdevice"); + response.setHostName(niciraNvpHost.getDetail("ip")); + response.setTransportZoneUuid(niciraNvpHost.getDetail("transportzoneuuid")); + response.setL3GatewayServiceUuid(niciraNvpHost.getDetail("l3gatewayserviceuuid")); + response.setObjectName("niciranvpdevice"); return response; } - + @Override public boolean deleteNiciraNvpDevice(DeleteNiciraNvpDeviceCmd cmd) { Long niciraDeviceId = cmd.getNiciraNvpDeviceId(); - NiciraNvpDeviceVO niciraNvpDevice = _niciraNvpDao - .findById(niciraDeviceId); + NiciraNvpDeviceVO niciraNvpDevice = _niciraNvpDao + .findById(niciraDeviceId); if (niciraNvpDevice == null) { - throw new InvalidParameterValueException( - "Could not find a nicira device with id " + niciraDeviceId); + throw new InvalidParameterValueException( + "Could not find a nicira device with id " + niciraDeviceId); } - + // Find the physical network we work for Long physicalNetworkId = niciraNvpDevice.getPhysicalNetworkId(); - PhysicalNetworkVO physicalNetwork = _physicalNetworkDao - .findById(physicalNetworkId); + PhysicalNetworkVO physicalNetwork = _physicalNetworkDao + .findById(physicalNetworkId); if (physicalNetwork != null) { // Lets see if there are networks that use us // Find the nicira networks on this physical network - List networkList = _networkDao - .listByPhysicalNetwork(physicalNetworkId); - + List networkList = _networkDao + .listByPhysicalNetwork(physicalNetworkId); + // Networks with broadcast type lswitch are ours for (NetworkVO network : networkList) { if (network.getBroadcastDomainType() == Networks.BroadcastDomainType.Lswitch) { - if ((network.getState() != Network.State.Shutdown) - && (network.getState() != Network.State.Destroy)) { - throw new CloudRuntimeException( - "This Nicira Nvp device can not be deleted as there are one or more logical networks provisioned by cloudstack."); + if ((network.getState() != 
Network.State.Shutdown)
+                        && (network.getState() != Network.State.Destroy)) {
+                    throw new CloudRuntimeException(
+                            "This Nicira Nvp device can not be deleted as there are one or more logical networks provisioned by cloudstack.");
                }
            }
        }
-
+
        HostVO niciraHost = _hostDao.findById(niciraNvpDevice.getHostId());
        Long hostId = niciraHost.getId();
-
+
        niciraHost.setResourceState(ResourceState.Maintenance);
        _hostDao.update(hostId, niciraHost);
        _resourceMgr.deleteHost(hostId, false, false);
-
+
        _niciraNvpDao.remove(niciraDeviceId);
        return true;
    }
-
+
    @Override
-    public List listNiciraNvpDevices(
-            ListNiciraNvpDevicesCmd cmd) {
+    public List listNiciraNvpDevices(
+            ListNiciraNvpDevicesCmd cmd) {
        Long physicalNetworkId = cmd.getPhysicalNetworkId();
        Long niciraNvpDeviceId = cmd.getNiciraNvpDeviceId();
        List responseList = new ArrayList();
-
+
        if (physicalNetworkId == null && niciraNvpDeviceId == null) {
-            throw new InvalidParameterValueException(
-                    "Either physical network Id or nicira device Id must be specified");
+            throw new InvalidParameterValueException(
+                    "Either physical network Id or nicira device Id must be specified");
        }
-
+
        if (niciraNvpDeviceId != null) {
-            NiciraNvpDeviceVO niciraNvpDevice = _niciraNvpDao
-                    .findById(niciraNvpDeviceId);
+            NiciraNvpDeviceVO niciraNvpDevice = _niciraNvpDao
+                    .findById(niciraNvpDeviceId);
            if (niciraNvpDevice == null) {
-                throw new InvalidParameterValueException(
-                        "Could not find Nicira Nvp device with id: "
-                        + niciraNvpDevice);
+                throw new InvalidParameterValueException(
+                        "Could not find Nicira Nvp device with id: "
+                        + niciraNvpDeviceId);
            }
            responseList.add(niciraNvpDevice);
-        } else {
-            PhysicalNetworkVO physicalNetwork = _physicalNetworkDao
-                    .findById(physicalNetworkId);
+        } else {
+            PhysicalNetworkVO physicalNetwork = _physicalNetworkDao
+                    .findById(physicalNetworkId);
            if (physicalNetwork == null) {
-                throw new InvalidParameterValueException(
-                        "Could not find a physical network with id: "
-                        + physicalNetworkId);
+                throw new InvalidParameterValueException(
+                        "Could not find a physical network with id: "
+                        + physicalNetworkId);
            }
-            responseList = _niciraNvpDao
-                    .listByPhysicalNetwork(physicalNetworkId);
+            responseList = _niciraNvpDao
+                    .listByPhysicalNetwork(physicalNetworkId);
        }
-
+
        return responseList;
    }
-
+
    @Override
-    public List listNiciraNvpDeviceNetworks(
-            ListNiciraNvpDeviceNetworksCmd cmd) {
+    public List listNiciraNvpDeviceNetworks(
+            ListNiciraNvpDeviceNetworksCmd cmd) {
        Long niciraDeviceId = cmd.getNiciraNvpDeviceId();
-        NiciraNvpDeviceVO niciraNvpDevice = _niciraNvpDao
-                .findById(niciraDeviceId);
+        NiciraNvpDeviceVO niciraNvpDevice = _niciraNvpDao
+                .findById(niciraDeviceId);
        if (niciraNvpDevice == null) {
-            throw new InvalidParameterValueException(
-                    "Could not find a nicira device with id " + niciraDeviceId);
+            throw new InvalidParameterValueException(
+                    "Could not find a nicira device with id " + niciraDeviceId);
        }
-
+
        // Find the physical network we work for
        Long physicalNetworkId = niciraNvpDevice.getPhysicalNetworkId();
-        PhysicalNetworkVO physicalNetwork = _physicalNetworkDao
-                .findById(physicalNetworkId);
+        PhysicalNetworkVO physicalNetwork = _physicalNetworkDao
+                .findById(physicalNetworkId);
        if (physicalNetwork == null) {
            // No such physical network, so no provisioned networks
            return Collections.emptyList();
        }
-
+
        // Find the nicira networks on this physical network
-        List networkList = _networkDao
-                .listByPhysicalNetwork(physicalNetworkId);
-
+        List networkList = _networkDao
+                .listByPhysicalNetwork(physicalNetworkId);
+
+        // 
Networks with broadcast type lswitch are ours List responseList = new ArrayList(); for (NetworkVO network : networkList) { @@ -766,10 +766,10 @@ public class NiciraNvpElement extends AdapterBase implements responseList.add(network); } } - + return responseList; } - + @Override public HostVO createHostVOForConnectedAgent(HostVO host, StartupCommand[] cmd) { @@ -797,156 +797,156 @@ public class NiciraNvpElement extends AdapterBase implements return new DeleteHostAnswer(true); } - /** - * From interface SourceNatServiceProvider - */ - @Override - public IpDeployer getIpDeployer(Network network) { - return this; - } + /** + * From interface SourceNatServiceProvider + */ + @Override + public IpDeployer getIpDeployer(Network network) { + return this; + } - /** - * From interface IpDeployer - * - * @param network - * @param ipAddress - * @param services - * @return - * @throws ResourceUnavailableException - */ - @Override - public boolean applyIps(Network network, - List ipAddress, Set services) - throws ResourceUnavailableException { - if (services.contains(Service.SourceNat)) { - // Only if we need to provide SourceNat we need to configure the logical router - // SourceNat is required for StaticNat and PortForwarding - List devices = _niciraNvpDao - .listByPhysicalNetwork(network.getPhysicalNetworkId()); - if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " - + network.getPhysicalNetworkId()); - return false; - } - NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); - HostVO niciraNvpHost = _hostDao.findById(niciraNvpDevice.getHostId()); - _hostDao.loadDetails(niciraNvpHost); + /** + * From interface IpDeployer + * + * @param network + * @param ipAddress + * @param services + * @return + * @throws ResourceUnavailableException + */ + @Override + public boolean applyIps(Network network, + List ipAddress, Set services) + throws ResourceUnavailableException { + if (services.contains(Service.SourceNat)) { + // Only if we need to provide SourceNat we need to configure the logical router + // SourceNat is required for StaticNat and PortForwarding + List devices = _niciraNvpDao + .listByPhysicalNetwork(network.getPhysicalNetworkId()); + if (devices.isEmpty()) { + s_logger.error("No NiciraNvp Controller on physical network " + + network.getPhysicalNetworkId()); + return false; + } + NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); + HostVO niciraNvpHost = _hostDao.findById(niciraNvpDevice.getHostId()); + _hostDao.loadDetails(niciraNvpHost); - NiciraNvpRouterMappingVO routermapping = _niciraNvpRouterMappingDao - .findByNetworkId(network.getId()); - if (routermapping == null) { - s_logger.error("No logical router uuid found for network " - + network.getDisplayText()); - return false; - } + NiciraNvpRouterMappingVO routermapping = _niciraNvpRouterMappingDao + .findByNetworkId(network.getId()); + if (routermapping == null) { + s_logger.error("No logical router uuid found for network " + + network.getDisplayText()); + return false; + } - List cidrs = new ArrayList(); - for (PublicIpAddress ip : ipAddress) { - cidrs.add(ip.getAddress().addr() + "/" + NetUtils.getCidrSize(ip.getNetmask())); - } - ConfigurePublicIpsOnLogicalRouterCommand cmd = new ConfigurePublicIpsOnLogicalRouterCommand(routermapping.getLogicalRouterUuid(), - niciraNvpHost.getDetail("l3gatewayserviceuuid"), cidrs); - ConfigurePublicIpsOnLogicalRouterAnswer answer = (ConfigurePublicIpsOnLogicalRouterAnswer) _agentMgr.easySend(niciraNvpHost.getId(), cmd); - //FIXME answer can be null if the host is down - 
return answer.getResult(); - } - else { - s_logger.debug("No need to provision ip addresses as we are not providing L3 services."); - } + List cidrs = new ArrayList(); + for (PublicIpAddress ip : ipAddress) { + cidrs.add(ip.getAddress().addr() + "/" + NetUtils.getCidrSize(ip.getNetmask())); + } + ConfigurePublicIpsOnLogicalRouterCommand cmd = new ConfigurePublicIpsOnLogicalRouterCommand(routermapping.getLogicalRouterUuid(), + niciraNvpHost.getDetail("l3gatewayserviceuuid"), cidrs); + ConfigurePublicIpsOnLogicalRouterAnswer answer = (ConfigurePublicIpsOnLogicalRouterAnswer) _agentMgr.easySend(niciraNvpHost.getId(), cmd); + //FIXME answer can be null if the host is down + return answer.getResult(); + } + else { + s_logger.debug("No need to provision ip addresses as we are not providing L3 services."); + } - return true; - } + return true; + } - /** - * From interface StaticNatServiceProvider - */ - @Override - public boolean applyStaticNats(Network network, - List rules) - throws ResourceUnavailableException { + /** + * From interface StaticNatServiceProvider + */ + @Override + public boolean applyStaticNats(Network network, + List rules) + throws ResourceUnavailableException { if (!canHandle(network, Service.StaticNat)) { return false; } - List devices = _niciraNvpDao - .listByPhysicalNetwork(network.getPhysicalNetworkId()); - if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " - + network.getPhysicalNetworkId()); - return false; - } - NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); - HostVO niciraNvpHost = _hostDao.findById(niciraNvpDevice.getHostId()); + List devices = _niciraNvpDao + .listByPhysicalNetwork(network.getPhysicalNetworkId()); + if (devices.isEmpty()) { + s_logger.error("No NiciraNvp Controller on physical network " + + network.getPhysicalNetworkId()); + return false; + } + NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); + HostVO niciraNvpHost = _hostDao.findById(niciraNvpDevice.getHostId()); - NiciraNvpRouterMappingVO routermapping = _niciraNvpRouterMappingDao - .findByNetworkId(network.getId()); - if (routermapping == null) { - s_logger.error("No logical router uuid found for network " - + network.getDisplayText()); - return false; - } + NiciraNvpRouterMappingVO routermapping = _niciraNvpRouterMappingDao + .findByNetworkId(network.getId()); + if (routermapping == null) { + s_logger.error("No logical router uuid found for network " + + network.getDisplayText()); + return false; + } - List staticNatRules = new ArrayList(); + List staticNatRules = new ArrayList(); for (StaticNat rule : rules) { IpAddress sourceIp = _networkManager.getIp(rule.getSourceIpAddressId()); // Force the nat rule into the StaticNatRuleTO, no use making a new TO object // we only need the source and destination ip. Unfortunately no mention if a rule // is new. 
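                // The full port range 0-65535 combined with protocol "any" encodes
                // one-to-one NAT for the entire IP in a single StaticNatRuleTO.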
StaticNatRuleTO ruleTO = new StaticNatRuleTO(1, - sourceIp.getAddress().addr(), 0, 65535, - rule.getDestIpAddress(), 0, 65535, - "any", rule.isForRevoke(), false); + sourceIp.getAddress().addr(), 0, 65535, + rule.getDestIpAddress(), 0, 65535, + "any", rule.isForRevoke(), false); staticNatRules.add(ruleTO); } ConfigureStaticNatRulesOnLogicalRouterCommand cmd = - new ConfigureStaticNatRulesOnLogicalRouterCommand(routermapping.getLogicalRouterUuid(), staticNatRules); + new ConfigureStaticNatRulesOnLogicalRouterCommand(routermapping.getLogicalRouterUuid(), staticNatRules); ConfigureStaticNatRulesOnLogicalRouterAnswer answer = (ConfigureStaticNatRulesOnLogicalRouterAnswer) _agentMgr.easySend(niciraNvpHost.getId(), cmd); return answer.getResult(); - } + } - /** - * From interface PortForwardingServiceProvider - */ - @Override - public boolean applyPFRules(Network network, List rules) - throws ResourceUnavailableException { + /** + * From interface PortForwardingServiceProvider + */ + @Override + public boolean applyPFRules(Network network, List rules) + throws ResourceUnavailableException { if (!canHandle(network, Service.PortForwarding)) { return false; } - List devices = _niciraNvpDao - .listByPhysicalNetwork(network.getPhysicalNetworkId()); - if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " - + network.getPhysicalNetworkId()); - return false; - } - NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); - HostVO niciraNvpHost = _hostDao.findById(niciraNvpDevice.getHostId()); + List devices = _niciraNvpDao + .listByPhysicalNetwork(network.getPhysicalNetworkId()); + if (devices.isEmpty()) { + s_logger.error("No NiciraNvp Controller on physical network " + + network.getPhysicalNetworkId()); + return false; + } + NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); + HostVO niciraNvpHost = _hostDao.findById(niciraNvpDevice.getHostId()); - NiciraNvpRouterMappingVO routermapping = _niciraNvpRouterMappingDao - .findByNetworkId(network.getId()); - if (routermapping == null) { - s_logger.error("No logical router uuid found for network " - + network.getDisplayText()); - return false; - } + NiciraNvpRouterMappingVO routermapping = _niciraNvpRouterMappingDao + .findByNetworkId(network.getId()); + if (routermapping == null) { + s_logger.error("No logical router uuid found for network " + + network.getDisplayText()); + return false; + } - List portForwardingRules = new ArrayList(); + List portForwardingRules = new ArrayList(); for (PortForwardingRule rule : rules) { IpAddress sourceIp = _networkManager.getIp(rule.getSourceIpAddressId()); Vlan vlan = _vlanDao.findById(sourceIp.getVlanId()); - PortForwardingRuleTO ruleTO = new PortForwardingRuleTO((PortForwardingRule) rule, vlan.getVlanTag(), sourceIp.getAddress().addr()); + PortForwardingRuleTO ruleTO = new PortForwardingRuleTO(rule, vlan.getVlanTag(), sourceIp.getAddress().addr()); portForwardingRules.add(ruleTO); } ConfigurePortForwardingRulesOnLogicalRouterCommand cmd = - new ConfigurePortForwardingRulesOnLogicalRouterCommand(routermapping.getLogicalRouterUuid(), portForwardingRules); + new ConfigurePortForwardingRulesOnLogicalRouterCommand(routermapping.getLogicalRouterUuid(), portForwardingRules); ConfigurePortForwardingRulesOnLogicalRouterAnswer answer = (ConfigurePortForwardingRulesOnLogicalRouterAnswer) _agentMgr.easySend(niciraNvpHost.getId(), cmd); return answer.getResult(); - } + } } diff --git a/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java 
b/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java index 6bc08f6c787..ebae66061b3 100644 --- a/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java +++ b/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java @@ -22,8 +22,6 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; -import org.springframework.stereotype.Component; - import com.cloud.deploy.DeployDestination; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; @@ -42,7 +40,6 @@ import com.cloud.vm.ReservationContext; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; -@Component @Local(value = NetworkElement.class) public class OvsElement extends AdapterBase implements NetworkElement { @Inject diff --git a/plugins/pom.xml b/plugins/pom.xml index d1d1bede77c..8c68c28be3c 100644 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -40,7 +40,6 @@ hypervisors/ovm hypervisors/xen hypervisors/kvm - hypervisors/simulator network-elements/elastic-loadbalancer network-elements/ovs network-elements/nicira-nvp @@ -50,8 +49,6 @@ user-authenticators/plain-text user-authenticators/sha256salted network-elements/dns-notifier - storage/image/s3 - storage/volume/solidfire diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java index d0413e39bf8..e55cce0faae 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java @@ -1,82 +1,97 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
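+// NOTE: every method in this driver is currently an auto-generated stub;
+// the SolidFire-specific storage logic has not been implemented yet.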
package org.apache.cloudstack.storage.datastore.driver; -import java.util.Set; +import java.util.Map; -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; +import org.apache.cloudstack.storage.EndPoint; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.datastore.PrimaryDataStore; +import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo; +import org.apache.cloudstack.storage.volume.VolumeObject; public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { + + @Override + public String grantAccess(VolumeObject vol, EndPoint ep) { + // TODO Auto-generated method stub + return null; + } + + @Override + public boolean revokeAccess(VolumeObject vol, EndPoint ep) { + // TODO Auto-generated method stub + return false; + } + @Override - public String grantAccess(DataObject data, - org.apache.cloudstack.engine.subsystem.api.storage.EndPoint ep) { + public long getCapacity() { // TODO Auto-generated method stub - return null; + return 0; } @Override - public boolean revokeAccess(DataObject data, - org.apache.cloudstack.engine.subsystem.api.storage.EndPoint ep) { + public long getAvailableCapacity() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public boolean initialize(Map params) { // TODO Auto-generated method stub return false; } @Override - public Set listObjects(DataStore store) { - // TODO Auto-generated method stub - return null; - } - - @Override - public void createAsync(DataObject data, - AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - - } - - @Override - public void deleteAsync( - DataObject data, - AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - - } - - @Override - public void copyAsync(DataObject srcdata, DataObject destData, - AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - - } - - @Override - public boolean canCopy(DataObject srcData, DataObject destData) { + public boolean grantAccess(EndPoint ep) { // TODO Auto-generated method stub return false; } @Override - public void takeSnapshot( - SnapshotInfo snapshot, - AsyncCompletionCallback callback) { + public boolean revokeAccess(EndPoint ep) { + // TODO Auto-generated method stub + return false; + } + + @Override + public void setDataStore(PrimaryDataStore dataStore) { // TODO Auto-generated method stub } @Override - public void revertSnapshot( - SnapshotInfo snapshot, - AsyncCompletionCallback callback) { + public void createVolumeFromBaseImageAsync(VolumeObject volume, TemplateOnPrimaryDataStoreInfo template, AsyncCompletionCallback callback) { // TODO Auto-generated method stub } + @Override + public void createVolumeAsync(VolumeObject vol, AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } - + @Override + public void deleteVolumeAsync(VolumeObject vo, AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java 
diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java
index 9bf8fa7a24e..bcffbd37e4b 100644
--- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java
+++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java
@@ -1,5 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.storage.datastore.provider;

+import java.util.List;
+
+import org.apache.cloudstack.storage.datastore.DefaultPrimaryDataStore;
+import org.apache.cloudstack.storage.datastore.PrimaryDataStore;
+import org.apache.cloudstack.storage.datastore.configurator.PrimaryDataStoreConfigurator;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO;
+import org.apache.cloudstack.storage.datastore.driver.SolidfirePrimaryDataStoreDriver;
+import org.apache.cloudstack.storage.datastore.lifecycle.DefaultPrimaryDataStoreLifeCycleImpl;
+import org.springframework.beans.factory.annotation.Qualifier;
 import org.springframework.stereotype.Component;

 @Component
@@ -8,8 +33,8 @@ public class SolidfirePrimaryDataStoreProvider extends

     private final String name = "Solidfre Primary Data Store Provider";

-    public SolidfirePrimaryDataStoreProvider() {
-
+    public SolidfirePrimaryDataStoreProvider(@Qualifier("solidfire") List configurators) {
+        super(configurators);
         // TODO Auto-generated constructor stub
     }

@@ -19,5 +44,21 @@ public class SolidfirePrimaryDataStoreProvider extends
         return name;
     }
-
+    @Override
+    public PrimaryDataStore getDataStore(long dataStoreId) {
+        PrimaryDataStoreVO dsv = dataStoreDao.findById(dataStoreId);
+        if (dsv == null) {
+            return null;
+        }
+
+        DefaultPrimaryDataStore pds = DefaultPrimaryDataStore.createDataStore(dsv);
+        SolidfirePrimaryDataStoreDriver driver = new SolidfirePrimaryDataStoreDriver();
+        pds.setDriver(driver);
+
+        DefaultPrimaryDataStoreLifeCycleImpl lifeCycle = new DefaultPrimaryDataStoreLifeCycleImpl(dataStoreDao);
+
+        pds.setLifeCycle(lifeCycle);
+        return pds;
+    }
 }
diff --git a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/AopTestAdvice.java b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/AopTestAdvice.java
index ba356e3e6b5..63669c453d7 100644
--- a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/AopTestAdvice.java
+++ b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/AopTestAdvice.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.storage.test;

 import org.aspectj.lang.ProceedingJoinPoint;
diff --git a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java
index 6a7b5ad16b1..eb6fe453886 100644
--- a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java
+++ b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.storage.test;

 import org.apache.cloudstack.storage.image.motion.ImageMotionService;
diff --git a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/TestConfiguration.java b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/TestConfiguration.java
index 42cd8fb5f59..2c6092d7408 100644
--- a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/TestConfiguration.java
+++ b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/TestConfiguration.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.storage.test;

 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
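The interesting change in the provider above is its constructor: the old no-arg constructor, which paired with LegacyComponentLocator lookups, is replaced by Spring constructor injection, where @Qualifier("solidfire") selects which PrimaryDataStoreConfigurator beans Spring gathers into the injected list. A self-contained sketch of that collection-injection pattern (the interface and class names here are stand-ins, not code from this patch):

    import java.util.List;

    import org.springframework.beans.factory.annotation.Qualifier;
    import org.springframework.stereotype.Component;

    // Stand-in for PrimaryDataStoreConfigurator so the sketch compiles on its own.
    interface StoreConfigurator {
        String protocol();
    }

    @Component
    @Qualifier("solidfire")
    class SolidfireNfsConfigurator implements StoreConfigurator {
        @Override
        public String protocol() {
            return "nfs";
        }
    }

    @Component
    class SketchProvider {
        private final List<StoreConfigurator> configurators;

        // Spring collects every bean carrying @Qualifier("solidfire") into this
        // list, which is exactly how SolidfirePrimaryDataStoreProvider receives
        // its configurators in the diff above.
        SketchProvider(@Qualifier("solidfire") List<StoreConfigurator> configurators) {
            this.configurators = configurators;
        }
    }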
diff --git a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java
index 0b7369448e7..f5035bf4303 100644
--- a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java
+++ b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java
@@ -1,5 +1,23 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
 package org.apache.cloudstack.storage.test;

+import static org.junit.Assert.*;
+
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -10,10 +28,12 @@ import javax.inject.Inject;
 import javax.naming.ConfigurationException;

 import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
 import org.apache.cloudstack.storage.command.CreateVolumeAnswer;
 import org.apache.cloudstack.storage.command.CreateVolumeFromBaseImageCommand;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProvider;
+import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProviderManager;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -23,9 +43,9 @@ import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;

 import com.cloud.agent.AgentManager;
 import com.cloud.dc.ClusterVO;
-import com.cloud.dc.DataCenter.NetworkType;
 import com.cloud.dc.DataCenterVO;
 import com.cloud.dc.HostPodVO;
+import com.cloud.dc.DataCenter.NetworkType;
 import com.cloud.dc.dao.ClusterDao;
 import com.cloud.dc.dao.DataCenterDao;
 import com.cloud.dc.dao.HostPodDao;
@@ -53,8 +73,8 @@ public class VolumeTest {
     DataCenterDao dcDao;
     @Inject
     PrimaryDataStoreDao primaryStoreDao;
-    //@Inject
-    //PrimaryDataStoreProviderManager primaryDataStoreProviderMgr;
+    @Inject
+    PrimaryDataStoreProviderManager primaryDataStoreProviderMgr;
     @Inject
     AgentManager agentMgr;
     Long dcId;
@@ -114,16 +134,16 @@ public class VolumeTest {

     private PrimaryDataStoreInfo createPrimaryDataStore() {
         try {
-            //primaryDataStoreProviderMgr.configure("primary data store mgr", new HashMap());
-            //PrimaryDataStoreProvider provider = primaryDataStoreProviderMgr.getDataStoreProvider("Solidfre Primary Data Store Provider");
+            primaryDataStoreProviderMgr.configure("primary data store mgr", new HashMap());
+            PrimaryDataStoreProvider provider = primaryDataStoreProviderMgr.getDataStoreProvider("Solidfre Primary Data Store Provider");
             Map params = new HashMap();
             params.put("url", "nfs://test/test");
             params.put("dcId", dcId.toString());
             params.put("clusterId", clusterId.toString());
             params.put("name", "my primary data store");
-            //PrimaryDataStoreInfo primaryDataStoreInfo = provider.registerDataStore(params);
-            return null;
-        } catch (Exception e) {
+            PrimaryDataStoreInfo primaryDataStoreInfo = provider.registerDataStore(params);
+            return primaryDataStoreInfo;
+        } catch (ConfigurationException e) {
             return null;
         }
     }
diff --git a/plugins/storage/volume/solidfire/test/resource/storageContext.xml b/plugins/storage/volume/solidfire/test/resource/storageContext.xml
index 6800d8f48b8..e4ba9867803 100644
--- a/plugins/storage/volume/solidfire/test/resource/storageContext.xml
+++ b/plugins/storage/volume/solidfire/test/resource/storageContext.xml
@@ -1,3 +1,21 @@
+
diff --git a/server/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java b/server/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java
--- a/server/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java
+++ b/server/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java
-    public boolean configure(String name, Map params)
-            throws ConfigurationException {
-        super.configure(name, params);
-        return true;
-    }
-
-    /* (non-Javadoc)
-     * @see com.cloud.server.auth.UserAuthenticator#authenticate(java.lang.String, java.lang.String, java.lang.Long, java.util.Map)
-     */
-    @Override
-    public boolean authenticate(String username, String password,
-            Long domainId, Map requestParameters) {
-        if (s_logger.isDebugEnabled()) {
+    @Inject
+    private UserAccountDao _userAccountDao;
+    private static int s_saltlen = 20;
+
+    @Override
+    public boolean configure(String name, Map params)
+            throws ConfigurationException {
+        super.configure(name, params);
+        return true;
+    }
+
+    /* (non-Javadoc)
+     * @see com.cloud.server.auth.UserAuthenticator#authenticate(java.lang.String, java.lang.String, java.lang.Long, java.util.Map)
+     */
+    @Override
+    public boolean authenticate(String username, String password,
+            Long domainId, Map requestParameters) {
+        if (s_logger.isDebugEnabled()) {
             s_logger.debug("Retrieving user: " + username);
         }
         UserAccount user = _userAccountDao.getUserAccount(username, domainId);
@@ -64,59 +62,59 @@ public class SHA256SaltedUserAuthenticator extends DefaultUserAuthenticator {
             s_logger.debug("Unable to find user with " + username + " in domain " + domainId);
             return false;
         }
-
+
         try {
-            String storedPassword[] = user.getPassword().split(":");
-            if (storedPassword.length != 2) {
-                s_logger.warn("The stored password for " + username + " isn't in the right format for this authenticator");
-                return false;
-            }
-            byte salt[] = Base64.decode(storedPassword[0]);
-            String hashedPassword = encode(password, salt);
-            return storedPassword[1].equals(hashedPassword);
-        } catch (NoSuchAlgorithmException e) {
-            throw new CloudRuntimeException("Unable to hash password", e);
-        } catch (UnsupportedEncodingException e) {
-            throw new CloudRuntimeException("Unable to hash password", e);
-        }
-    }
+            String storedPassword[] = user.getPassword().split(":");
+            if (storedPassword.length != 2) {
+                s_logger.warn("The stored password for " + username + " isn't in the right format for this authenticator");
+                return false;
+            }
+            byte salt[] = Base64.decode(storedPassword[0]);
+            String hashedPassword = encode(password, salt);
+            return storedPassword[1].equals(hashedPassword);
+        } catch (NoSuchAlgorithmException e) {
+            throw new CloudRuntimeException("Unable to hash password", e);
+        } catch (UnsupportedEncodingException e) {
+            throw new CloudRuntimeException("Unable to hash password", e);
+        }
+    }

-    /* (non-Javadoc)
-     * @see com.cloud.server.auth.UserAuthenticator#encode(java.lang.String)
-     */
-    @Override
-    public String encode(String password) {
-        // 1. Generate the salt
-        SecureRandom randomGen;
-        try {
-            randomGen = SecureRandom.getInstance("SHA1PRNG");
-
-            byte salt[] = new byte[s_saltlen];
-            randomGen.nextBytes(salt);
-
-            String saltString = new String(Base64.encode(salt));
-            String hashString = encode(password, salt);
-
-            // 3. concatenate the two and return
-            return saltString + ":" + hashString;
-        } catch (NoSuchAlgorithmException e) {
-            throw new CloudRuntimeException("Unable to hash password", e);
-        } catch (UnsupportedEncodingException e) {
-            throw new CloudRuntimeException("Unable to hash password", e);
-        }
-    }
+    /* (non-Javadoc)
+     * @see com.cloud.server.auth.UserAuthenticator#encode(java.lang.String)
+     */
+    @Override
+    public String encode(String password) {
+        // 1. Generate the salt
+        SecureRandom randomGen;
+        try {
+            randomGen = SecureRandom.getInstance("SHA1PRNG");

-    public String encode(String password, byte[] salt) throws UnsupportedEncodingException, NoSuchAlgorithmException {
-        byte[] passwordBytes = password.getBytes("UTF-8");
-        byte[] hashSource = new byte[passwordBytes.length + s_saltlen];
-        System.arraycopy(passwordBytes, 0, hashSource, 0, passwordBytes.length);
-        System.arraycopy(salt, 0, hashSource, passwordBytes.length, s_saltlen);
-
-        // 2. Hash the password with the salt
-        MessageDigest md = MessageDigest.getInstance("SHA-256");
-        md.update(hashSource);
-        byte[] digest = md.digest();
-
-        return new String(Base64.encode(digest));
-    }
+            byte salt[] = new byte[s_saltlen];
+            randomGen.nextBytes(salt);
+
+            String saltString = new String(Base64.encode(salt));
+            String hashString = encode(password, salt);
+
+            // 3. concatenate the two and return
+            return saltString + ":" + hashString;
+        } catch (NoSuchAlgorithmException e) {
+            throw new CloudRuntimeException("Unable to hash password", e);
+        } catch (UnsupportedEncodingException e) {
+            throw new CloudRuntimeException("Unable to hash password", e);
+        }
+    }
+
+    public String encode(String password, byte[] salt) throws UnsupportedEncodingException, NoSuchAlgorithmException {
+        byte[] passwordBytes = password.getBytes("UTF-8");
+        byte[] hashSource = new byte[passwordBytes.length + s_saltlen];
+        System.arraycopy(passwordBytes, 0, hashSource, 0, passwordBytes.length);
+        System.arraycopy(salt, 0, hashSource, passwordBytes.length, s_saltlen);
+
+        // 2. Hash the password with the salt
+        MessageDigest md = MessageDigest.getInstance("SHA-256");
+        md.update(hashSource);
+        byte[] digest = md.digest();
+
+        return new String(Base64.encode(digest));
+    }
 }
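The authenticator diff above is mostly re-indentation, but the scheme it carries is worth spelling out: encode(password) stores base64(salt) + ":" + base64(SHA-256(password || salt)), and authenticate() splits the stored value on ":", decodes the salt, and re-hashes the candidate password. A self-contained round trip of that format (java.util.Base64 stands in here for the Base64 codec the patch uses):

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.security.SecureRandom;
    import java.util.Base64;

    // Illustration of the salt:hash format above; not CloudStack code.
    public class SaltedHashDemo {
        static final int SALT_LEN = 20; // mirrors s_saltlen in the patch

        static String encode(String password, byte[] salt) throws Exception {
            byte[] pw = password.getBytes(StandardCharsets.UTF_8);
            byte[] src = new byte[pw.length + SALT_LEN];
            System.arraycopy(pw, 0, src, 0, pw.length);
            System.arraycopy(salt, 0, src, pw.length, SALT_LEN);
            byte[] digest = MessageDigest.getInstance("SHA-256").digest(src);
            return Base64.getEncoder().encodeToString(digest);
        }

        public static void main(String[] args) throws Exception {
            byte[] salt = new byte[SALT_LEN];
            SecureRandom.getInstance("SHA1PRNG").nextBytes(salt);
            // Stored form, exactly as the authenticator writes it: "salt:hash".
            String stored = Base64.getEncoder().encodeToString(salt) + ":" + encode("secret", salt);

            // Verification path: split, decode the salt, re-hash the candidate.
            String[] parts = stored.split(":");
            byte[] storedSalt = Base64.getDecoder().decode(parts[0]);
            System.out.println(parts[1].equals(encode("secret", storedSalt))); // true
        }
    }

Base64 output never contains ':', so the split is unambiguous, which is why the single-delimiter storage format is safe.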
diff --git a/pom.xml b/pom.xml
index 3abf731521d..4b5e3cfe54e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -43,6 +43,7 @@
+    true
     1.6
     UTF-8
@@ -88,7 +89,6 @@
     2.6
     1.4
     0.9.8
-    true
@@ -296,6 +296,36 @@
         install
+
+
+        org.eclipse.m2e
+        lifecycle-mapping
+        1.0.0
+
+
+
+            org.apache.maven.plugins
+
+
+            maven-antrun-plugin
+
+            [1.7,)
+
+            run
+
+
+
+
         org.apache.tomcat.maven
         tomcat7-maven-plugin
@@ -346,6 +376,7 @@
             scripts/vm/systemvm/id_rsa.cloud
             tools/devcloud/basebuild/puppet-devcloudinitial/files/network.conf
             tools/devcloud/devcloud.cfg
+            tools/devcloud-kvm/devcloud-kvm.cfg
             ui/lib/flot/jquery.colorhelpers.js
             ui/lib/flot/jquery.flot.crosshair.js
             ui/lib/flot/jquery.flot.fillbetween.js
@@ -472,6 +503,7 @@
     developer
     tools/apidoc
     tools/devcloud
+    tools/devcloud-kvm
     tools/marvin
     tools/cli
diff --git a/server/pom.xml b/server/pom.xml
index b6d86e128b5..8592f27e4b7 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -90,6 +90,11 @@
       cloud-engine-api
       ${project.version}
+
+      org.apache.cloudstack
+      cloud-api
+      ${project.version}
+
     install
diff --git a/server/src/com/cloud/agent/manager/AgentManagerImpl.java b/server/src/com/cloud/agent/manager/AgentManagerImpl.java
index ee5971f51c7..77f131ad222 100755
--- a/server/src/com/cloud/agent/manager/AgentManagerImpl.java
+++ b/server/src/com/cloud/agent/manager/AgentManagerImpl.java
@@ -66,7 +66,6 @@ import com.cloud.agent.transport.Response;
 import com.cloud.alert.AlertManager;
 import com.cloud.capacity.dao.CapacityDao;
 import com.cloud.cluster.ManagementServerNode;
-import com.cloud.cluster.StackMaid;
 import com.cloud.configuration.Config;
 import com.cloud.configuration.dao.ConfigurationDao;
 import com.cloud.dc.ClusterDetailsDao;
@@ -107,7 +106,6 @@ import com.cloud.user.AccountManager;
 import com.cloud.utils.ActionDelegate;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.utils.component.Manager;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.db.DB;
@@ -151,7 +149,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
     protected List> _creationMonitors = new ArrayList>(17);
     protected List _loadingAgents = new ArrayList();
     protected int _monitorId = 0;
-    private Lock _agentStatusLock = new ReentrantLock();
+    private final Lock _agentStatusLock = new ReentrantLock();
     protected NioServer _connection;

     @Inject
@@ -195,10 +193,10 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
     @Inject
     protected VirtualMachineManager _vmMgr = null;
-
+
     @Inject StorageService _storageSvr = null;
     @Inject StorageManager _storageMgr = null;
-
+
     @Inject
     protected HypervisorGuruManager _hvGuruMgr;

@@ -222,11 +220,11 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {

     protected ExecutorService _executor;
     protected ThreadPoolExecutor _connectExecutor;
-
+
     protected StateMachine2 _statusStateMachine = Status.getStateMachine();
-
+
     @Inject ResourceManager _resourceMgr;
-
+
     @Override
     public boolean configure(final String name, final Map params) throws ConfigurationException {
         _name = name;
@@ -263,7 +261,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
         _nodeId = ManagementServerNode.getManagementServerId();

         s_logger.info("Configuring
AgentManagerImpl. management server node id(msid): " + _nodeId); - + long lastPing = (System.currentTimeMillis() >> 10) - _pingTimeout; _hostDao.markHostsAsDisconnected(_nodeId, lastPing); @@ -276,7 +274,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { new LinkedBlockingQueue(), new NamedThreadFactory("AgentConnectTaskPool")); //allow core threads to time out even when there are no items in the queue _connectExecutor.allowCoreThreadTimeOut(true); - + _connection = new NioServer("AgentManager", _port, workers + 10, this); s_logger.info("Listening on " + _port + " with " + workers + " workers"); @@ -395,7 +393,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { } else if ( ssHost.getType() == Host.Type.SecondaryStorage) { sendToSSVM(ssHost.getDataCenterId(), cmd, listener); } else { - String err = "do not support Secondary Storage type " + ssHost.getType(); + String err = "do not support Secondary Storage type " + ssHost.getType(); s_logger.warn(err); throw new CloudRuntimeException(err); } @@ -435,7 +433,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { } Answer answer = null; try { - + long targetHostId = _hvGuruMgr.getGuruProcessedCommandTargetHost(host.getId(), cmd); answer = easySend(targetHostId, cmd); } catch (Exception e) { @@ -552,7 +550,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { assert cmds.length > 0 : "Why are you sending zero length commands?"; if (cmds.length == 0) { - throw new AgentUnavailableException("Empty command set for agent " + agent.getId(), agent.getId()); + throw new AgentUnavailableException("Empty command set for agent " + agent.getId(), agent.getId()); } Request req = new Request(hostId, _nodeId, cmds, commands.stopOnError(), true); req.setSequence(agent.getNextSequence()); @@ -585,7 +583,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { if (removed != null) { removed.disconnect(nextState); } - + for (Pair monitor : _hostMonitors) { if (s_logger.isDebugEnabled()) { s_logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName()); @@ -593,7 +591,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { monitor.second().processDisconnect(hostId, nextState); } } - + protected AgentAttache notifyMonitorsOfConnection(AgentAttache attache, final StartupCommand[] cmd, boolean forRebalance) throws ConnectionException { long hostId = attache.getId(); HostVO host = _hostDao.findById(hostId); @@ -678,7 +676,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { loadDirectlyConnectedHost(host, false); } } - + private ServerResource loadResourcesWithoutHypervisor(HostVO host){ String resourceName = host.getResource(); ServerResource resource = null; @@ -704,10 +702,10 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { if(resource != null){ _hostDao.loadDetails(host); - + HashMap params = new HashMap(host.getDetails().size() + 5); params.putAll(host.getDetails()); - + params.put("guid", host.getGuid()); params.put("zone", Long.toString(host.getDataCenterId())); if (host.getPodId() != null) { @@ -726,19 +724,19 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { params.put("pool", guid); } } - + params.put("ipaddress", host.getPrivateIpAddress()); params.put("secondary.storage.vm", "false"); params.put("max.template.iso.size", 
_configDao.getValue(Config.MaxTemplateAndIsoSize.toString())); params.put("migratewait", _configDao.getValue(Config.MigrateWait.toString())); - + try { resource.configure(host.getName(), params); } catch (ConfigurationException e) { s_logger.warn("Unable to configure resource due to " + e.getMessage()); return null; } - + if (!resource.start()) { s_logger.warn("Unable to start the resource"); return null; @@ -746,13 +744,13 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { } return resource; } - + @SuppressWarnings("rawtypes") protected boolean loadDirectlyConnectedHost(HostVO host, boolean forRebalance) { - boolean initialized = false; + boolean initialized = false; ServerResource resource = null; - try { + try { //load the respective discoverer Discoverer discoverer = _resourceMgr.getMatchingDiscover(host.getHypervisorType()); if(discoverer == null){ @@ -761,20 +759,20 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { }else{ resource = discoverer.reloadResource(host); } - + if(resource == null){ s_logger.warn("Unable to load the resource: "+ host.getId()); return false; } - - initialized = true; - } finally { - if(!initialized) { + + initialized = true; + } finally { + if(!initialized) { if (host != null) { agentStatusTransitTo(host, Event.AgentDisconnected, _nodeId); } - } - } + } + } if (forRebalance) { Host h = _resourceMgr.createHostAndAgent(host.getId(), resource, host.getDetails(), false, null, true); @@ -790,10 +788,10 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { if (resource instanceof DummySecondaryStorageResource || resource instanceof KvmDummyResourceBase) { return new DummyAttache(this, host.getId(), false); } - + s_logger.debug("create DirectAgentAttache for " + host.getId()); DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), resource, host.isInMaintenanceStates(), this); - + AgentAttache old = null; synchronized (_agents) { old = _agents.put(host.getId(), attache); @@ -804,7 +802,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { return attache; } - + @Override public boolean stop() { if (_monitor != null) { @@ -823,13 +821,13 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { s_logger.debug("Cant not find host " + agent.getId()); } } else { - if (!agent.forForward()) { - agentStatusTransitTo(host, Event.ManagementServerDown, _nodeId); - } + if (!agent.forForward()) { + agentStatusTransitTo(host, Event.ManagementServerDown, _nodeId); + } } } } - + _connectExecutor.shutdownNow(); return true; } @@ -838,7 +836,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { public String getName() { return _name; } - + protected boolean handleDisconnectWithoutInvestigation(AgentAttache attache, Status.Event event, boolean transitState) { long hostId = attache.getId(); @@ -863,7 +861,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { s_logger.debug(err); throw new CloudRuntimeException(err); } - + if (s_logger.isDebugEnabled()) { s_logger.debug("The next status of agent " + hostId + "is " + nextStatus + ", current status is " + currentStatus); } @@ -876,15 +874,15 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { //remove the attache removeAgent(attache, nextStatus); - + //update the DB if (host != null && transitState) { - disconnectAgent(host, event, _nodeId); + disconnectAgent(host, event, 
_nodeId); } return true; } - + protected boolean handleDisconnectWithInvestigation(AgentAttache attache, Status.Event event) { long hostId = attache.getId(); HostVO host = _hostDao.findById(hostId); @@ -898,7 +896,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { * God knew what race condition the code dealt with! */ } - + if (nextStatus == Status.Alert) { /* OK, we are going to the bad status, let's see what happened */ s_logger.info("Investigating why host " + hostId + " has disconnected with event " + event); @@ -947,7 +945,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { s_logger.debug("The next status of Agent " + host.getId() + " is not Alert, no need to investigate what happened"); } } - + handleDisconnectWithoutInvestigation(attache, event, true); host = _hostDao.findById(host.getId()); if (host.getStatus() == Status.Alert || host.getStatus() == Status.Down) { @@ -970,16 +968,14 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { @Override public void run() { - try { + try { if (_investigate == true) { handleDisconnectWithInvestigation(_attache, _event); } else { - handleDisconnectWithoutInvestigation(_attache, _event, true); + handleDisconnectWithoutInvestigation(_attache, _event, true); } } catch (final Exception e) { s_logger.error("Exception caught while handling disconnect: ", e); - } finally { - StackMaid.current().exitCleanup(); } } } @@ -1059,14 +1055,14 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { @Override public boolean executeUserRequest(long hostId, Event event) throws AgentUnavailableException { - if (event == Event.AgentDisconnected) { + if (event == Event.AgentDisconnected) { if (s_logger.isDebugEnabled()) { s_logger.debug("Received agent disconnect event for host " + hostId); } AgentAttache attache = null; attache = findAttache(hostId); if (attache != null) { - handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true); + handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true); } return true; } else if (event == Event.ShutdownRequested) { @@ -1079,7 +1075,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { s_logger.debug("create ConnectedAgentAttache for " + host.getId()); AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), link, host.isInMaintenanceStates()); link.attach(attache); - + AgentAttache old = null; synchronized (_agents) { old = _agents.put(host.getId(), attache); @@ -1090,36 +1086,36 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { return attache; } - + private AgentAttache handleConnectedAgent(final Link link, final StartupCommand[] startup, Request request) { - AgentAttache attache = null; - ReadyCommand ready = null; - try { - HostVO host = _resourceMgr.createHostVOForConnectedAgent(startup); - if (host != null) { - ready = new ReadyCommand(host.getDataCenterId(), host.getId()); - attache = createAttacheForConnect(host, link); - attache = notifyMonitorsOfConnection(attache, startup, false); - } + AgentAttache attache = null; + ReadyCommand ready = null; + try { + HostVO host = _resourceMgr.createHostVOForConnectedAgent(startup); + if (host != null) { + ready = new ReadyCommand(host.getDataCenterId(), host.getId()); + attache = createAttacheForConnect(host, link); + attache = notifyMonitorsOfConnection(attache, startup, false); + } } catch (Exception e) { - s_logger.debug("Failed to 
handle host connection: " + e.toString()); - ready = new ReadyCommand(null); - ready.setDetails(e.toString()); + s_logger.debug("Failed to handle host connection: " + e.toString()); + ready = new ReadyCommand(null); + ready.setDetails(e.toString()); } finally { if (ready == null) { ready = new ReadyCommand(null); - } + } } - + try { - if (attache == null) { - final Request readyRequest = new Request(-1, -1, ready, false); - link.send(readyRequest.getBytes()); - } else { - easySend(attache.getId(), ready); - } + if (attache == null) { + final Request readyRequest = new Request(-1, -1, ready, false); + link.send(readyRequest.getBytes()); + } else { + easySend(attache.getId(), ready); + } } catch (Exception e) { - s_logger.debug("Failed to send ready command:" + e.toString()); + s_logger.debug("Failed to send ready command:" + e.toString()); } return attache; } @@ -1143,7 +1139,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { if (s_logger.isDebugEnabled()) { s_logger.debug("Simulating start for resource " + resource.getName() + " id " + id); } - + _resourceMgr.createHostAndAgent(id, resource, details, false, null, false); } catch (Exception e) { s_logger.warn("Unable to simulate start on resource " + id + " name " + resource.getName(), e); @@ -1151,7 +1147,6 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { if (actionDelegate != null) { actionDelegate.action(new Long(id)); } - StackMaid.current().exitCleanup(); } } } @@ -1174,32 +1169,32 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { for (int i = 0; i < _cmds.length; i++) { startups[i] = (StartupCommand) _cmds[i]; } - + AgentAttache attache = handleConnectedAgent(_link, startups, _request); if (attache == null) { s_logger.warn("Unable to create attache for agent: " + _request); } } } - + protected void connectAgent(Link link, final Command[] cmds, final Request request) { - //send startupanswer to agent in the very beginning, so agent can move on without waiting for the answer for an undetermined time, if we put this logic into another thread pool. - StartupAnswer[] answers = new StartupAnswer[cmds.length]; - Command cmd; - for (int i = 0; i < cmds.length; i++) { - cmd = cmds[i]; - if ((cmd instanceof StartupRoutingCommand) || (cmd instanceof StartupProxyCommand) || (cmd instanceof StartupSecondaryStorageCommand) || (cmd instanceof StartupStorageCommand)) { - answers[i] = new StartupAnswer((StartupCommand)cmds[i], 0, getPingInterval()); - break; - } - } - Response response = null; - response = new Response(request, answers[0], _nodeId, -1); - try { - link.send(response.toBytes()); - } catch (ClosedChannelException e) { - s_logger.debug("Failed to send startupanswer: " + e.toString()); - } + //send startupanswer to agent in the very beginning, so agent can move on without waiting for the answer for an undetermined time, if we put this logic into another thread pool. 
+ StartupAnswer[] answers = new StartupAnswer[cmds.length]; + Command cmd; + for (int i = 0; i < cmds.length; i++) { + cmd = cmds[i]; + if ((cmd instanceof StartupRoutingCommand) || (cmd instanceof StartupProxyCommand) || (cmd instanceof StartupSecondaryStorageCommand) || (cmd instanceof StartupStorageCommand)) { + answers[i] = new StartupAnswer((StartupCommand)cmds[i], 0, getPingInterval()); + break; + } + } + Response response = null; + response = new Response(request, answers[0], _nodeId, -1); + try { + link.send(response.toBytes()); + } catch (ClosedChannelException e) { + s_logger.debug("Failed to send startupanswer: " + e.toString()); + } _connectExecutor.execute(new HandleAgentConnectTask(link, cmds, request)); } @@ -1215,14 +1210,14 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { boolean logD = true; if (attache == null) { - if (!(cmd instanceof StartupCommand)) { - s_logger.warn("Throwing away a request because it came through as the first command on a connect: " + request); - } else { - //submit the task for execution - request.logD("Scheduling the first command "); - connectAgent(link, cmds, request); - } - return; + if (!(cmd instanceof StartupCommand)) { + s_logger.warn("Throwing away a request because it came through as the first command on a connect: " + request); + } else { + //submit the task for execution + request.logD("Scheduling the first command "); + connectAgent(link, cmds, request); + } + return; } final long hostId = attache.getId(); @@ -1286,20 +1281,20 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { if (cmd instanceof PingRoutingCommand) { boolean gatewayAccessible = ((PingRoutingCommand) cmd).isGatewayAccessible(); HostVO host = _hostDao.findById(Long.valueOf(cmdHostId)); - - if (host != null) { - if (!gatewayAccessible) { - // alert that host lost connection to - // gateway (cannot ping the default route) - DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); - HostPodVO podVO = _podDao.findById(host.getPodId()); - String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName(); - _alertMgr.sendAlert(AlertManager.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId(), "Host lost connection to gateway, " + hostDesc, "Host [" + hostDesc - + "] lost connection to gateway (default route) and is possibly having network connection issues."); - } else { - _alertMgr.clearAlert(AlertManager.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId()); - } + if (host != null) { + if (!gatewayAccessible) { + // alert that host lost connection to + // gateway (cannot ping the default route) + DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); + HostPodVO podVO = _podDao.findById(host.getPodId()); + String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName(); + + _alertMgr.sendAlert(AlertManager.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId(), "Host lost connection to gateway, " + hostDesc, "Host [" + hostDesc + + "] lost connection to gateway (default route) and is possibly having network connection issues."); + } else { + _alertMgr.clearAlert(AlertManager.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId()); + } } else { s_logger.debug("Not processing " + PingRoutingCommand.class.getSimpleName() + " for agent id=" + cmdHostId + "; can't find the host in the DB"); @@ -1382,7 +1377,6 @@ public class 
AgentManagerImpl implements AgentManager, HandlerFactory, Manager { } } } finally { - StackMaid.current().exitCleanup(); txn.close(); } } @@ -1391,7 +1385,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { protected AgentManagerImpl() { } - @Override + @Override public boolean tapLoadingAgents(Long hostId, TapAgentsAction action) { synchronized (_loadingAgents) { if (action == TapAgentsAction.Add) { @@ -1406,58 +1400,58 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { } return true; } - + @Override public boolean agentStatusTransitTo(HostVO host, Status.Event e, long msId) { - try { - _agentStatusLock.lock(); - if (status_logger.isDebugEnabled()) { - ResourceState state = host.getResourceState(); - StringBuilder msg = new StringBuilder("Transition:"); - msg.append("[Resource state = ").append(state); - msg.append(", Agent event = ").append(e.toString()); - msg.append(", Host id = ").append(host.getId()).append(", name = " + host.getName()).append("]"); - status_logger.debug(msg); - } + try { + _agentStatusLock.lock(); + if (status_logger.isDebugEnabled()) { + ResourceState state = host.getResourceState(); + StringBuilder msg = new StringBuilder("Transition:"); + msg.append("[Resource state = ").append(state); + msg.append(", Agent event = ").append(e.toString()); + msg.append(", Host id = ").append(host.getId()).append(", name = " + host.getName()).append("]"); + status_logger.debug(msg); + } - host.setManagementServerId(msId); - try { - return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao); - } catch (NoTransitionException e1) { - status_logger.debug("Cannot transit agent status with event " + e + " for host " + host.getId() + ", name=" + host.getName() - + ", mangement server id is " + msId); - throw new CloudRuntimeException("Cannot transit agent status with event " + e + " for host " + host.getId() + ", mangement server id is " - + msId + "," + e1.getMessage()); - } - } finally { - _agentStatusLock.unlock(); - } + host.setManagementServerId(msId); + try { + return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao); + } catch (NoTransitionException e1) { + status_logger.debug("Cannot transit agent status with event " + e + " for host " + host.getId() + ", name=" + host.getName() + + ", mangement server id is " + msId); + throw new CloudRuntimeException("Cannot transit agent status with event " + e + " for host " + host.getId() + ", mangement server id is " + + msId + "," + e1.getMessage()); + } + } finally { + _agentStatusLock.unlock(); + } } - + public boolean disconnectAgent(HostVO host, Status.Event e, long msId) { host.setDisconnectedOn(new Date()); if (e.equals(Status.Event.Remove)) { host.setGuid(null); host.setClusterId(null); } - + return agentStatusTransitTo(host, e, msId); } - + protected void disconnectWithoutInvestigation(AgentAttache attache, final Status.Event event) { _executor.submit(new DisconnectTask(attache, event, false)); } - + protected void disconnectWithInvestigation(AgentAttache attache, final Status.Event event) { _executor.submit(new DisconnectTask(attache, event, true)); } - + private void disconnectInternal(final long hostId, final Status.Event event, boolean invstigate) { AgentAttache attache = findAttache(hostId); if (attache != null) { if (!invstigate) { - disconnectWithoutInvestigation(attache, event); + disconnectWithoutInvestigation(attache, event); } else { disconnectWithInvestigation(attache, event); } @@ -1470,35 +1464,35 @@ public class 
AgentManagerImpl implements AgentManager, HandlerFactory, Manager { HostVO host = _hostDao.findById(hostId); if (host != null && host.getRemoved() == null) { - disconnectAgent(host, event, _nodeId); + disconnectAgent(host, event, _nodeId); } } } - + public void disconnectWithInvestigation(final long hostId, final Status.Event event) { disconnectInternal(hostId, event, true); } - + @Override public void disconnectWithoutInvestigation(final long hostId, final Status.Event event) { disconnectInternal(hostId, event, false); } - @Override + @Override public AgentAttache handleDirectConnectAgent(HostVO host, StartupCommand[] cmds, ServerResource resource, boolean forRebalance) throws ConnectionException { - AgentAttache attache; - - attache = createAttacheForDirectConnect(host, resource); + AgentAttache attache; + + attache = createAttacheForDirectConnect(host, resource); StartupAnswer[] answers = new StartupAnswer[cmds.length]; for (int i = 0; i < answers.length; i++) { answers[i] = new StartupAnswer(cmds[i], attache.getId(), _pingInterval); } attache.process(answers); - attache = notifyMonitorsOfConnection(attache, cmds, forRebalance); - - return attache; + attache = notifyMonitorsOfConnection(attache, cmds, forRebalance); + + return attache; } - + @Override public void pullAgentToMaintenance(long hostId) { AgentAttache attache = findAttache(hostId); @@ -1508,15 +1502,15 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { attache.cancelAllCommands(Status.Disconnected, false); } } - + @Override public void pullAgentOutMaintenance(long hostId) { AgentAttache attache = findAttache(hostId); if (attache != null) { - attache.setMaintenanceMode(false); + attache.setMaintenanceMode(false); } } - - - + + + } diff --git a/server/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/server/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index 6753b280961..25c71687bed 100755 --- a/server/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/server/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -62,7 +62,6 @@ import com.cloud.cluster.ClusterManagerListener; import com.cloud.cluster.ClusteredAgentRebalanceService; import com.cloud.cluster.ManagementServerHost; import com.cloud.cluster.ManagementServerHostVO; -import com.cloud.cluster.StackMaid; import com.cloud.cluster.agentlb.AgentLoadBalancerPlanner; import com.cloud.cluster.agentlb.HostTransferMapVO; import com.cloud.cluster.agentlb.HostTransferMapVO.HostTransferState; @@ -80,8 +79,6 @@ import com.cloud.resource.ServerResource; import com.cloud.storage.resource.DummySecondaryStorageResource; import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; -import com.cloud.utils.component.Adapters; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.SearchCriteria2; @@ -116,10 +113,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust protected ManagementServerHostDao _mshostDao; @Inject protected HostTransferMapDao _hostTransferDao; - + // @com.cloud.utils.component.Inject(adapter = AgentLoadBalancerPlanner.class) @Inject protected List _lbPlanners; - + @Inject protected AgentManager _agentMgr; @Inject ConfigurationDao _configDao; @@ -133,7 +130,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust _peers = new HashMap(7); _sslEngines = new HashMap(7); _nodeId = 
_clusterMgr.getManagementNodeId(); - + s_logger.info("Configuring ClusterAgentManagerImpl. management server node id(msid): " + _nodeId); Map params = _configDao.getConfiguration(xmlParams); @@ -143,7 +140,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust ClusteredAgentAttache.initialize(this); _clusterMgr.registerListener(this); - + return super.configure(name, xmlParams); } @@ -177,7 +174,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust List hosts = _hostDao.findAndUpdateDirectAgentToLoad(cutSeconds, _loadSize, _nodeId); List appliances = _hostDao.findAndUpdateApplianceToLoad(cutSeconds, _nodeId); hosts.addAll(appliances); - + if (hosts != null && hosts.size() > 0) { s_logger.debug("Found " + hosts.size() + " unmanaged direct hosts, processing connect for them..."); for (HostVO host : hosts) { @@ -278,12 +275,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust protected boolean handleDisconnectWithoutInvestigation(AgentAttache attache, Status.Event event, boolean transitState) { return handleDisconnect(attache, event, false, true); } - + @Override protected boolean handleDisconnectWithInvestigation(AgentAttache attache, Status.Event event) { return handleDisconnect(attache, event, true, true); } - + protected boolean handleDisconnect(AgentAttache agent, Status.Event event, boolean investigate, boolean broadcast) { boolean res; if (!investigate) { @@ -292,14 +289,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust res = super.handleDisconnectWithInvestigation(agent, event); } - if (res) { - if (broadcast) { - notifyNodesInCluster(agent); - } - return true; - } else { - return false; - } + if (res) { + if (broadcast) { + notifyNodesInCluster(agent); + } + return true; + } else { + return false; + } } @Override @@ -343,15 +340,15 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust public boolean reconnect(final long hostId) { Boolean result; try { - result = _clusterMgr.propagateAgentEvent(hostId, Event.ShutdownRequested); - if (result != null) { - return result; - } + result = _clusterMgr.propagateAgentEvent(hostId, Event.ShutdownRequested); + if (result != null) { + return result; + } } catch (AgentUnavailableException e) { - s_logger.debug("cannot propagate agent reconnect because agent is not available", e); - return false; + s_logger.debug("cannot propagate agent reconnect because agent is not available", e); + return false; } - + return super.reconnect(hostId); } @@ -413,7 +410,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust public String findPeer(long hostId) { return _clusterMgr.getPeerName(hostId); } - + public SSLEngine getSSLEngine(String peerName) { return _sslEngines.get(peerName); } @@ -520,7 +517,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } if (agent == null) { - AgentUnavailableException ex = new AgentUnavailableException("Host with specified id is not in the right state: " + host.getStatus(), hostId); + AgentUnavailableException ex = new AgentUnavailableException("Host with specified id is not in the right state: " + host.getStatus(), hostId); ex.addProxyObject(ApiDBUtils.findHostById(hostId).getUuid()); throw ex; } @@ -540,11 +537,11 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } _timer.cancel(); - + //cancel all transfer tasks s_transferExecutor.shutdownNow(); 
cleanupTransferMap(_nodeId); - + return super.stop(); } @@ -698,19 +695,19 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override public boolean executeRebalanceRequest(long agentId, long currentOwnerId, long futureOwnerId, Event event) throws AgentUnavailableException, OperationTimedoutException { - boolean result = false; + boolean result = false; if (event == Event.RequestAgentRebalance) { return setToWaitForRebalance(agentId, currentOwnerId, futureOwnerId); } else if (event == Event.StartAgentRebalance) { try { - result = rebalanceHost(agentId, currentOwnerId, futureOwnerId); + result = rebalanceHost(agentId, currentOwnerId, futureOwnerId); } catch (Exception e) { s_logger.warn("Unable to rebalance host id=" + agentId, e); } } return result; } - + @Override public void scheduleRebalanceAgents() { _timer.schedule(new AgentLoadBalancerTask(), 30000); @@ -735,20 +732,20 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override public synchronized void run() { - try { - if (!cancelled) { - startRebalanceAgents(); - if (s_logger.isInfoEnabled()) { - s_logger.info("The agent load balancer task is now being cancelled"); - } - cancelled = true; - } - } catch(Throwable e) { - s_logger.error("Unexpected exception " + e.toString(), e); - } + try { + if (!cancelled) { + startRebalanceAgents(); + if (s_logger.isInfoEnabled()) { + s_logger.info("The agent load balancer task is now being cancelled"); + } + cancelled = true; + } + } catch(Throwable e) { + s_logger.error("Unexpected exception " + e.toString(), e); + } } } - + public void startRebalanceAgents() { s_logger.debug("Management server " + _nodeId + " is asking other peers to rebalance their agents"); List allMS = _mshostDao.listBy(ManagementServerHost.State.Up); @@ -767,7 +764,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } return; } - + if (avLoad == 0L) { if (s_logger.isDebugEnabled()) { s_logger.debug("As calculated average load is less than 1, rounding it to 1"); @@ -777,7 +774,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust for (ManagementServerHostVO node : allMS) { if (node.getMsid() != _nodeId) { - + List hostsToRebalance = new ArrayList(); for (AgentLoadBalancerPlanner lbPlanner : _lbPlanners) { hostsToRebalance = lbPlanner.getHostsToRebalance(node.getMsid(), avLoad); @@ -788,14 +785,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } - + if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { s_logger.debug("Found " + hostsToRebalance.size() + " hosts to rebalance from management server " + node.getMsid()); for (HostVO host : hostsToRebalance) { long hostId = host.getId(); s_logger.debug("Asking management server " + node.getMsid() + " to give away host id=" + hostId); boolean result = true; - + if (_hostTransferDao.findById(hostId) != null) { s_logger.warn("Somebody else is already rebalancing host id: " + hostId); continue; @@ -867,7 +864,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust for (Iterator iterator = _agentToTransferIds.iterator(); iterator.hasNext();) { Long hostId = iterator.next(); AgentAttache attache = findAttache(hostId); - + // if the thread: // 1) timed out waiting for the host to reconnect // 2) recipient management server is not active any more @@ -883,14 +880,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust 
_hostTransferDao.completeAgentTransfer(hostId); continue; } - + if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) { s_logger.debug("Management server " + _nodeId + " doesn't own host id=" + hostId + " any more, skipping rebalance for the host"); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; } - + ManagementServerHostVO ms = _mshostDao.findByMsid(transferMap.getFutureOwner()); if (ms != null && ms.getState() != ManagementServerHost.State.Up) { s_logger.debug("Can't transfer host " + hostId + " as it's future owner is not in UP state: " + ms + ", skipping rebalance for the host"); @@ -898,7 +895,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust _hostTransferDao.completeAgentTransfer(hostId); continue; } - + if (attache.getQueueSize() == 0 && attache.getNonRecurringListenersSize() == 0) { iterator.remove(); try { @@ -907,7 +904,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust s_logger.warn("Failed to submit rebalance task for host id=" + hostId + "; postponing the execution"); continue; } - + } else { s_logger.debug("Agent " + hostId + " can't be transfered yet as its request queue size is " + attache.getQueueSize() + " and listener queue size is " + attache.getNonRecurringListenersSize()); } @@ -925,16 +922,16 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } }; } - - + + private boolean setToWaitForRebalance(final long hostId, long currentOwnerId, long futureOwnerId) { s_logger.debug("Adding agent " + hostId + " to the list of agents to transfer"); synchronized (_agentToTransferIds) { return _agentToTransferIds.add(hostId); } } - - + + protected boolean rebalanceHost(final long hostId, long currentOwnerId, long futureOwnerId) throws AgentUnavailableException{ boolean result = true; @@ -954,7 +951,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust s_logger.warn("Host " + hostId + " failed to connect to the management server " + futureOwnerId + " as a part of rebalance process", ex); result = false; } - + if (result) { s_logger.debug("Successfully transfered host id=" + hostId + " to management server " + futureOwnerId); finishRebalance(hostId, futureOwnerId, Event.RebalanceCompleted); @@ -962,7 +959,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust s_logger.warn("Failed to transfer host id=" + hostId + " to management server " + futureOwnerId); finishRebalance(hostId, futureOwnerId, Event.RebalanceFailed); } - + } else if (futureOwnerId == _nodeId) { HostVO host = _hostDao.findById(hostId); try { @@ -977,9 +974,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (result) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); - } - result = loadDirectlyConnectedHost(host, true); + s_logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); + } + result = loadDirectlyConnectedHost(host, true); } else { s_logger.warn("Failed to disconnect " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification"); @@ -989,7 +986,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust s_logger.warn("Failed to 
load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process due to:", ex); result = false; } - + if (result) { s_logger.debug("Successfully loaded directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); } else { @@ -999,7 +996,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust return result; } - + protected void finishRebalance(final long hostId, long futureOwnerId, Event event){ @@ -1007,21 +1004,21 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (s_logger.isDebugEnabled()) { s_logger.debug("Finishing rebalancing for the agent " + hostId + " with event " + event); } - + AgentAttache attache = findAttache(hostId); if (attache == null || !(attache instanceof ClusteredAgentAttache)) { s_logger.debug("Unable to find forward attache for the host id=" + hostId + ", assuming that the agent disconnected already"); _hostTransferDao.completeAgentTransfer(hostId); return; } - + ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)attache; - + if (success) { //1) Set transfer mode to false - so the agent can start processing requests normally forwardAttache.setTransferMode(false); - + //2) Get all transfer requests and route them to peer Request requestToTransfer = forwardAttache.getRequestToTransfer(); while (requestToTransfer != null) { @@ -1030,20 +1027,20 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (!routeResult) { logD(requestToTransfer.getBytes(), "Failed to route request to peer"); } - + requestToTransfer = forwardAttache.getRequestToTransfer(); } - + s_logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance to " + futureOwnerId); - + } else { failRebalance(hostId); } - + s_logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance"); _hostTransferDao.completeAgentTransfer(hostId); } - + protected void failRebalance(final long hostId){ try { s_logger.debug("Management server " + _nodeId + " failed to rebalance agent " + hostId); @@ -1053,19 +1050,19 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust s_logger.warn("Failed to reconnect host id=" + hostId + " as a part of failed rebalance task cleanup"); } } - + protected boolean startRebalance(final long hostId) { HostVO host = _hostDao.findById(hostId); - + if (host == null || host.getRemoved() != null) { s_logger.warn("Unable to find host record, fail start rebalancing process"); return false; } - + synchronized (_agents) { ClusteredDirectAgentAttache attache = (ClusteredDirectAgentAttache)_agents.get(hostId); if (attache != null && attache.getQueueSize() == 0 && attache.getNonRecurringListenersSize() == 0) { - handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true); + handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true); ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(hostId); if (forwardAttache == null) { s_logger.warn("Unable to create a forward attache for the host " + hostId + " as a part of rebalance process"); @@ -1086,27 +1083,27 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust _hostTransferDao.startAgentTransfer(hostId); return true; } - + protected void cleanupTransferMap(long msId) { List hostsJoingingCluster = 
_hostTransferDao.listHostsJoiningCluster(msId); - + for (HostTransferMapVO hostJoingingCluster : hostsJoingingCluster) { _hostTransferDao.remove(hostJoingingCluster.getId()); } - + List hostsLeavingCluster = _hostTransferDao.listHostsLeavingCluster(msId); for (HostTransferMapVO hostLeavingCluster : hostsLeavingCluster) { _hostTransferDao.remove(hostLeavingCluster.getId()); } } - - + + protected class RebalanceTask implements Runnable { Long hostId = null; Long currentOwnerId = null; Long futureOwnerId = null; - - + + public RebalanceTask(long hostId, long currentOwnerId, long futureOwnerId) { this.hostId = hostId; this.currentOwnerId = currentOwnerId; @@ -1122,10 +1119,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust rebalanceHost(hostId, currentOwnerId, futureOwnerId); } catch (Exception e) { s_logger.warn("Unable to rebalance host id=" + hostId, e); - } finally { - StackMaid.current().exitCleanup(); } } } - + } diff --git a/server/src/com/cloud/agent/manager/allocator/impl/TestingAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/TestingAllocator.java index 9951896ffd6..c8bbe02aece 100755 --- a/server/src/com/cloud/agent/manager/allocator/impl/TestingAllocator.java +++ b/server/src/com/cloud/agent/manager/allocator/impl/TestingAllocator.java @@ -26,14 +26,12 @@ import javax.inject.Inject; import org.springframework.stereotype.Component; import com.cloud.agent.manager.allocator.HostAllocator; -import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.host.Host; import com.cloud.host.Host.Type; import com.cloud.host.dao.HostDao; import com.cloud.offering.ServiceOffering; -import com.cloud.utils.component.ComponentLocator; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @@ -51,19 +49,19 @@ public class TestingAllocator implements HostAllocator { ExcludeList avoid, int returnUpTo) { return allocateTo(vmProfile, plan, type, avoid, returnUpTo, true); } - + @Override public List allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan plan, Type type, - ExcludeList avoid, int returnUpTo, boolean considerReservedCapacity) { - List availableHosts = new ArrayList(); - Host host = null; + ExcludeList avoid, int returnUpTo, boolean considerReservedCapacity) { + List availableHosts = new ArrayList(); + Host host = null; if (type == Host.Type.Routing && _routingHost != null) { - host = _hostDao.findById(_routingHost); + host = _hostDao.findById(_routingHost); } else if (type == Host.Type.Storage && _storageHost != null) { - host = _hostDao.findById(_storageHost); + host = _hostDao.findById(_storageHost); } if(host != null){ - availableHosts.add(host); + availableHosts.add(host); } return availableHosts; } @@ -82,9 +80,9 @@ public class TestingAllocator implements HostAllocator { value = (String)params.get(Host.Type.Storage.toString()); _storageHost = (value != null) ? 
Long.parseLong(value) : null; - + _name = name; - + return true; } diff --git a/server/src/com/cloud/alert/AlertManagerImpl.java b/server/src/com/cloud/alert/AlertManagerImpl.java index 2ad21eda9c1..bcd364e8cd3 100755 --- a/server/src/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/com/cloud/alert/AlertManagerImpl.java @@ -72,7 +72,6 @@ import com.cloud.storage.StoragePoolVO; import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.NumbersUtil; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.DB; import com.cloud.utils.db.SearchCriteria; import com.sun.mail.smtp.SMTPMessage; @@ -116,9 +115,9 @@ public class AlertManagerImpl implements AlertManager { private double _publicIPCapacityThreshold = 0.75; private double _privateIPCapacityThreshold = 0.75; private double _secondaryStorageCapacityThreshold = 0.75; - private double _vlanCapacityThreshold = 0.75; - private double _directNetworkPublicIpCapacityThreshold = 0.75; - private double _localStorageCapacityThreshold = 0.75; + private double _vlanCapacityThreshold = 0.75; + private double _directNetworkPublicIpCapacityThreshold = 0.75; + private double _localStorageCapacityThreshold = 0.75; Map _capacityTypeThresholdMap = new HashMap(); @Override @@ -149,7 +148,7 @@ public class AlertManagerImpl implements AlertManager { _emailAlert = new EmailAlert(emailAddresses, smtpHost, smtpPort, useAuth, smtpUsername, smtpPassword, emailSender, smtpDebug); - + String storageCapacityThreshold = _configDao.getValue(Config.StorageCapacityThreshold.key()); String cpuCapacityThreshold = _configDao.getValue(Config.CPUCapacityThreshold.key()); String memoryCapacityThreshold = _configDao.getValue(Config.MemoryCapacityThreshold.key()); @@ -160,7 +159,7 @@ public class AlertManagerImpl implements AlertManager { String vlanCapacityThreshold = _configDao.getValue(Config.VlanCapacityThreshold.key()); String directNetworkPublicIpCapacityThreshold = _configDao.getValue(Config.DirectNetworkPublicIpCapacityThreshold.key()); String localStorageCapacityThreshold = _configDao.getValue(Config.LocalStorageCapacityThreshold.key()); - + if (storageCapacityThreshold != null) { _storageCapacityThreshold = Double.parseDouble(storageCapacityThreshold); } @@ -174,10 +173,10 @@ public class AlertManagerImpl implements AlertManager { _memoryCapacityThreshold = Double.parseDouble(memoryCapacityThreshold); } if (publicIPCapacityThreshold != null) { - _publicIPCapacityThreshold = Double.parseDouble(publicIPCapacityThreshold); + _publicIPCapacityThreshold = Double.parseDouble(publicIPCapacityThreshold); } if (privateIPCapacityThreshold != null) { - _privateIPCapacityThreshold = Double.parseDouble(privateIPCapacityThreshold); + _privateIPCapacityThreshold = Double.parseDouble(privateIPCapacityThreshold); } if (secondaryStorageCapacityThreshold != null) { _secondaryStorageCapacityThreshold = Double.parseDouble(secondaryStorageCapacityThreshold); @@ -191,7 +190,7 @@ public class AlertManagerImpl implements AlertManager { if (localStorageCapacityThreshold != null) { _localStorageCapacityThreshold = Double.parseDouble(localStorageCapacityThreshold); } - + _capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_STORAGE, _storageCapacityThreshold); _capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, _storageAllocCapacityThreshold); _capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_CPU, _cpuCapacityThreshold); @@ -203,19 +202,19 @@ public class AlertManagerImpl implements AlertManager { 
_capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_DIRECT_ATTACHED_PUBLIC_IP, _directNetworkPublicIpCapacityThreshold); _capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_LOCAL_STORAGE, _localStorageCapacityThreshold); - + String capacityCheckPeriodStr = configs.get("capacity.check.period"); if (capacityCheckPeriodStr != null) { _capacityCheckPeriod = Long.parseLong(capacityCheckPeriodStr); if(_capacityCheckPeriod <= 0) - _capacityCheckPeriod = Long.parseLong(Config.CapacityCheckPeriod.getDefaultValue()); + _capacityCheckPeriod = Long.parseLong(Config.CapacityCheckPeriod.getDefaultValue()); } - + String cpuOverProvisioningFactorStr = configs.get("cpu.overprovisioning.factor"); if (cpuOverProvisioningFactorStr != null) { _cpuOverProvisioningFactor = NumbersUtil.parseFloat(cpuOverProvisioningFactorStr,1); if(_cpuOverProvisioningFactor < 1){ - _cpuOverProvisioningFactor = 1; + _cpuOverProvisioningFactor = 1; } } @@ -273,122 +272,122 @@ public class AlertManagerImpl implements AlertManager { // is stopped we updated the amount allocated, and when VM sync reports a changed state, we update // the amount allocated. Hopefully it's limited to 3 entry points and will keep the amount allocated // per host accurate. - + try { - - if (s_logger.isDebugEnabled()) { + + if (s_logger.isDebugEnabled()) { s_logger.debug("recalculating system capacity"); s_logger.debug("Executing cpu/ram capacity update"); } - - // Calculate CPU and RAM capacities - // get all hosts...even if they are not in 'UP' state - List hosts = _resourceMgr.listAllNotInMaintenanceHostsInOneZone(Host.Type.Routing, null); - for (HostVO host : hosts) { - _capacityMgr.updateCapacityForHost(host); - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("Done executing cpu/ram capacity update"); - s_logger.debug("Executing storage capacity update"); - } - // Calculate storage pool capacity - List storagePools = _storagePoolDao.listAll(); - for (StoragePoolVO pool : storagePools) { - long disk = _capacityMgr.getAllocatedPoolCapacity(pool, null); - if (pool.isShared()){ - _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, disk); - }else { - _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, disk); - } - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("Done executing storage capacity update"); - s_logger.debug("Executing capacity updates for public ip and Vlans"); - } - List datacenters = _dcDao.listAll(); - for (DataCenterVO datacenter : datacenters) { - long dcId = datacenter.getId(); - - //NOTE - //What happens if we have multiple vlans? 
Dashboard currently shows stats - //with no filter based on a vlan - //ideal way would be to remove out the vlan param, and filter only on dcId - //implementing the same - - // Calculate new Public IP capacity for Virtual Network - if (datacenter.getNetworkType() == NetworkType.Advanced){ - createOrUpdateIpCapacity(dcId, null, CapacityVO.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP, datacenter.getAllocationState()); - } - - // Calculate new Public IP capacity for Direct Attached Network - createOrUpdateIpCapacity(dcId, null, CapacityVO.CAPACITY_TYPE_DIRECT_ATTACHED_PUBLIC_IP, datacenter.getAllocationState()); - - if (datacenter.getNetworkType() == NetworkType.Advanced){ - //Calculate VLAN's capacity - createOrUpdateVlanCapacity(dcId, datacenter.getAllocationState()); + // Calculate CPU and RAM capacities + // get all hosts...even if they are not in 'UP' state + List hosts = _resourceMgr.listAllNotInMaintenanceHostsInOneZone(Host.Type.Routing, null); + for (HostVO host : hosts) { + _capacityMgr.updateCapacityForHost(host); + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Done executing cpu/ram capacity update"); + s_logger.debug("Executing storage capacity update"); + } + // Calculate storage pool capacity + List storagePools = _storagePoolDao.listAll(); + for (StoragePoolVO pool : storagePools) { + long disk = _capacityMgr.getAllocatedPoolCapacity(pool, null); + if (pool.isShared()){ + _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, disk); + }else { + _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, disk); } - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("Done capacity updates for public ip and Vlans"); - s_logger.debug("Executing capacity updates for private ip"); - } - - // Calculate new Private IP capacity - List pods = _podDao.listAll(); - for (HostPodVO pod : pods) { - long podId = pod.getId(); - long dcId = pod.getDataCenterId(); + } - createOrUpdateIpCapacity(dcId, podId, CapacityVO.CAPACITY_TYPE_PRIVATE_IP, _configMgr.findPodAllocationState(pod)); - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("Done executing capacity updates for private ip"); - s_logger.debug("Done recalculating system capacity"); - } + if (s_logger.isDebugEnabled()) { + s_logger.debug("Done executing storage capacity update"); + s_logger.debug("Executing capacity updates for public ip and Vlans"); + } + + List datacenters = _dcDao.listAll(); + for (DataCenterVO datacenter : datacenters) { + long dcId = datacenter.getId(); + + //NOTE + //What happens if we have multiple vlans? 
Dashboard currently shows stats + //with no filter based on a vlan + //ideal way would be to remove out the vlan param, and filter only on dcId + //implementing the same + + // Calculate new Public IP capacity for Virtual Network + if (datacenter.getNetworkType() == NetworkType.Advanced){ + createOrUpdateIpCapacity(dcId, null, CapacityVO.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP, datacenter.getAllocationState()); + } + + // Calculate new Public IP capacity for Direct Attached Network + createOrUpdateIpCapacity(dcId, null, CapacityVO.CAPACITY_TYPE_DIRECT_ATTACHED_PUBLIC_IP, datacenter.getAllocationState()); + + if (datacenter.getNetworkType() == NetworkType.Advanced){ + //Calculate VLAN's capacity + createOrUpdateVlanCapacity(dcId, datacenter.getAllocationState()); + } + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Done capacity updates for public ip and Vlans"); + s_logger.debug("Executing capacity updates for private ip"); + } + + // Calculate new Private IP capacity + List pods = _podDao.listAll(); + for (HostPodVO pod : pods) { + long podId = pod.getId(); + long dcId = pod.getDataCenterId(); + + createOrUpdateIpCapacity(dcId, podId, CapacityVO.CAPACITY_TYPE_PRIVATE_IP, _configMgr.findPodAllocationState(pod)); + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Done executing capacity updates for private ip"); + s_logger.debug("Done recalculating system capacity"); + } } catch (Throwable t) { - s_logger.error("Caught exception in recalculating capacity", t); + s_logger.error("Caught exception in recalculating capacity", t); } } - - + + private void createOrUpdateVlanCapacity(long dcId, AllocationState capacityState) { - - SearchCriteria capacitySC = _capacityDao.createSearchCriteria(); + + SearchCriteria capacitySC = _capacityDao.createSearchCriteria(); List capacities = _capacityDao.search(capacitySC, null); capacitySC = _capacityDao.createSearchCriteria(); capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, dcId); capacitySC.addAnd("capacityType", SearchCriteria.Op.EQ, Capacity.CAPACITY_TYPE_VLAN); capacities = _capacityDao.search(capacitySC, null); - - int totalVlans = _dcDao.countZoneVlans(dcId, false); - int allocatedVlans = _dcDao.countZoneVlans(dcId, true); - + + int totalVlans = _dcDao.countZoneVlans(dcId, false); + int allocatedVlans = _dcDao.countZoneVlans(dcId, true); + if (capacities.size() == 0){ - CapacityVO newVlanCapacity = new CapacityVO(null, dcId, null, null, allocatedVlans, totalVlans, Capacity.CAPACITY_TYPE_VLAN); - if (capacityState == AllocationState.Disabled){ - newVlanCapacity.setCapacityState(CapacityState.Disabled); - } + CapacityVO newVlanCapacity = new CapacityVO(null, dcId, null, null, allocatedVlans, totalVlans, Capacity.CAPACITY_TYPE_VLAN); + if (capacityState == AllocationState.Disabled){ + newVlanCapacity.setCapacityState(CapacityState.Disabled); + } _capacityDao.persist(newVlanCapacity); }else if ( !(capacities.get(0).getUsedCapacity() == allocatedVlans - && capacities.get(0).getTotalCapacity() == totalVlans) ){ - CapacityVO capacity = capacities.get(0); - capacity.setUsedCapacity(allocatedVlans); - capacity.setTotalCapacity(totalVlans); + && capacities.get(0).getTotalCapacity() == totalVlans) ){ + CapacityVO capacity = capacities.get(0); + capacity.setUsedCapacity(allocatedVlans); + capacity.setTotalCapacity(totalVlans); _capacityDao.update(capacity.getId(), capacity); } - - - } - public void createOrUpdateIpCapacity(Long dcId, Long podId, short capacityType, AllocationState capacityState){ + + } + + public void 
createOrUpdateIpCapacity(Long dcId, Long podId, short capacityType, AllocationState capacityState){ SearchCriteria capacitySC = _capacityDao.createSearchCriteria(); List capacities = _capacityDao.search(capacitySC, null); @@ -401,55 +400,55 @@ public class AlertManagerImpl implements AlertManager { int allocatedIPs; capacities = _capacityDao.search(capacitySC, null); if (capacityType == CapacityVO.CAPACITY_TYPE_PRIVATE_IP){ - totalIPs = _privateIPAddressDao.countIPs(podId, dcId, false); - allocatedIPs = _privateIPAddressDao.countIPs(podId, dcId, true); + totalIPs = _privateIPAddressDao.countIPs(podId, dcId, false); + allocatedIPs = _privateIPAddressDao.countIPs(podId, dcId, true); }else if (capacityType == CapacityVO.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP){ - totalIPs = _publicIPAddressDao.countIPsForNetwork(dcId, false, VlanType.VirtualNetwork); + totalIPs = _publicIPAddressDao.countIPsForNetwork(dcId, false, VlanType.VirtualNetwork); allocatedIPs = _publicIPAddressDao.countIPsForNetwork(dcId, true, VlanType.VirtualNetwork); }else { - totalIPs = _publicIPAddressDao.countIPsForNetwork(dcId, false, VlanType.DirectAttached); + totalIPs = _publicIPAddressDao.countIPsForNetwork(dcId, false, VlanType.DirectAttached); allocatedIPs = _publicIPAddressDao.countIPsForNetwork(dcId, true, VlanType.DirectAttached); } - + if (capacities.size() == 0){ - CapacityVO newPublicIPCapacity = new CapacityVO(null, dcId, podId, null, allocatedIPs, totalIPs, capacityType); - if (capacityState == AllocationState.Disabled){ - newPublicIPCapacity.setCapacityState(CapacityState.Disabled); - } + CapacityVO newPublicIPCapacity = new CapacityVO(null, dcId, podId, null, allocatedIPs, totalIPs, capacityType); + if (capacityState == AllocationState.Disabled){ + newPublicIPCapacity.setCapacityState(CapacityState.Disabled); + } _capacityDao.persist(newPublicIPCapacity); }else if ( !(capacities.get(0).getUsedCapacity() == allocatedIPs - && capacities.get(0).getTotalCapacity() == totalIPs) ){ - CapacityVO capacity = capacities.get(0); - capacity.setUsedCapacity(allocatedIPs); - capacity.setTotalCapacity(totalIPs); + && capacities.get(0).getTotalCapacity() == totalIPs) ){ + CapacityVO capacity = capacities.get(0); + capacity.setUsedCapacity(allocatedIPs); + capacity.setTotalCapacity(totalIPs); _capacityDao.update(capacity.getId(), capacity); } - + } class CapacityChecker extends TimerTask { @Override - public void run() { + public void run() { try { - s_logger.debug("Running Capacity Checker ... "); - checkForAlerts(); - s_logger.debug("Done running Capacity Checker ... "); + s_logger.debug("Running Capacity Checker ... "); + checkForAlerts(); + s_logger.debug("Done running Capacity Checker ... "); } catch (Throwable t) { s_logger.error("Exception in CapacityChecker", t); } } } - - + + public void checkForAlerts(){ - - recalculateCapacity(); + + recalculateCapacity(); // abort if we can't possibly send an alert... if (_emailAlert == null) { return; } - + //Get all datacenters, pods and clusters in the system. 
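// Editor's note: the zone, pod and cluster loops that follow all apply the same test,
// usedCapacity/totalCapacity against the per-type threshold configured earlier. A minimal
// sketch of that check, assuming the _capacityTypeThresholdMap field shown above is a
// Map<Short, Double>; the helper name exceedsThreshold is hypothetical, introduced only
// to make the shared logic explicit:
//
//   private boolean exceedsThreshold(double used, double total, short capacityType) {
//       // guard against divide-by-zero for capacity rows with no configured total
//       if (total == 0) {
//           return false;
//       }
//       Double threshold = _capacityTypeThresholdMap.get(capacityType);
//       return threshold != null && used / total > threshold;
//   }
//
// For example, with the 0.75 defaults set in configure(), a zone with 80 of 100 public
// IPs allocated (0.80 > 0.75) triggers an email alert, while 70 of 100 does not.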
List dataCenterList = _dcDao.listAll(); List clusterList = _clusterDao.listAll(); @@ -458,89 +457,89 @@ public class AlertManagerImpl implements AlertManager { List dataCenterCapacityTypes = getCapacityTypesAtZoneLevel(); List podCapacityTypes = getCapacityTypesAtPodLevel(); List clusterCapacityTypes = getCapacityTypesAtClusterLevel(); - + // Generate Alerts for Zone Level capacities for(DataCenterVO dc : dataCenterList){ - for (Short capacityType : dataCenterCapacityTypes){ - List capacity = new ArrayList(); - capacity = _capacityDao.findCapacityBy(capacityType.intValue(), dc.getId(), null, null); - - if (capacityType == Capacity.CAPACITY_TYPE_SECONDARY_STORAGE){ - capacity.add(getUsedStats(capacityType, dc.getId(), null, null)); - } - if (capacity == null || capacity.size() == 0){ - continue; - } - double totalCapacity = capacity.get(0).getTotalCapacity(); + for (Short capacityType : dataCenterCapacityTypes){ + List capacity = new ArrayList(); + capacity = _capacityDao.findCapacityBy(capacityType.intValue(), dc.getId(), null, null); + + if (capacityType == Capacity.CAPACITY_TYPE_SECONDARY_STORAGE){ + capacity.add(getUsedStats(capacityType, dc.getId(), null, null)); + } + if (capacity == null || capacity.size() == 0){ + continue; + } + double totalCapacity = capacity.get(0).getTotalCapacity(); double usedCapacity = capacity.get(0).getUsedCapacity(); if (totalCapacity != 0 && usedCapacity/totalCapacity > _capacityTypeThresholdMap.get(capacityType)){ - generateEmailAlert(dc, null, null, totalCapacity, usedCapacity, capacityType); + generateEmailAlert(dc, null, null, totalCapacity, usedCapacity, capacityType); } - } + } } - + // Generate Alerts for Pod Level capacities for( HostPodVO pod : podList){ - for (Short capacityType : podCapacityTypes){ - List capacity = _capacityDao.findCapacityBy(capacityType.intValue(), pod.getDataCenterId(), pod.getId(), null); - if (capacity == null || capacity.size() == 0){ - continue; - } - double totalCapacity = capacity.get(0).getTotalCapacity(); + for (Short capacityType : podCapacityTypes){ + List capacity = _capacityDao.findCapacityBy(capacityType.intValue(), pod.getDataCenterId(), pod.getId(), null); + if (capacity == null || capacity.size() == 0){ + continue; + } + double totalCapacity = capacity.get(0).getTotalCapacity(); double usedCapacity = capacity.get(0).getUsedCapacity(); if (totalCapacity != 0 && usedCapacity/totalCapacity > _capacityTypeThresholdMap.get(capacityType)){ - generateEmailAlert(ApiDBUtils.findZoneById(pod.getDataCenterId()), pod, null, - totalCapacity, usedCapacity, capacityType); + generateEmailAlert(ApiDBUtils.findZoneById(pod.getDataCenterId()), pod, null, + totalCapacity, usedCapacity, capacityType); } - } + } } - + // Generate Alerts for Cluster Level capacities for( ClusterVO cluster : clusterList){ - for (Short capacityType : clusterCapacityTypes){ - List capacity = new ArrayList(); - float overProvFactor = 1f; - capacity = _capacityDao.findCapacityBy(capacityType.intValue(), cluster.getDataCenterId(), null, cluster.getId()); - - if (capacityType == Capacity.CAPACITY_TYPE_STORAGE){ - capacity.add(getUsedStats(capacityType, cluster.getDataCenterId(), cluster.getPodId(), cluster.getId())); - } - if (capacity == null || capacity.size() == 0){ - continue; - } - if (capacityType == Capacity.CAPACITY_TYPE_CPU){ - overProvFactor = ApiDBUtils.getCpuOverprovisioningFactor(); - } - - double totalCapacity = capacity.get(0).getTotalCapacity() * overProvFactor; + for (Short capacityType : clusterCapacityTypes){ + List capacity = new 
ArrayList(); + float overProvFactor = 1f; + capacity = _capacityDao.findCapacityBy(capacityType.intValue(), cluster.getDataCenterId(), null, cluster.getId()); + + if (capacityType == Capacity.CAPACITY_TYPE_STORAGE){ + capacity.add(getUsedStats(capacityType, cluster.getDataCenterId(), cluster.getPodId(), cluster.getId())); + } + if (capacity == null || capacity.size() == 0){ + continue; + } + if (capacityType == Capacity.CAPACITY_TYPE_CPU){ + overProvFactor = ApiDBUtils.getCpuOverprovisioningFactor(); + } + + double totalCapacity = capacity.get(0).getTotalCapacity() * overProvFactor; double usedCapacity = capacity.get(0).getUsedCapacity() + capacity.get(0).getReservedCapacity(); if (totalCapacity != 0 && usedCapacity/totalCapacity > _capacityTypeThresholdMap.get(capacityType)){ - generateEmailAlert(ApiDBUtils.findZoneById(cluster.getDataCenterId()), ApiDBUtils.findPodById(cluster.getPodId()), cluster, - totalCapacity, usedCapacity, capacityType); + generateEmailAlert(ApiDBUtils.findZoneById(cluster.getDataCenterId()), ApiDBUtils.findPodById(cluster.getPodId()), cluster, + totalCapacity, usedCapacity, capacityType); } - } + } } - + } - + private SummedCapacity getUsedStats(short capacityType, long zoneId, Long podId, Long clusterId){ - CapacityVO capacity; - if (capacityType == Capacity.CAPACITY_TYPE_SECONDARY_STORAGE){ - capacity = _storageMgr.getSecondaryStorageUsedStats(null, zoneId); - }else{ - capacity = _storageMgr.getStoragePoolUsedStats(null, clusterId, podId, zoneId); - } - if (capacity != null){ - return new SummedCapacity(capacity.getUsedCapacity(), 0, capacity.getTotalCapacity(), capacityType, clusterId, podId); - }else{ - return null; - } - + CapacityVO capacity; + if (capacityType == Capacity.CAPACITY_TYPE_SECONDARY_STORAGE){ + capacity = _storageMgr.getSecondaryStorageUsedStats(null, zoneId); + }else{ + capacity = _storageMgr.getStoragePoolUsedStats(null, clusterId, podId, zoneId); + } + if (capacity != null){ + return new SummedCapacity(capacity.getUsedCapacity(), 0, capacity.getTotalCapacity(), capacityType, clusterId, podId); + }else{ + return null; + } + } - + private void generateEmailAlert(DataCenterVO dc, HostPodVO pod, ClusterVO cluster, double totalCapacity, double usedCapacity, short capacityType){ - - String msgSubject = null; + + String msgSubject = null; String msgContent = null; String totalStr; String usedStr; @@ -548,10 +547,10 @@ public class AlertManagerImpl implements AlertManager { short alertType = -1; Long podId = pod == null ? null : pod.getId(); Long clusterId = cluster == null ? 
null : cluster.getId(); - - switch (capacityType) { - - //Cluster Level + + switch (capacityType) { + + //Cluster Level case CapacityVO.CAPACITY_TYPE_MEMORY: msgSubject = "System Alert: Low Available Memory in cluster " +cluster.getName()+ " pod " +pod.getName()+ " of availability zone " + dc.getName(); totalStr = formatBytesToMegabytes(totalCapacity); @@ -587,24 +586,24 @@ public class AlertManagerImpl implements AlertManager { msgContent = "Unallocated storage space is low, total: " + totalStr + " MB, allocated: " + usedStr + " MB (" + pctStr + "%)"; alertType = ALERT_TYPE_LOCAL_STORAGE; break; - - //Pod Level + + //Pod Level case CapacityVO.CAPACITY_TYPE_PRIVATE_IP: - msgSubject = "System Alert: Number of unallocated private IPs is low in pod " +pod.getName()+ " of availability zone " + dc.getName(); - totalStr = Double.toString(totalCapacity); + msgSubject = "System Alert: Number of unallocated private IPs is low in pod " +pod.getName()+ " of availability zone " + dc.getName(); + totalStr = Double.toString(totalCapacity); usedStr = Double.toString(usedCapacity); - msgContent = "Number of unallocated private IPs is low, total: " + totalStr + ", allocated: " + usedStr + " (" + pctStr + "%)"; - alertType = ALERT_TYPE_PRIVATE_IP; - break; - - //Zone Level + msgContent = "Number of unallocated private IPs is low, total: " + totalStr + ", allocated: " + usedStr + " (" + pctStr + "%)"; + alertType = ALERT_TYPE_PRIVATE_IP; + break; + + //Zone Level case CapacityVO.CAPACITY_TYPE_SECONDARY_STORAGE: - msgSubject = "System Alert: Low Available Secondary Storage in availability zone " + dc.getName(); - totalStr = formatBytesToMegabytes(totalCapacity); + msgSubject = "System Alert: Low Available Secondary Storage in availability zone " + dc.getName(); + totalStr = formatBytesToMegabytes(totalCapacity); usedStr = formatBytesToMegabytes(usedCapacity); - msgContent = "Available secondary storage space is low, total: " + totalStr + " MB, used: " + usedStr + " MB (" + pctStr + "%)"; - alertType = ALERT_TYPE_SECONDARY_STORAGE; - break; + msgContent = "Available secondary storage space is low, total: " + totalStr + " MB, used: " + usedStr + " MB (" + pctStr + "%)"; + alertType = ALERT_TYPE_SECONDARY_STORAGE; + break; case CapacityVO.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP: msgSubject = "System Alert: Number of unallocated virtual network public IPs is low in availability zone " + dc.getName(); totalStr = Double.toString(totalCapacity); @@ -627,47 +626,47 @@ public class AlertManagerImpl implements AlertManager { alertType = ALERT_TYPE_VLAN; break; } - - try { - if (s_logger.isDebugEnabled()){ - s_logger.debug(msgSubject); - s_logger.debug(msgContent); - } - _emailAlert.sendAlert(alertType, dc.getId(), podId, clusterId, msgSubject, msgContent); - } catch (Exception ex) { + + try { + if (s_logger.isDebugEnabled()){ + s_logger.debug(msgSubject); + s_logger.debug(msgContent); + } + _emailAlert.sendAlert(alertType, dc.getId(), podId, clusterId, msgSubject, msgContent); + } catch (Exception ex) { s_logger.error("Exception in CapacityChecker", ex); - } + } } - + private List getCapacityTypesAtZoneLevel(){ - - List dataCenterCapacityTypes = new ArrayList(); - dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP); - dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_DIRECT_ATTACHED_PUBLIC_IP); - dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_SECONDARY_STORAGE); - dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_VLAN); - return dataCenterCapacityTypes; - + + List dataCenterCapacityTypes 
= new ArrayList(); + dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP); + dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_DIRECT_ATTACHED_PUBLIC_IP); + dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_SECONDARY_STORAGE); + dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_VLAN); + return dataCenterCapacityTypes; + } - + private List getCapacityTypesAtPodLevel(){ - - List podCapacityTypes = new ArrayList(); - podCapacityTypes.add(Capacity.CAPACITY_TYPE_PRIVATE_IP); - return podCapacityTypes; - + + List podCapacityTypes = new ArrayList(); + podCapacityTypes.add(Capacity.CAPACITY_TYPE_PRIVATE_IP); + return podCapacityTypes; + } - + private List getCapacityTypesAtClusterLevel(){ - - List clusterCapacityTypes = new ArrayList(); - clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_CPU); - clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_MEMORY); - clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_STORAGE); - clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED); - clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_LOCAL_STORAGE); - return clusterCapacityTypes; - + + List clusterCapacityTypes = new ArrayList(); + clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_CPU); + clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_MEMORY); + clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_STORAGE); + clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED); + clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_LOCAL_STORAGE); + return clusterCapacityTypes; + } class EmailAlert { @@ -735,12 +734,12 @@ public class AlertManagerImpl implements AlertManager { public void sendAlert(short alertType, long dataCenterId, Long podId, Long clusterId, String subject, String content) throws MessagingException, UnsupportedEncodingException { AlertVO alert = null; if ((alertType != AlertManager.ALERT_TYPE_HOST) && - (alertType != AlertManager.ALERT_TYPE_USERVM) && - (alertType != AlertManager.ALERT_TYPE_DOMAIN_ROUTER) && - (alertType != AlertManager.ALERT_TYPE_CONSOLE_PROXY) && - (alertType != AlertManager.ALERT_TYPE_SSVM) && - (alertType != AlertManager.ALERT_TYPE_STORAGE_MISC) && - (alertType != AlertManager.ALERT_TYPE_MANAGMENT_NODE)) { + (alertType != AlertManager.ALERT_TYPE_USERVM) && + (alertType != AlertManager.ALERT_TYPE_DOMAIN_ROUTER) && + (alertType != AlertManager.ALERT_TYPE_CONSOLE_PROXY) && + (alertType != AlertManager.ALERT_TYPE_SSVM) && + (alertType != AlertManager.ALERT_TYPE_STORAGE_MISC) && + (alertType != AlertManager.ALERT_TYPE_MANAGMENT_NODE)) { alert = _alertDao.getLastAlert(alertType, dataCenterId, podId, clusterId); } diff --git a/server/src/com/cloud/alert/ConsoleProxyAlertAdapter.java b/server/src/com/cloud/alert/ConsoleProxyAlertAdapter.java index 37e385021e1..7e744229bf7 100644 --- a/server/src/com/cloud/alert/ConsoleProxyAlertAdapter.java +++ b/server/src/com/cloud/alert/ConsoleProxyAlertAdapter.java @@ -25,13 +25,10 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import com.cloud.alert.AlertAdapter; -import com.cloud.alert.AlertManager; import com.cloud.consoleproxy.ConsoleProxyAlertEventArgs; import com.cloud.consoleproxy.ConsoleProxyManager; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.events.SubscriptionMgr; import com.cloud.vm.ConsoleProxyVO; import com.cloud.vm.dao.ConsoleProxyDao; @@ -39,178 +36,178 @@ import com.cloud.vm.dao.ConsoleProxyDao; @Component 
@Local(value=AlertAdapter.class) public class ConsoleProxyAlertAdapter implements AlertAdapter { - - private static final Logger s_logger = Logger.getLogger(ConsoleProxyAlertAdapter.class); - - @Inject private AlertManager _alertMgr; + + private static final Logger s_logger = Logger.getLogger(ConsoleProxyAlertAdapter.class); + + @Inject private AlertManager _alertMgr; private String _name; - - @Inject private DataCenterDao _dcDao; - @Inject private ConsoleProxyDao _consoleProxyDao; - + + @Inject private DataCenterDao _dcDao; + @Inject private ConsoleProxyDao _consoleProxyDao; + public void onProxyAlert(Object sender, ConsoleProxyAlertEventArgs args) { - if(s_logger.isDebugEnabled()) - s_logger.debug("received console proxy alert"); - - DataCenterVO dc = _dcDao.findById(args.getZoneId()); - ConsoleProxyVO proxy = args.getProxy(); - if(proxy == null) - proxy = _consoleProxyDao.findById(args.getProxyId()); - - switch(args.getType()) { - case ConsoleProxyAlertEventArgs.PROXY_CREATED : - if(s_logger.isDebugEnabled()) - s_logger.debug("New console proxy created, zone: " + dc.getName() + ", proxy: " + - proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + - proxy.getPrivateIpAddress()); - break; - - case ConsoleProxyAlertEventArgs.PROXY_UP : - if(s_logger.isDebugEnabled()) - s_logger.debug("Console proxy is up, zone: " + dc.getName() + ", proxy: " + - proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + - proxy.getPrivateIpAddress()); - - _alertMgr.sendAlert( - AlertManager.ALERT_TYPE_CONSOLE_PROXY, - args.getZoneId(), - proxy.getPodIdToDeployIn(), - "Console proxy up in zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() - + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()), - "Console proxy up (zone " + dc.getName() + ")" - ); - break; - - case ConsoleProxyAlertEventArgs.PROXY_DOWN : - if(s_logger.isDebugEnabled()) - s_logger.debug("Console proxy is down, zone: " + dc.getName() + ", proxy: " + - proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + - (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())); - - _alertMgr.sendAlert( - AlertManager.ALERT_TYPE_CONSOLE_PROXY, - args.getZoneId(), - proxy.getPodIdToDeployIn(), - "Console proxy down in zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() - + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()), - "Console proxy down (zone " + dc.getName() + ")" - ); - break; - - case ConsoleProxyAlertEventArgs.PROXY_REBOOTED : - if(s_logger.isDebugEnabled()) - s_logger.debug("Console proxy is rebooted, zone: " + dc.getName() + ", proxy: " + - proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + - (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())); - - _alertMgr.sendAlert( - AlertManager.ALERT_TYPE_CONSOLE_PROXY, - args.getZoneId(), - proxy.getPodIdToDeployIn(), - "Console proxy rebooted in zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() - + ", private IP: " + (proxy.getPrivateIpAddress() == null ? 
"N/A" : proxy.getPrivateIpAddress()), - "Console proxy rebooted (zone " + dc.getName() + ")" - ); - break; - - case ConsoleProxyAlertEventArgs.PROXY_CREATE_FAILURE : - if(s_logger.isDebugEnabled()) - s_logger.debug("Console proxy creation failure, zone: " + dc.getName() + ", proxy: " + - proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + - (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())); - - _alertMgr.sendAlert( - AlertManager.ALERT_TYPE_CONSOLE_PROXY, - args.getZoneId(), - proxy.getPodIdToDeployIn(), - "Console proxy creation failure. zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() - + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()) - + ", error details: " + args.getMessage(), - "Console proxy creation failure (zone " + dc.getName() + ")" - ); - break; - - case ConsoleProxyAlertEventArgs.PROXY_START_FAILURE : - if(s_logger.isDebugEnabled()) - s_logger.debug("Console proxy startup failure, zone: " + dc.getName() + ", proxy: " + - proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + - (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())); - - _alertMgr.sendAlert( - AlertManager.ALERT_TYPE_CONSOLE_PROXY, - args.getZoneId(), - proxy.getPodIdToDeployIn(), - "Console proxy startup failure. zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() - + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()) - + ", error details: " + args.getMessage(), - "Console proxy startup failure (zone " + dc.getName() + ")" - ); - break; - - case ConsoleProxyAlertEventArgs.PROXY_FIREWALL_ALERT : - if(s_logger.isDebugEnabled()) - s_logger.debug("Console proxy firewall alert, zone: " + dc.getName() + ", proxy: " + - proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + - (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())); - - _alertMgr.sendAlert( - AlertManager.ALERT_TYPE_CONSOLE_PROXY, - args.getZoneId(), - proxy.getPodIdToDeployIn(), - "Failed to open console proxy firewall port. zone: " + dc.getName() + ", proxy: " + proxy.getHostName() - + ", public IP: " + proxy.getPublicIpAddress() - + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()), - "Console proxy alert (zone " + dc.getName() + ")" - ); - break; - - case ConsoleProxyAlertEventArgs.PROXY_STORAGE_ALERT : - if(s_logger.isDebugEnabled()) - s_logger.debug("Console proxy storage alert, zone: " + dc.getName() + ", proxy: " + - proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + - proxy.getPrivateIpAddress() + ", message: " + args.getMessage()); - - _alertMgr.sendAlert( - AlertManager.ALERT_TYPE_STORAGE_MISC, - args.getZoneId(), - proxy.getPodIdToDeployIn(), - "Console proxy storage issue. 
zone: " + dc.getName() + ", message: " + args.getMessage(), - "Console proxy alert (zone " + dc.getName() + ")" - ); - break; - } + if(s_logger.isDebugEnabled()) + s_logger.debug("received console proxy alert"); + + DataCenterVO dc = _dcDao.findById(args.getZoneId()); + ConsoleProxyVO proxy = args.getProxy(); + if(proxy == null) + proxy = _consoleProxyDao.findById(args.getProxyId()); + + switch(args.getType()) { + case ConsoleProxyAlertEventArgs.PROXY_CREATED : + if(s_logger.isDebugEnabled()) + s_logger.debug("New console proxy created, zone: " + dc.getName() + ", proxy: " + + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + + proxy.getPrivateIpAddress()); + break; + + case ConsoleProxyAlertEventArgs.PROXY_UP : + if(s_logger.isDebugEnabled()) + s_logger.debug("Console proxy is up, zone: " + dc.getName() + ", proxy: " + + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + + proxy.getPrivateIpAddress()); + + _alertMgr.sendAlert( + AlertManager.ALERT_TYPE_CONSOLE_PROXY, + args.getZoneId(), + proxy.getPodIdToDeployIn(), + "Console proxy up in zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()), + "Console proxy up (zone " + dc.getName() + ")" + ); + break; + + case ConsoleProxyAlertEventArgs.PROXY_DOWN : + if(s_logger.isDebugEnabled()) + s_logger.debug("Console proxy is down, zone: " + dc.getName() + ", proxy: " + + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())); + + _alertMgr.sendAlert( + AlertManager.ALERT_TYPE_CONSOLE_PROXY, + args.getZoneId(), + proxy.getPodIdToDeployIn(), + "Console proxy down in zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()), + "Console proxy down (zone " + dc.getName() + ")" + ); + break; + + case ConsoleProxyAlertEventArgs.PROXY_REBOOTED : + if(s_logger.isDebugEnabled()) + s_logger.debug("Console proxy is rebooted, zone: " + dc.getName() + ", proxy: " + + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())); + + _alertMgr.sendAlert( + AlertManager.ALERT_TYPE_CONSOLE_PROXY, + args.getZoneId(), + proxy.getPodIdToDeployIn(), + "Console proxy rebooted in zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()), + "Console proxy rebooted (zone " + dc.getName() + ")" + ); + break; + + case ConsoleProxyAlertEventArgs.PROXY_CREATE_FAILURE : + if(s_logger.isDebugEnabled()) + s_logger.debug("Console proxy creation failure, zone: " + dc.getName() + ", proxy: " + + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())); + + _alertMgr.sendAlert( + AlertManager.ALERT_TYPE_CONSOLE_PROXY, + args.getZoneId(), + proxy.getPodIdToDeployIn(), + "Console proxy creation failure. 
zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()) + + ", error details: " + args.getMessage(), + "Console proxy creation failure (zone " + dc.getName() + ")" + ); + break; + + case ConsoleProxyAlertEventArgs.PROXY_START_FAILURE : + if(s_logger.isDebugEnabled()) + s_logger.debug("Console proxy startup failure, zone: " + dc.getName() + ", proxy: " + + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())); + + _alertMgr.sendAlert( + AlertManager.ALERT_TYPE_CONSOLE_PROXY, + args.getZoneId(), + proxy.getPodIdToDeployIn(), + "Console proxy startup failure. zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()) + + ", error details: " + args.getMessage(), + "Console proxy startup failure (zone " + dc.getName() + ")" + ); + break; + + case ConsoleProxyAlertEventArgs.PROXY_FIREWALL_ALERT : + if(s_logger.isDebugEnabled()) + s_logger.debug("Console proxy firewall alert, zone: " + dc.getName() + ", proxy: " + + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress())); + + _alertMgr.sendAlert( + AlertManager.ALERT_TYPE_CONSOLE_PROXY, + args.getZoneId(), + proxy.getPodIdToDeployIn(), + "Failed to open console proxy firewall port. zone: " + dc.getName() + ", proxy: " + proxy.getHostName() + + ", public IP: " + proxy.getPublicIpAddress() + + ", private IP: " + (proxy.getPrivateIpAddress() == null ? "N/A" : proxy.getPrivateIpAddress()), + "Console proxy alert (zone " + dc.getName() + ")" + ); + break; + + case ConsoleProxyAlertEventArgs.PROXY_STORAGE_ALERT : + if(s_logger.isDebugEnabled()) + s_logger.debug("Console proxy storage alert, zone: " + dc.getName() + ", proxy: " + + proxy.getHostName() + ", public IP: " + proxy.getPublicIpAddress() + ", private IP: " + + proxy.getPrivateIpAddress() + ", message: " + args.getMessage()); + + _alertMgr.sendAlert( + AlertManager.ALERT_TYPE_STORAGE_MISC, + args.getZoneId(), + proxy.getPodIdToDeployIn(), + "Console proxy storage issue. 
zone: " + dc.getName() + ", message: " + args.getMessage(), + "Console proxy alert (zone " + dc.getName() + ")" + ); + break; + } } - - @Override - public boolean configure(String name, Map params) - throws ConfigurationException { - - if (s_logger.isInfoEnabled()) - s_logger.info("Start configuring console proxy alert manager : " + name); - try { - SubscriptionMgr.getInstance().subscribe(ConsoleProxyManager.ALERT_SUBJECT, this, "onProxyAlert"); - } catch (SecurityException e) { - throw new ConfigurationException("Unable to register console proxy event subscription, exception: " + e); - } catch (NoSuchMethodException e) { - throw new ConfigurationException("Unable to register console proxy event subscription, exception: " + e); - } - - return true; - } + @Override + public boolean configure(String name, Map params) + throws ConfigurationException { - @Override - public String getName() { - return _name; - } + if (s_logger.isInfoEnabled()) + s_logger.info("Start configuring console proxy alert manager : " + name); - @Override - public boolean start() { - return true; - } + try { + SubscriptionMgr.getInstance().subscribe(ConsoleProxyManager.ALERT_SUBJECT, this, "onProxyAlert"); + } catch (SecurityException e) { + throw new ConfigurationException("Unable to register console proxy event subscription, exception: " + e); + } catch (NoSuchMethodException e) { + throw new ConfigurationException("Unable to register console proxy event subscription, exception: " + e); + } - @Override - public boolean stop() { - return true; - } + return true; + } + + @Override + public String getName() { + return _name; + } + + @Override + public boolean start() { + return true; + } + + @Override + public boolean stop() { + return true; + } } diff --git a/server/src/com/cloud/api/ApiDBUtils.java b/server/src/com/cloud/api/ApiDBUtils.java index 5e8a044691d..af061b11cff 100755 --- a/server/src/com/cloud/api/ApiDBUtils.java +++ b/server/src/com/cloud/api/ApiDBUtils.java @@ -22,6 +22,9 @@ import java.util.List; import java.util.Map; import java.util.Set; +import javax.annotation.PostConstruct; +import javax.inject.Inject; + import org.apache.cloudstack.api.ApiConstants.HostDetails; import org.apache.cloudstack.api.ApiConstants.VMDetails; import org.apache.cloudstack.api.response.AccountResponse; @@ -39,6 +42,7 @@ import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VolumeResponse; +import org.springframework.stereotype.Component; import com.cloud.api.query.dao.AccountJoinDao; import com.cloud.api.query.dao.AsyncJobJoinDao; @@ -114,11 +118,11 @@ import com.cloud.network.NetworkManager; import com.cloud.network.NetworkProfile; import com.cloud.network.NetworkRuleConfigVO; import com.cloud.network.NetworkVO; +import com.cloud.network.Networks.TrafficType; import com.cloud.network.PhysicalNetworkServiceProvider; import com.cloud.network.PhysicalNetworkVO; -import com.cloud.network.Site2SiteVpnGatewayVO; import com.cloud.network.Site2SiteCustomerGatewayVO; -import com.cloud.network.Networks.TrafficType; +import com.cloud.network.Site2SiteVpnGatewayVO; import com.cloud.network.as.AutoScalePolicy; import com.cloud.network.as.AutoScalePolicyConditionMapVO; import com.cloud.network.as.AutoScalePolicyVO; @@ -139,15 +143,15 @@ import com.cloud.network.dao.FirewallRulesDao; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.LoadBalancerDao; import 
com.cloud.network.dao.NetworkDao; -import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.NetworkDomainDao; import com.cloud.network.dao.NetworkRuleConfigDao; +import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.PhysicalNetworkServiceProviderDao; import com.cloud.network.dao.PhysicalNetworkServiceProviderVO; import com.cloud.network.dao.PhysicalNetworkTrafficTypeDao; import com.cloud.network.dao.PhysicalNetworkTrafficTypeVO; -import com.cloud.network.dao.Site2SiteVpnGatewayDao; import com.cloud.network.dao.Site2SiteCustomerGatewayDao; +import com.cloud.network.dao.Site2SiteVpnGatewayDao; import com.cloud.network.router.VirtualRouter; import com.cloud.network.rules.FirewallRuleVO; import com.cloud.network.security.SecurityGroup; @@ -160,6 +164,7 @@ import com.cloud.network.vpc.VpcManager; import com.cloud.network.vpc.VpcOffering; import com.cloud.network.vpc.VpcVO; import com.cloud.network.vpc.dao.StaticRouteDao; +import com.cloud.network.vpc.dao.VpcDao; import com.cloud.network.vpc.dao.VpcGatewayDao; import com.cloud.network.vpc.dao.VpcOfferingDao; import com.cloud.offering.NetworkOffering; @@ -229,7 +234,6 @@ import com.cloud.user.dao.UserStatisticsDao; import com.cloud.uservm.UserVm; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; import com.cloud.vm.ConsoleProxyVO; import com.cloud.vm.DomainRouterVO; import com.cloud.vm.InstanceGroup; @@ -246,194 +250,287 @@ import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.UserVmDetailsDao; import com.cloud.vm.dao.VMInstanceDao; -import com.cloud.network.vpc.dao.VpcDao; +@Component public class ApiDBUtils { private static ManagementServer _ms; - public static AsyncJobManager _asyncMgr; - private static SecurityGroupManager _securityGroupMgr; - private static StorageManager _storageMgr; - private static UserVmManager _userVmMgr; - private static NetworkManager _networkMgr; - private static StatsCollector _statsCollector; + @Inject static AsyncJobManager _asyncMgr; + @Inject static SecurityGroupManager _securityGroupMgr; + @Inject static StorageManager _storageMgr; + @Inject static UserVmManager _userVmMgr; + @Inject static NetworkManager _networkMgr; + @Inject static StatsCollector _statsCollector; - private static AccountDao _accountDao; - private static AccountVlanMapDao _accountVlanMapDao; - private static ClusterDao _clusterDao; - private static CapacityDao _capacityDao; - private static DiskOfferingDao _diskOfferingDao; - private static DomainDao _domainDao; - private static DomainRouterDao _domainRouterDao; - private static DomainRouterJoinDao _domainRouterJoinDao; - private static GuestOSDao _guestOSDao; - private static GuestOSCategoryDao _guestOSCategoryDao; - private static HostDao _hostDao; - private static IPAddressDao _ipAddressDao; - private static LoadBalancerDao _loadBalancerDao; - private static SecurityGroupDao _securityGroupDao; - private static SecurityGroupJoinDao _securityGroupJoinDao; - private static NetworkRuleConfigDao _networkRuleConfigDao; - private static HostPodDao _podDao; - private static ServiceOfferingDao _serviceOfferingDao; - private static SnapshotDao _snapshotDao; - private static StoragePoolDao _storagePoolDao; - private static VMTemplateDao _templateDao; - private static VMTemplateDetailsDao _templateDetailsDao; - private static VMTemplateHostDao _templateHostDao; - private static VMTemplateSwiftDao _templateSwiftDao; - private static 
VMTemplateS3Dao _templateS3Dao; - private static UploadDao _uploadDao; - private static UserDao _userDao; - private static UserStatisticsDao _userStatsDao; - private static UserVmDao _userVmDao; - private static UserVmJoinDao _userVmJoinDao; - private static VlanDao _vlanDao; - private static VolumeDao _volumeDao; - private static Site2SiteVpnGatewayDao _site2SiteVpnGatewayDao; - private static Site2SiteCustomerGatewayDao _site2SiteCustomerGatewayDao; - private static VolumeHostDao _volumeHostDao; - private static DataCenterDao _zoneDao; - private static NetworkOfferingDao _networkOfferingDao; - private static NetworkDao _networkDao; - private static PhysicalNetworkDao _physicalNetworkDao; - private static ConfigurationService _configMgr; - private static ConfigurationDao _configDao; - private static ConsoleProxyDao _consoleProxyDao; - private static FirewallRulesCidrsDao _firewallCidrsDao; - private static VMInstanceDao _vmDao; - private static ResourceLimitService _resourceLimitMgr; - private static ProjectService _projectMgr; - private static ResourceManager _resourceMgr; - private static AccountDetailsDao _accountDetailsDao; - private static NetworkDomainDao _networkDomainDao; - private static HighAvailabilityManager _haMgr; - private static VpcManager _vpcMgr; - private static TaggedResourceService _taggedResourceService; - private static UserVmDetailsDao _userVmDetailsDao; - private static SSHKeyPairDao _sshKeyPairDao; + @Inject static AccountDao _accountDao; + @Inject static AccountVlanMapDao _accountVlanMapDao; + @Inject static ClusterDao _clusterDao; + @Inject static CapacityDao _capacityDao; + @Inject static DiskOfferingDao _diskOfferingDao; + @Inject static DomainDao _domainDao; + @Inject static DomainRouterDao _domainRouterDao; + @Inject static DomainRouterJoinDao _domainRouterJoinDao; + @Inject static GuestOSDao _guestOSDao; + @Inject static GuestOSCategoryDao _guestOSCategoryDao; + @Inject static HostDao _hostDao; + @Inject static IPAddressDao _ipAddressDao; + @Inject static LoadBalancerDao _loadBalancerDao; + @Inject static SecurityGroupDao _securityGroupDao; + @Inject static SecurityGroupJoinDao _securityGroupJoinDao; + @Inject static NetworkRuleConfigDao _networkRuleConfigDao; + @Inject static HostPodDao _podDao; + @Inject static ServiceOfferingDao _serviceOfferingDao; + @Inject static SnapshotDao _snapshotDao; + @Inject static StoragePoolDao _storagePoolDao; + @Inject static VMTemplateDao _templateDao; + @Inject static VMTemplateDetailsDao _templateDetailsDao; + @Inject static VMTemplateHostDao _templateHostDao; + @Inject static VMTemplateSwiftDao _templateSwiftDao; + @Inject static VMTemplateS3Dao _templateS3Dao; + @Inject static UploadDao _uploadDao; + @Inject static UserDao _userDao; + @Inject static UserStatisticsDao _userStatsDao; + @Inject static UserVmDao _userVmDao; + @Inject static UserVmJoinDao _userVmJoinDao; + @Inject static VlanDao _vlanDao; + @Inject static VolumeDao _volumeDao; + @Inject static Site2SiteVpnGatewayDao _site2SiteVpnGatewayDao; + @Inject static Site2SiteCustomerGatewayDao _site2SiteCustomerGatewayDao; + @Inject static VolumeHostDao _volumeHostDao; + @Inject static DataCenterDao _zoneDao; + @Inject static NetworkOfferingDao _networkOfferingDao; + @Inject static NetworkDao _networkDao; + @Inject static PhysicalNetworkDao _physicalNetworkDao; + @Inject static ConfigurationService _configMgr; + @Inject static ConfigurationDao _configDao; + @Inject static ConsoleProxyDao _consoleProxyDao; + @Inject static FirewallRulesCidrsDao _firewallCidrsDao; + 
@Inject static VMInstanceDao _vmDao; + @Inject static ResourceLimitService _resourceLimitMgr; + @Inject static ProjectService _projectMgr; + @Inject static ResourceManager _resourceMgr; + @Inject static AccountDetailsDao _accountDetailsDao; + @Inject static NetworkDomainDao _networkDomainDao; + @Inject static HighAvailabilityManager _haMgr; + @Inject static VpcManager _vpcMgr; + @Inject static TaggedResourceService _taggedResourceService; + @Inject static UserVmDetailsDao _userVmDetailsDao; + @Inject static SSHKeyPairDao _sshKeyPairDao; - private static ConditionDao _asConditionDao; - private static AutoScalePolicyConditionMapDao _asPolicyConditionMapDao; - private static AutoScaleVmGroupPolicyMapDao _asVmGroupPolicyMapDao; - private static AutoScalePolicyDao _asPolicyDao; - private static AutoScaleVmProfileDao _asVmProfileDao; - private static AutoScaleVmGroupDao _asVmGroupDao; - private static CounterDao _counterDao; - private static ResourceTagJoinDao _tagJoinDao; - private static EventJoinDao _eventJoinDao; - private static InstanceGroupJoinDao _vmGroupJoinDao; - private static UserAccountJoinDao _userAccountJoinDao; - private static ProjectJoinDao _projectJoinDao; - private static ProjectAccountJoinDao _projectAccountJoinDao; - private static ProjectInvitationJoinDao _projectInvitationJoinDao; - private static HostJoinDao _hostJoinDao; - private static VolumeJoinDao _volJoinDao; - private static StoragePoolJoinDao _poolJoinDao; - private static AccountJoinDao _accountJoinDao; - private static AsyncJobJoinDao _jobJoinDao; + @Inject static ConditionDao _asConditionDao; + @Inject static AutoScalePolicyConditionMapDao _asPolicyConditionMapDao; + @Inject static AutoScaleVmGroupPolicyMapDao _asVmGroupPolicyMapDao; + @Inject static AutoScalePolicyDao _asPolicyDao; + @Inject static AutoScaleVmProfileDao _asVmProfileDao; + @Inject static AutoScaleVmGroupDao _asVmGroupDao; + @Inject static CounterDao _counterDao; + @Inject static ResourceTagJoinDao _tagJoinDao; + @Inject static EventJoinDao _eventJoinDao; + @Inject static InstanceGroupJoinDao _vmGroupJoinDao; + @Inject static UserAccountJoinDao _userAccountJoinDao; + @Inject static ProjectJoinDao _projectJoinDao; + @Inject static ProjectAccountJoinDao _projectAccountJoinDao; + @Inject static ProjectInvitationJoinDao _projectInvitationJoinDao; + @Inject static HostJoinDao _hostJoinDao; + @Inject static VolumeJoinDao _volJoinDao; + @Inject static StoragePoolJoinDao _poolJoinDao; + @Inject static AccountJoinDao _accountJoinDao; + @Inject static AsyncJobJoinDao _jobJoinDao; - private static PhysicalNetworkTrafficTypeDao _physicalNetworkTrafficTypeDao; - private static PhysicalNetworkServiceProviderDao _physicalNetworkServiceProviderDao; - private static FirewallRulesDao _firewallRuleDao; - private static StaticRouteDao _staticRouteDao; - private static VpcGatewayDao _vpcGatewayDao; - private static VpcDao _vpcDao; - private static VpcOfferingDao _vpcOfferingDao; - private static SnapshotPolicyDao _snapshotPolicyDao; - private static AsyncJobDao _asyncJobDao; + @Inject static PhysicalNetworkTrafficTypeDao _physicalNetworkTrafficTypeDao; + @Inject static PhysicalNetworkServiceProviderDao _physicalNetworkServiceProviderDao; + @Inject static FirewallRulesDao _firewallRuleDao; + @Inject static StaticRouteDao _staticRouteDao; + @Inject static VpcGatewayDao _vpcGatewayDao; + @Inject static VpcDao _vpcDao; + @Inject static VpcOfferingDao _vpcOfferingDao; + @Inject static SnapshotPolicyDao _snapshotPolicyDao; + @Inject static AsyncJobDao _asyncJobDao; - 
static { - _ms = (ManagementServer) ComponentLocator.getComponent(ManagementServer.Name); - ComponentLocator locator = ComponentLocator.getLocator(ManagementServer.Name); - _asyncMgr = locator.getManager(AsyncJobManager.class); - _securityGroupMgr = locator.getManager(SecurityGroupManager.class); - _storageMgr = locator.getManager(StorageManager.class); - _userVmMgr = locator.getManager(UserVmManager.class); - _networkMgr = locator.getManager(NetworkManager.class); - _configMgr = locator.getManager(ConfigurationService.class); + @Inject private ManagementServer ms; + @Inject public AsyncJobManager asyncMgr; + @Inject private SecurityGroupManager securityGroupMgr; + @Inject private StorageManager storageMgr; + @Inject private UserVmManager userVmMgr; + @Inject private NetworkManager networkMgr; + @Inject private StatsCollector statsCollector; - _accountDao = locator.getDao(AccountDao.class); - _accountVlanMapDao = locator.getDao(AccountVlanMapDao.class); - _clusterDao = locator.getDao(ClusterDao.class); - _capacityDao = locator.getDao(CapacityDao.class); - _diskOfferingDao = locator.getDao(DiskOfferingDao.class); - _domainDao = locator.getDao(DomainDao.class); - _domainRouterDao = locator.getDao(DomainRouterDao.class); - _domainRouterJoinDao = locator.getDao(DomainRouterJoinDao.class); - _guestOSDao = locator.getDao(GuestOSDao.class); - _guestOSCategoryDao = locator.getDao(GuestOSCategoryDao.class); - _hostDao = locator.getDao(HostDao.class); - _ipAddressDao = locator.getDao(IPAddressDao.class); - _loadBalancerDao = locator.getDao(LoadBalancerDao.class); - _networkRuleConfigDao = locator.getDao(NetworkRuleConfigDao.class); - _podDao = locator.getDao(HostPodDao.class); - _serviceOfferingDao = locator.getDao(ServiceOfferingDao.class); - _snapshotDao = locator.getDao(SnapshotDao.class); - _storagePoolDao = locator.getDao(StoragePoolDao.class); - _templateDao = locator.getDao(VMTemplateDao.class); - _templateDetailsDao = locator.getDao(VMTemplateDetailsDao.class); - _templateHostDao = locator.getDao(VMTemplateHostDao.class); - _templateSwiftDao = locator.getDao(VMTemplateSwiftDao.class); - _templateS3Dao = locator.getDao(VMTemplateS3Dao.class); - _uploadDao = locator.getDao(UploadDao.class); - _userDao = locator.getDao(UserDao.class); - _userStatsDao = locator.getDao(UserStatisticsDao.class); - _userVmDao = locator.getDao(UserVmDao.class); - _userVmJoinDao = locator.getDao(UserVmJoinDao.class); - _vlanDao = locator.getDao(VlanDao.class); - _volumeDao = locator.getDao(VolumeDao.class); - _site2SiteVpnGatewayDao = locator.getDao(Site2SiteVpnGatewayDao.class); - _site2SiteCustomerGatewayDao = locator.getDao(Site2SiteCustomerGatewayDao.class); - _volumeHostDao = locator.getDao(VolumeHostDao.class); - _zoneDao = locator.getDao(DataCenterDao.class); - _securityGroupDao = locator.getDao(SecurityGroupDao.class); - _securityGroupJoinDao = locator.getDao(SecurityGroupJoinDao.class); - _networkOfferingDao = locator.getDao(NetworkOfferingDao.class); - _networkDao = locator.getDao(NetworkDao.class); - _physicalNetworkDao = locator.getDao(PhysicalNetworkDao.class); - _configDao = locator.getDao(ConfigurationDao.class); - _consoleProxyDao = locator.getDao(ConsoleProxyDao.class); - _firewallCidrsDao = locator.getDao(FirewallRulesCidrsDao.class); - _vmDao = locator.getDao(VMInstanceDao.class); - _resourceLimitMgr = locator.getManager(ResourceLimitService.class); - _projectMgr = locator.getManager(ProjectService.class); - _resourceMgr = locator.getManager(ResourceManager.class); - _accountDetailsDao = 
locator.getDao(AccountDetailsDao.class); - _networkDomainDao = locator.getDao(NetworkDomainDao.class); - _haMgr = locator.getManager(HighAvailabilityManager.class); - _vpcMgr = locator.getManager(VpcManager.class); - _taggedResourceService = locator.getManager(TaggedResourceService.class); - _sshKeyPairDao = locator.getDao(SSHKeyPairDao.class); - _userVmDetailsDao = locator.getDao(UserVmDetailsDao.class); - _asConditionDao = locator.getDao(ConditionDao.class); - _asPolicyDao = locator.getDao(AutoScalePolicyDao.class); - _asPolicyConditionMapDao = locator.getDao(AutoScalePolicyConditionMapDao.class); - _counterDao = locator.getDao(CounterDao.class); - _asVmGroupPolicyMapDao = locator.getDao(AutoScaleVmGroupPolicyMapDao.class); - _tagJoinDao = locator.getDao(ResourceTagJoinDao.class); - _vmGroupJoinDao = locator.getDao(InstanceGroupJoinDao.class); - _eventJoinDao = locator.getDao(EventJoinDao.class); - _userAccountJoinDao = locator.getDao(UserAccountJoinDao.class); - _projectJoinDao = locator.getDao(ProjectJoinDao.class); - _projectAccountJoinDao = locator.getDao(ProjectAccountJoinDao.class); - _projectInvitationJoinDao = locator.getDao(ProjectInvitationJoinDao.class); - _hostJoinDao = locator.getDao(HostJoinDao.class); - _volJoinDao = locator.getDao(VolumeJoinDao.class); - _poolJoinDao = locator.getDao(StoragePoolJoinDao.class); - _accountJoinDao = locator.getDao(AccountJoinDao.class); - _jobJoinDao = locator.getDao(AsyncJobJoinDao.class); + @Inject private AccountDao accountDao; + @Inject private AccountVlanMapDao accountVlanMapDao; + @Inject private ClusterDao clusterDao; + @Inject private CapacityDao capacityDao; + @Inject private DiskOfferingDao diskOfferingDao; + @Inject private DomainDao domainDao; + @Inject private DomainRouterDao domainRouterDao; + @Inject private DomainRouterJoinDao domainRouterJoinDao; + @Inject private GuestOSDao guestOSDao; + @Inject private GuestOSCategoryDao guestOSCategoryDao; + @Inject private HostDao hostDao; + @Inject private IPAddressDao ipAddressDao; + @Inject private LoadBalancerDao loadBalancerDao; + @Inject private SecurityGroupDao securityGroupDao; + @Inject private SecurityGroupJoinDao securityGroupJoinDao; + @Inject private NetworkRuleConfigDao networkRuleConfigDao; + @Inject private HostPodDao podDao; + @Inject private ServiceOfferingDao serviceOfferingDao; + @Inject private SnapshotDao snapshotDao; + @Inject private StoragePoolDao storagePoolDao; + @Inject private VMTemplateDao templateDao; + @Inject private VMTemplateDetailsDao templateDetailsDao; + @Inject private VMTemplateHostDao templateHostDao; + @Inject private VMTemplateSwiftDao templateSwiftDao; + @Inject private VMTemplateS3Dao templateS3Dao; + @Inject private UploadDao uploadDao; + @Inject private UserDao userDao; + @Inject private UserStatisticsDao userStatsDao; + @Inject private UserVmDao userVmDao; + @Inject private UserVmJoinDao userVmJoinDao; + @Inject private VlanDao vlanDao; + @Inject private VolumeDao volumeDao; + @Inject private Site2SiteVpnGatewayDao site2SiteVpnGatewayDao; + @Inject private Site2SiteCustomerGatewayDao site2SiteCustomerGatewayDao; + @Inject private VolumeHostDao volumeHostDao; + @Inject private DataCenterDao zoneDao; + @Inject private NetworkOfferingDao networkOfferingDao; + @Inject private NetworkDao networkDao; + @Inject private PhysicalNetworkDao physicalNetworkDao; + @Inject private ConfigurationService configMgr; + @Inject private ConfigurationDao configDao; + @Inject private ConsoleProxyDao consoleProxyDao; + @Inject private FirewallRulesCidrsDao 
firewallCidrsDao; + @Inject private VMInstanceDao vmDao; + @Inject private ResourceLimitService resourceLimitMgr; + @Inject private ProjectService projectMgr; + @Inject private ResourceManager resourceMgr; + @Inject private AccountDetailsDao accountDetailsDao; + @Inject private NetworkDomainDao networkDomainDao; + @Inject private HighAvailabilityManager haMgr; + @Inject private VpcManager vpcMgr; + @Inject private TaggedResourceService taggedResourceService; + @Inject private UserVmDetailsDao userVmDetailsDao; + @Inject private SSHKeyPairDao sshKeyPairDao; + + @Inject private ConditionDao asConditionDao; + @Inject private AutoScalePolicyConditionMapDao asPolicyConditionMapDao; + @Inject private AutoScaleVmGroupPolicyMapDao asVmGroupPolicyMapDao; + @Inject private AutoScalePolicyDao asPolicyDao; + @Inject private AutoScaleVmProfileDao asVmProfileDao; + @Inject private AutoScaleVmGroupDao asVmGroupDao; + @Inject private CounterDao counterDao; + @Inject private ResourceTagJoinDao tagJoinDao; + @Inject private EventJoinDao eventJoinDao; + @Inject private InstanceGroupJoinDao vmGroupJoinDao; + @Inject private UserAccountJoinDao userAccountJoinDao; + @Inject private ProjectJoinDao projectJoinDao; + @Inject private ProjectAccountJoinDao projectAccountJoinDao; + @Inject private ProjectInvitationJoinDao projectInvitationJoinDao; + @Inject private HostJoinDao hostJoinDao; + @Inject private VolumeJoinDao volJoinDao; + @Inject private StoragePoolJoinDao poolJoinDao; + @Inject private AccountJoinDao accountJoinDao; + @Inject private AsyncJobJoinDao jobJoinDao; - _physicalNetworkTrafficTypeDao = locator.getDao(PhysicalNetworkTrafficTypeDao.class); - _physicalNetworkServiceProviderDao = locator.getDao(PhysicalNetworkServiceProviderDao.class); - _firewallRuleDao = locator.getDao(FirewallRulesDao.class); - _staticRouteDao = locator.getDao(StaticRouteDao.class); - _vpcGatewayDao = locator.getDao(VpcGatewayDao.class); - _asVmProfileDao = locator.getDao(AutoScaleVmProfileDao.class); - _asVmGroupDao = locator.getDao(AutoScaleVmGroupDao.class); - _vpcDao = locator.getDao(VpcDao.class); - _vpcOfferingDao = locator.getDao(VpcOfferingDao.class); - _snapshotPolicyDao = locator.getDao(SnapshotPolicyDao.class); - _asyncJobDao = locator.getDao(AsyncJobDao.class); + @Inject private PhysicalNetworkTrafficTypeDao physicalNetworkTrafficTypeDao; + @Inject private PhysicalNetworkServiceProviderDao physicalNetworkServiceProviderDao; + @Inject private FirewallRulesDao firewallRuleDao; + @Inject private StaticRouteDao staticRouteDao; + @Inject private VpcGatewayDao vpcGatewayDao; + @Inject private VpcDao vpcDao; + @Inject private VpcOfferingDao vpcOfferingDao; + @Inject private SnapshotPolicyDao snapshotPolicyDao; + @Inject private AsyncJobDao asyncJobDao; + + @PostConstruct + void init() { + _ms = ms; + _asyncMgr = asyncMgr; + _securityGroupMgr = securityGroupMgr; + _storageMgr = storageMgr; + _userVmMgr = userVmMgr; + _networkMgr = networkMgr; + _configMgr = configMgr; + + _accountDao = accountDao; + _accountVlanMapDao = accountVlanMapDao; + _clusterDao = clusterDao; + _capacityDao = capacityDao; + _diskOfferingDao = diskOfferingDao; + _domainDao = domainDao; + _domainRouterDao = domainRouterDao; + _domainRouterJoinDao = domainRouterJoinDao; + _guestOSDao = guestOSDao; + _guestOSCategoryDao = guestOSCategoryDao; + _hostDao = hostDao; + _ipAddressDao = ipAddressDao; + _loadBalancerDao = loadBalancerDao; + _networkRuleConfigDao = networkRuleConfigDao; + _podDao = podDao; + _serviceOfferingDao = serviceOfferingDao; + 
_snapshotDao = snapshotDao; + _storagePoolDao = storagePoolDao; + _templateDao = templateDao; + _templateDetailsDao = templateDetailsDao; + _templateHostDao = templateHostDao; + _templateSwiftDao = templateSwiftDao; + _templateS3Dao = templateS3Dao; + _uploadDao = uploadDao; + _userDao = userDao; + _userStatsDao = userStatsDao; + _userVmDao = userVmDao; + _userVmJoinDao = userVmJoinDao; + _vlanDao = vlanDao; + _volumeDao = volumeDao; + _site2SiteVpnGatewayDao = site2SiteVpnGatewayDao; + _site2SiteCustomerGatewayDao = site2SiteCustomerGatewayDao; + _volumeHostDao = volumeHostDao; + _zoneDao = zoneDao; + _securityGroupDao = securityGroupDao; + _securityGroupJoinDao = securityGroupJoinDao; + _networkOfferingDao = networkOfferingDao; + _networkDao = networkDao; + _physicalNetworkDao = physicalNetworkDao; + _configDao = configDao; + _consoleProxyDao = consoleProxyDao; + _firewallCidrsDao = firewallCidrsDao; + _vmDao = vmDao; + _resourceLimitMgr = resourceLimitMgr; + _projectMgr = projectMgr; + _resourceMgr = resourceMgr; + _accountDetailsDao = accountDetailsDao; + _networkDomainDao = networkDomainDao; + _haMgr = haMgr; + _vpcMgr = vpcMgr; + _taggedResourceService = taggedResourceService; + _sshKeyPairDao = sshKeyPairDao; + _userVmDetailsDao = userVmDetailsDao; + _asConditionDao = asConditionDao; + _asPolicyDao = asPolicyDao; + _asPolicyConditionMapDao = asPolicyConditionMapDao; + _counterDao = counterDao; + _asVmGroupPolicyMapDao = asVmGroupPolicyMapDao; + _tagJoinDao = tagJoinDao; + _vmGroupJoinDao = vmGroupJoinDao; + _eventJoinDao = eventJoinDao; + _userAccountJoinDao = userAccountJoinDao; + _projectJoinDao = projectJoinDao; + _projectAccountJoinDao = projectAccountJoinDao; + _projectInvitationJoinDao = projectInvitationJoinDao; + _hostJoinDao = hostJoinDao; + _volJoinDao = volJoinDao; + _poolJoinDao = poolJoinDao; + _accountJoinDao = accountJoinDao; + _jobJoinDao = jobJoinDao; + + _physicalNetworkTrafficTypeDao = physicalNetworkTrafficTypeDao; + _physicalNetworkServiceProviderDao = physicalNetworkServiceProviderDao; + _firewallRuleDao = firewallRuleDao; + _staticRouteDao = staticRouteDao; + _vpcGatewayDao = vpcGatewayDao; + _asVmProfileDao = asVmProfileDao; + _asVmGroupDao = asVmGroupDao; + _vpcDao = vpcDao; + _vpcOfferingDao = vpcOfferingDao; + _snapshotPolicyDao = snapshotPolicyDao; + _asyncJobDao = asyncJobDao; // Note: stats collector should already have been initialized by this time, otherwise a null instance is returned _statsCollector = StatsCollector.getInstance(); @@ -611,7 +708,7 @@ public class ApiDBUtils { } public static boolean isChildDomain(long parentId, long childId) { - return _domainDao.isChildDomain(parentId, childId); + return _domainDao.isChildDomain(parentId, childId); } public static DomainRouterVO findDomainRouterById(Long routerId) { @@ -1016,7 +1113,7 @@ public class ApiDBUtils { else scaleDownPolicyIds.add(autoScalePolicy.getId()); } - } + } public static String getKeyPairName(String sshPublicKey) { SSHKeyPairVO sshKeyPair = _sshKeyPairDao.findByPublicKey(sshPublicKey); //key might be removed prior to this point @@ -1226,7 +1323,7 @@ public class ApiDBUtils { } public static DomainRouterResponse fillRouterDetails(DomainRouterResponse vrData, DomainRouterJoinVO vr){ - return _domainRouterJoinDao.setDomainRouterResponse(vrData, vr); + return _domainRouterJoinDao.setDomainRouterResponse(vrData, vr); } public static List newDomainRouterView(VirtualRouter vr){ @@ -1238,7 +1335,7 @@ public class ApiDBUtils { } public static UserVmResponse fillVmDetails(UserVmResponse 
vmData, UserVmJoinVO vm){ - return _userVmJoinDao.setUserVmResponse(vmData, vm); + return _userVmJoinDao.setUserVmResponse(vmData, vm); } public static List newUserVmView(UserVm... userVms){ @@ -1250,7 +1347,7 @@ public class ApiDBUtils { } public static SecurityGroupResponse fillSecurityGroupDetails(SecurityGroupResponse vsgData, SecurityGroupJoinVO sg){ - return _securityGroupJoinDao.setSecurityGroupResponse(vsgData, sg); + return _securityGroupJoinDao.setSecurityGroupResponse(vsgData, sg); } public static List newSecurityGroupView(SecurityGroup sg){ @@ -1312,7 +1409,7 @@ public class ApiDBUtils { } public static ProjectResponse fillProjectDetails(ProjectResponse rsp, ProjectJoinVO proj){ - return _projectJoinDao.setProjectResponse(rsp,proj); + return _projectJoinDao.setProjectResponse(rsp,proj); } public static List newProjectView(Project proj){ @@ -1344,7 +1441,7 @@ public class ApiDBUtils { } public static HostResponse fillHostDetails(HostResponse vrData, HostJoinVO vr){ - return _hostJoinDao.setHostResponse(vrData, vr); + return _hostJoinDao.setHostResponse(vrData, vr); } public static List newHostView(Host vr){ @@ -1358,42 +1455,42 @@ public class ApiDBUtils { public static VolumeResponse fillVolumeDetails(VolumeResponse vrData, VolumeJoinVO vr){ return _volJoinDao.setVolumeResponse(vrData, vr); - } + } - public static List newVolumeView(Volume vr){ - return _volJoinDao.newVolumeView(vr); - } + public static List newVolumeView(Volume vr){ + return _volJoinDao.newVolumeView(vr); + } - public static StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO vr) { - return _poolJoinDao.newStoragePoolResponse(vr); - } + public static StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO vr) { + return _poolJoinDao.newStoragePoolResponse(vr); + } - public static StoragePoolResponse fillStoragePoolDetails(StoragePoolResponse vrData, StoragePoolJoinVO vr){ + public static StoragePoolResponse fillStoragePoolDetails(StoragePoolResponse vrData, StoragePoolJoinVO vr){ return _poolJoinDao.setStoragePoolResponse(vrData, vr); - } + } - public static List newStoragePoolView(StoragePool vr){ - return _poolJoinDao.newStoragePoolView(vr); - } + public static List newStoragePoolView(StoragePool vr){ + return _poolJoinDao.newStoragePoolView(vr); + } - public static AccountResponse newAccountResponse(AccountJoinVO ve) { - return _accountJoinDao.newAccountResponse(ve); - } + public static AccountResponse newAccountResponse(AccountJoinVO ve) { + return _accountJoinDao.newAccountResponse(ve); + } - public static AccountJoinVO newAccountView(Account e){ - return _accountJoinDao.newAccountView(e); - } + public static AccountJoinVO newAccountView(Account e){ + return _accountJoinDao.newAccountView(e); + } - public static AccountJoinVO findAccountViewById(Long accountId) { - return _accountJoinDao.findByIdIncludingRemoved(accountId); - } + public static AccountJoinVO findAccountViewById(Long accountId) { + return _accountJoinDao.findByIdIncludingRemoved(accountId); + } - public static AsyncJobResponse newAsyncJobResponse(AsyncJobJoinVO ve) { - return _jobJoinDao.newAsyncJobResponse(ve); - } + public static AsyncJobResponse newAsyncJobResponse(AsyncJobJoinVO ve) { + return _jobJoinDao.newAsyncJobResponse(ve); + } - public static AsyncJobJoinVO newAsyncJobView(AsyncJob e){ - return _jobJoinDao.newAsyncJobView(e); - } + public static AsyncJobJoinVO newAsyncJobView(AsyncJob e){ + return _jobJoinDao.newAsyncJobView(e); + } } diff --git a/server/src/com/cloud/api/ApiDispatcher.java 
b/server/src/com/cloud/api/ApiDispatcher.java index a0ef6e24358..0df37f154fc 100755 --- a/server/src/com/cloud/api/ApiDispatcher.java +++ b/server/src/com/cloud/api/ApiDispatcher.java @@ -24,92 +24,82 @@ import java.text.ParseException; import java.util.ArrayList; import java.util.Calendar; import java.util.Date; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.StringTokenizer; import java.util.regex.Matcher; -import com.cloud.dao.EntityManager; -import com.cloud.utils.ReflectUtil; +import javax.annotation.PostConstruct; +import javax.inject.Inject; + import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.InfrastructureEntity; -import org.apache.cloudstack.acl.Role; -import org.apache.cloudstack.api.*; +import org.apache.cloudstack.api.ACL; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.BaseAsyncCreateCmd; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.BaseCmd.CommandType; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.EntityReference; +import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.PlugService; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.Validate; +import org.apache.cloudstack.api.command.user.event.ListEventsCmd; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import org.apache.cloudstack.api.BaseCmd.CommandType; -import org.apache.cloudstack.api.command.user.event.ListEventsCmd; import com.cloud.async.AsyncCommandQueued; import com.cloud.async.AsyncJobManager; import com.cloud.configuration.Config; import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.dao.EntityManager; import com.cloud.exception.AccountLimitException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; -import com.cloud.network.dao.NetworkDao; -import com.cloud.server.ManagementServer; -import com.cloud.storage.dao.VMTemplateDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.UserContext; import com.cloud.utils.DateUtil; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.component.ComponentLocator; -import com.cloud.utils.component.Inject; -import com.cloud.utils.component.PluggableService; -import com.cloud.utils.db.GenericDao; +import com.cloud.utils.ReflectUtil; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.exception.CSExceptionErrorCode; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.uuididentity.dao.IdentityDao; -// ApiDispatcher: A class that dispatches API commands to the appropriate manager for execution. 
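+// ApiDispatcher dispatches API commands to the appropriate manager for execution.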
+@Component
 public class ApiDispatcher {
     private static final Logger s_logger = Logger.getLogger(ApiDispatcher.class.getName());
-    ComponentLocator _locator;
     Long _createSnapshotQueueSizeLimit;
-    @Inject private AsyncJobManager _asyncMgr = null;
-    @Inject private AccountManager _accountMgr = null;
+    @Inject AsyncJobManager _asyncMgr = null;
+    @Inject AccountManager _accountMgr = null;
     @Inject EntityManager _entityMgr = null;
-    @Inject IdentityDao _identityDao = null;
-    Map> _daoNameMap = new HashMap>();
-    // singleton class
-    private static ApiDispatcher s_instance = ApiDispatcher.getInstance();
+    private static ApiDispatcher s_instance;
     public static ApiDispatcher getInstance() {
-        if (s_instance == null) {
-            s_instance = ComponentLocator.inject(ApiDispatcher.class);
-        }
         return s_instance;
     }
-    protected ApiDispatcher() {
-        super();
-        _locator = ComponentLocator.getLocator(ManagementServer.Name);
-        ConfigurationDao configDao = _locator.getDao(ConfigurationDao.class);
-        Map configs = configDao.getConfiguration();
-        String strSnapshotLimit = configs.get(Config.ConcurrentSnapshotsThresholdPerHost.key());
-        if (strSnapshotLimit != null) {
-            Long snapshotLimit = NumbersUtil.parseLong(strSnapshotLimit, 1L);
-            if (snapshotLimit <= 0) {
-                s_logger.debug("Global config parameter " + Config.ConcurrentSnapshotsThresholdPerHost.toString()
-                        + " is less or equal 0; defaulting to unlimited");
-            } else {
-                _createSnapshotQueueSizeLimit = snapshotLimit;
-            }
-        }
-        _daoNameMap.put("com.cloud.network.Network", NetworkDao.class);
-        _daoNameMap.put("com.cloud.template.VirtualMachineTemplate", VMTemplateDao.class);
+    public ApiDispatcher() {
+    }
+
+    @PostConstruct
+    void init() {
+        s_instance = this;
+    }
+
+    public void setCreateSnapshotQueueSizeLimit(Long snapshotLimit) {
+        _createSnapshotQueueSizeLimit = snapshotLimit;
     }
     public void dispatchCreateCmd(BaseAsyncCreateCmd cmd, Map params) {
-        processParameters(cmd, params);
+        processParameters(cmd, params);
         try {
             UserContext ctx = UserContext.current();
@@ -206,113 +196,113 @@ public class ApiDispatcher {
         } catch (Throwable t) {
             if (t instanceof InvalidParameterValueException) {
-                // earlier, we'd log the db id as part of the log message, but now since we've pushed
-                // the id into a IdentityProxy object, we would need to dump that object alongwith the
-                // message.
-                InvalidParameterValueException ref = (InvalidParameterValueException) t;
-                ServerApiException ex = new ServerApiException(BaseCmd.PARAM_ERROR, t.getMessage());
+                // earlier, we'd log the db id as part of the log message, but now since we've pushed
+                // the id into an IdentityProxy object, we would need to dump that object along with the
+                // message.
+                InvalidParameterValueException ref = (InvalidParameterValueException) t;
+                ServerApiException ex = new ServerApiException(BaseCmd.PARAM_ERROR, t.getMessage());
                 // copy over the IdentityProxy information as well and throw the serverapiexception.
                 ArrayList idList = ref.getIdProxyList();
                 if (idList != null) {
-                    // Iterate through entire arraylist and copy over each proxy id.
-                    for (int i = 0 ; i < idList.size(); i++) {
-                        ex.addProxyObject(idList.get(i));
-                        s_logger.info(t.getMessage() + " uuid: " + idList.get(i));
-                    }
+                    // Iterate through entire arraylist and copy over each proxy id.
+                    for (int i = 0 ; i < idList.size(); i++) {
+                        ex.addProxyObject(idList.get(i));
+                        s_logger.info(t.getMessage() + " uuid: " + idList.get(i));
+                    }
                 } else {
-                    s_logger.info(t.getMessage());
+                    s_logger.info(t.getMessage());
                 }
                 // Also copy over the cserror code.
-                ex.setCSErrorCode(ref.getCSErrorCode());
+                ex.setCSErrorCode(ref.getCSErrorCode());
                 throw ex;
             } else if(t instanceof IllegalArgumentException) {
-                throw new ServerApiException(BaseCmd.PARAM_ERROR, t.getMessage());
+                throw new ServerApiException(BaseCmd.PARAM_ERROR, t.getMessage());
             } else if (t instanceof PermissionDeniedException) {
-                PermissionDeniedException ref = (PermissionDeniedException)t;
-                ServerApiException ex = new ServerApiException(BaseCmd.ACCOUNT_ERROR, t.getMessage());
+                PermissionDeniedException ref = (PermissionDeniedException)t;
+                ServerApiException ex = new ServerApiException(BaseCmd.ACCOUNT_ERROR, t.getMessage());
                 // copy over the IdentityProxy information as well and throw the serverapiexception.
-                ArrayList idList = ref.getIdProxyList();
+                ArrayList idList = ref.getIdProxyList();
                 if (idList != null) {
-                    // Iterate through entire arraylist and copy over each proxy id.
-                    for (int i = 0 ; i < idList.size(); i++) {
-                        ex.addProxyObject(idList.get(i));
-                        s_logger.info("PermissionDenied: " + t.getMessage() + "uuid: " + idList.get(i));
-                    }
-                } else {
-                    s_logger.info("PermissionDenied: " + t.getMessage());
-                }
-                // Also copy over the cserror code.
-                ex.setCSErrorCode(ref.getCSErrorCode());
-                throw ex;
-            } else if (t instanceof AccountLimitException) {
-                AccountLimitException ref = (AccountLimitException)t;
-                ServerApiException ex = new ServerApiException(BaseCmd.ACCOUNT_RESOURCE_LIMIT_ERROR, t.getMessage());
-                // copy over the IdentityProxy information as well and throw the serverapiexception.
-                ArrayList idList = ref.getIdProxyList();
-                if (idList != null) {
-                    // Iterate through entire arraylist and copy over each proxy id.
-                    for (int i = 0 ; i < idList.size(); i++) {
-                        ex.addProxyObject(idList.get(i));
-                        s_logger.info(t.getMessage() + "uuid: " + idList.get(i));
-                    }
+                    // Iterate through entire arraylist and copy over each proxy id.
+                    for (int i = 0 ; i < idList.size(); i++) {
+                        ex.addProxyObject(idList.get(i));
+                        s_logger.info("PermissionDenied: " + t.getMessage() + " uuid: " + idList.get(i));
+                    }
                 } else {
-                    s_logger.info(t.getMessage());
+                    s_logger.info("PermissionDenied: " + t.getMessage());
                 }
                 // Also copy over the cserror code.
-                ex.setCSErrorCode(ref.getCSErrorCode());
+                ex.setCSErrorCode(ref.getCSErrorCode());
+                throw ex;
+            } else if (t instanceof AccountLimitException) {
+                AccountLimitException ref = (AccountLimitException)t;
+                ServerApiException ex = new ServerApiException(BaseCmd.ACCOUNT_RESOURCE_LIMIT_ERROR, t.getMessage());
+                // copy over the IdentityProxy information as well and throw the serverapiexception.
+                ArrayList idList = ref.getIdProxyList();
+                if (idList != null) {
+                    // Iterate through entire arraylist and copy over each proxy id.
+                    for (int i = 0 ; i < idList.size(); i++) {
+                        ex.addProxyObject(idList.get(i));
+                        s_logger.info(t.getMessage() + " uuid: " + idList.get(i));
+                    }
+                } else {
+                    s_logger.info(t.getMessage());
+                }
+                // Also copy over the cserror code.
+                ex.setCSErrorCode(ref.getCSErrorCode());
                 throw ex;
             } else if (t instanceof InsufficientCapacityException) {
-                InsufficientCapacityException ref = (InsufficientCapacityException)t;
-                ServerApiException ex = new ServerApiException(BaseCmd.INSUFFICIENT_CAPACITY_ERROR, t.getMessage());
+                InsufficientCapacityException ref = (InsufficientCapacityException)t;
+                ServerApiException ex = new ServerApiException(BaseCmd.INSUFFICIENT_CAPACITY_ERROR, t.getMessage());
                 // copy over the IdentityProxy information as well and throw the serverapiexception.
-                ArrayList idList = ref.getIdProxyList();
+                ArrayList idList = ref.getIdProxyList();
                 if (idList != null) {
-                    // Iterate through entire arraylist and copy over each proxy id.
-                    for (int i = 0 ; i < idList.size(); i++) {
-                        ex.addProxyObject(idList.get(i));
-                        s_logger.info(t.getMessage() + "uuid: " + idList.get(i));
-                    }
+                    // Iterate through entire arraylist and copy over each proxy id.
+                    for (int i = 0 ; i < idList.size(); i++) {
+                        ex.addProxyObject(idList.get(i));
+                        s_logger.info(t.getMessage() + " uuid: " + idList.get(i));
+                    }
                 } else {
-                    s_logger.info(t.getMessage());
+                    s_logger.info(t.getMessage());
                 }
                 // Also copy over the cserror code
-                ex.setCSErrorCode(ref.getCSErrorCode());
+                ex.setCSErrorCode(ref.getCSErrorCode());
                 throw ex;
             } else if (t instanceof ResourceAllocationException) {
-                ResourceAllocationException ref = (ResourceAllocationException)t;
+                ResourceAllocationException ref = (ResourceAllocationException)t;
                 ServerApiException ex = new ServerApiException(BaseCmd.RESOURCE_ALLOCATION_ERROR, t.getMessage());
                 // copy over the IdentityProxy information as well and throw the serverapiexception.
                 ArrayList idList = ref.getIdProxyList();
                 if (idList != null) {
-                    // Iterate through entire arraylist and copy over each proxy id.
-                    for (int i = 0 ; i < idList.size(); i++) {
-                        String id = idList.get(i);
-                        ex.addProxyObject(id);
-                        s_logger.warn("Exception: " + t.getMessage() + "uuid: " + id);
-                    }
+                    // Iterate through entire arraylist and copy over each proxy id.
+                    for (int i = 0 ; i < idList.size(); i++) {
+                        String id = idList.get(i);
+                        ex.addProxyObject(id);
+                        s_logger.warn("Exception: " + t.getMessage() + " uuid: " + id);
+                    }
                 } else {
-                    s_logger.warn("Exception: ", t);
+                    s_logger.warn("Exception: ", t);
                 }
                 // Also copy over the cserror code.
-                ex.setCSErrorCode(ref.getCSErrorCode());
+                ex.setCSErrorCode(ref.getCSErrorCode());
                 throw ex;
             } else if (t instanceof ResourceUnavailableException) {
-                ResourceUnavailableException ref = (ResourceUnavailableException)t;
+                ResourceUnavailableException ref = (ResourceUnavailableException)t;
                 ServerApiException ex = new ServerApiException(BaseCmd.RESOURCE_UNAVAILABLE_ERROR, t.getMessage());
                 // copy over the IdentityProxy information as well and throw the serverapiexception.
                 ArrayList idList = ref.getIdProxyList();
                 if (idList != null) {
-                    // Iterate through entire arraylist and copy over each proxy id.
-                    for (int i = 0 ; i < idList.size(); i++) {
-                        String id = idList.get(i);
-                        ex.addProxyObject(id);
-                        s_logger.warn("Exception: " + t.getMessage() + "uuid: " + id);
-                    }
+                    // Iterate through entire arraylist and copy over each proxy id.
+                    for (int i = 0 ; i < idList.size(); i++) {
+                        String id = idList.get(i);
+                        ex.addProxyObject(id);
+                        s_logger.warn("Exception: " + t.getMessage() + " uuid: " + id);
+                    }
                 } else {
-                    s_logger.warn("Exception: ", t);
+                    s_logger.warn("Exception: ", t);
                 }
                 // Also copy over the cserror code.
- ex.setCSErrorCode(ref.getCSErrorCode()); + ex.setCSErrorCode(ref.getCSErrorCode()); throw ex; } else if (t instanceof AsyncCommandQueued) { throw (AsyncCommandQueued) t; @@ -323,18 +313,18 @@ public class ApiDispatcher { s_logger.error("Exception while executing " + cmd.getClass().getSimpleName() + ":", t); ServerApiException ex; if (UserContext.current().getCaller().getType() == Account.ACCOUNT_TYPE_ADMIN) { - ex = new ServerApiException(BaseCmd.INTERNAL_ERROR, t.getMessage()); + ex = new ServerApiException(BaseCmd.INTERNAL_ERROR, t.getMessage()); } else { ex = new ServerApiException(BaseCmd.INTERNAL_ERROR, BaseCmd.USER_ERROR_MESSAGE); } ex.setCSErrorCode(CSExceptionErrorCode.getCSErrCode(ex.getClass().getName())); - throw ex; + throw ex; } } } @SuppressWarnings({ "unchecked", "rawtypes" }) - public static void processParameters(BaseCmd cmd, Map params) { + public static void processParameters(BaseCmd cmd, Map params) { List entitiesToAccess = new ArrayList(); Map unpackedParams = cmd.unpackParams(params); @@ -348,7 +338,7 @@ public class ApiDispatcher { if ((unpackedParams.get(ApiConstants.PAGE) == null) && (pageSize != null && pageSize != BaseListCmd.PAGESIZE_UNLIMITED)) { ServerApiException ex = new ServerApiException(BaseCmd.PARAM_ERROR, "\"page\" parameter is required when \"pagesize\" is specified"); ex.setCSErrorCode(CSExceptionErrorCode.getCSErrCode(ex.getClass().getName())); - throw ex; + throw ex; } else if (pageSize == null && (unpackedParams.get(ApiConstants.PAGE) != null)) { throw new ServerApiException(BaseCmd.PARAM_ERROR, "\"pagesize\" parameter is required when \"page\" is specified"); } @@ -399,14 +389,14 @@ public class ApiDispatcher { throw new ServerApiException(BaseCmd.PARAM_ERROR, "Unable to execute API command " + cmd.getCommandName().substring(0, cmd.getCommandName().length() - 8) + " due to invalid value. " + invEx.getMessage()); } catch (CloudRuntimeException cloudEx) { // FIXME: Better error message? This only happens if the API command is not executable, which typically - //means + //means // there was // and IllegalAccessException setting one of the parameters. 
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Internal error executing API command " + cmd.getCommandName().substring(0, cmd.getCommandName().length() - 8)); } //check access on the resource this field points to - try { + try { ACL checkAccess = field.getAnnotation(ACL.class); CommandType fieldType = parameterAnnotation.type(); @@ -426,17 +416,17 @@ public class ApiDispatcher { // Check if the parameter type is a single // Id or list of id's/name's switch (fieldType) { - case LIST: - CommandType listType = parameterAnnotation.collectionType(); - switch (listType) { - case LONG: - case UUID: - List listParam = (List) field.get(cmd); - for (Long entityId : listParam) { - Object entityObj = s_instance._entityMgr.findById(entity, entityId); - entitiesToAccess.add(entityObj); - } - break; + case LIST: + CommandType listType = parameterAnnotation.collectionType(); + switch (listType) { + case LONG: + case UUID: + List listParam = (List) field.get(cmd); + for (Long entityId : listParam) { + Object entityObj = s_instance._entityMgr.findById(entity, entityId); + entitiesToAccess.add(entityObj); + } + break; /* * case STRING: List listParam = * new ArrayList(); listParam = @@ -448,17 +438,17 @@ public class ApiDispatcher { * entitiesToAccess.add(entityObj); } * break; */ - default: - break; - } - break; - case LONG: - case UUID: - Object entityObj = s_instance._entityMgr.findById(entity, (Long) field.get(cmd)); - entitiesToAccess.add(entityObj); - break; default: break; + } + break; + case LONG: + case UUID: + Object entityObj = s_instance._entityMgr.findById(entity, (Long) field.get(cmd)); + entitiesToAccess.add(entityObj); + break; + default: + break; } if (ControlledEntity.class.isAssignableFrom(entity)) { @@ -474,22 +464,22 @@ public class ApiDispatcher { } } - } + } - } + } - } catch (IllegalArgumentException e) { - s_logger.error("Error initializing command " + cmd.getCommandName() + ", field " + field.getName() + " is not accessible."); - throw new CloudRuntimeException("Internal error initializing parameters for command " + cmd.getCommandName() + " [field " + field.getName() + " is not accessible]"); - } catch (IllegalAccessException e) { - s_logger.error("Error initializing command " + cmd.getCommandName() + ", field " + field.getName() + " is not accessible."); - throw new CloudRuntimeException("Internal error initializing parameters for command " + cmd.getCommandName() + " [field " + field.getName() + " is not accessible]"); - } + } catch (IllegalArgumentException e) { + s_logger.error("Error initializing command " + cmd.getCommandName() + ", field " + field.getName() + " is not accessible."); + throw new CloudRuntimeException("Internal error initializing parameters for command " + cmd.getCommandName() + " [field " + field.getName() + " is not accessible]"); + } catch (IllegalAccessException e) { + s_logger.error("Error initializing command " + cmd.getCommandName() + ", field " + field.getName() + " is not accessible."); + throw new CloudRuntimeException("Internal error initializing parameters for command " + cmd.getCommandName() + " [field " + field.getName() + " is not accessible]"); + } } //check access on the entities. 
- s_instance.doAccessChecks(cmd, entitiesToAccess); + getInstance().doAccessChecks(cmd, entitiesToAccess); } @@ -533,7 +523,7 @@ public class ApiDispatcher { // Invoke the getId method, get the internal long ID // If that fails hide exceptions as the uuid may not exist try { - internalId = (Long) ((InternalIdentity)objVO).getId(); + internalId = ((InternalIdentity)objVO).getId(); } catch (IllegalArgumentException e) { } catch (NullPointerException e) { } @@ -598,7 +588,7 @@ public class ApiDispatcher { // we ignore blank or null values and defer to the command to set a default // value for optional parameters ... if (paramObj != null && isNotBlank(paramObj.toString())) { - field.set(cmdObj, Float.valueOf(paramObj.toString())); + field.set(cmdObj, Float.valueOf(paramObj.toString())); } break; case INTEGER: @@ -606,7 +596,7 @@ public class ApiDispatcher { // we ignore blank or null values and defer to the command to set a default // value for optional parameters ... if (paramObj != null && isNotBlank(paramObj.toString())) { - field.set(cmdObj, Integer.valueOf(paramObj.toString())); + field.set(cmdObj, Integer.valueOf(paramObj.toString())); } break; case LIST: @@ -619,16 +609,16 @@ public class ApiDispatcher { case INTEGER: listParam.add(Integer.valueOf(token)); break; - case UUID: - if (token.isEmpty()) - break; - Long internalId = translateUuidToInternalId(token, annotation); - listParam.add(internalId); - break; - case LONG: { - listParam.add(Long.valueOf(token)); - } + case UUID: + if (token.isEmpty()) + break; + Long internalId = translateUuidToInternalId(token, annotation); + listParam.add(internalId); break; + case LONG: { + listParam.add(Long.valueOf(token)); + } + break; case SHORT: listParam.add(Short.valueOf(token)); case STRING: @@ -645,7 +635,7 @@ public class ApiDispatcher { field.set(cmdObj, internalId); break; case LONG: - field.set(cmdObj, Long.valueOf(paramObj.toString())); + field.set(cmdObj, Long.valueOf(paramObj.toString())); break; case SHORT: field.set(cmdObj, Short.valueOf(paramObj.toString())); @@ -686,31 +676,23 @@ public class ApiDispatcher { } public static void plugService(Field field, BaseCmd cmd) { - ComponentLocator locator = ComponentLocator.getLocator(ManagementServer.Name); - Class fc = field.getType(); - Object instance = null; - if (PluggableService.class.isAssignableFrom(fc)) { - instance = locator.getPluggableService(fc); - } + Class fc = field.getType(); + Object instance = null; - if (instance == null) { + if (instance == null) { throw new CloudRuntimeException("Unable to plug service " + fc.getSimpleName() + " in command " + cmd.getClass().getSimpleName()); - } + } - try { - field.setAccessible(true); - field.set(cmd, instance); - } catch (IllegalArgumentException e) { - s_logger.error("IllegalArgumentException at plugService for command " + cmd.getCommandName() + ", field " + field.getName()); - throw new CloudRuntimeException("Internal error at plugService for command " + cmd.getCommandName() + " [Illegal argumet at field " + field.getName() + "]"); - } catch (IllegalAccessException e) { - s_logger.error("Error at plugService for command " + cmd.getCommandName() + ", field " + field.getName() + " is not accessible."); - throw new CloudRuntimeException("Internal error at plugService for command " + cmd.getCommandName() + " [field " + field.getName() + " is not accessible]"); - } - } - - public static Long getIdentiyId(String tableName, String token) { - return s_instance._identityDao.getIdentityId(tableName, token); + try { + 
field.setAccessible(true);
+            field.set(cmd, instance);
+        } catch (IllegalArgumentException e) {
+            s_logger.error("IllegalArgumentException at plugService for command " + cmd.getCommandName() + ", field " + field.getName());
+            throw new CloudRuntimeException("Internal error at plugService for command " + cmd.getCommandName() + " [Illegal argument at field " + field.getName() + "]");
+        } catch (IllegalAccessException e) {
+            s_logger.error("Error at plugService for command " + cmd.getCommandName() + ", field " + field.getName() + " is not accessible.");
+            throw new CloudRuntimeException("Internal error at plugService for command " + cmd.getCommandName() + " [field " + field.getName() + " is not accessible]");
+        }
     }
 }
diff --git a/server/src/com/cloud/api/ApiGsonHelper.java b/server/src/com/cloud/api/ApiGsonHelper.java
index 6e64f7124e0..6163860f0c8 100644
--- a/server/src/com/cloud/api/ApiGsonHelper.java
+++ b/server/src/com/cloud/api/ApiGsonHelper.java
@@ -17,7 +17,6 @@
 package com.cloud.api;
 import com.google.gson.GsonBuilder;
-import com.cloud.utils.IdentityProxy;
 import org.apache.cloudstack.api.ResponseObject;
 import java.util.Map;
@@ -28,7 +27,6 @@ public class ApiGsonHelper {
         s_gBuilder = new GsonBuilder().setDateFormat("yyyy-MM-dd'T'HH:mm:ssZ");
         s_gBuilder.setVersion(1.3);
         s_gBuilder.registerTypeAdapter(ResponseObject.class, new ResponseObjectTypeAdapter());
-        s_gBuilder.registerTypeAdapter(IdentityProxy.class, new IdentityTypeAdapter());
         s_gBuilder.registerTypeAdapter(Map.class, new StringMapTypeAdapter());
     }
diff --git a/server/src/com/cloud/api/ApiResponseGsonHelper.java b/server/src/com/cloud/api/ApiResponseGsonHelper.java
index c71193e8908..6bccf9a12af 100644
--- a/server/src/com/cloud/api/ApiResponseGsonHelper.java
+++ b/server/src/com/cloud/api/ApiResponseGsonHelper.java
@@ -17,7 +17,6 @@
 package com.cloud.api;
 import com.google.gson.GsonBuilder;
-import com.cloud.utils.IdentityProxy;
 import org.apache.cloudstack.api.ResponseObject;
 /**
@@ -25,13 +24,12 @@ import org.apache.cloudstack.api.ResponseObject;
 */
 public class ApiResponseGsonHelper {
     private static final GsonBuilder s_gBuilder;
-    
+
     static {
         s_gBuilder = new GsonBuilder().setDateFormat("yyyy-MM-dd'T'HH:mm:ssZ");
         s_gBuilder.setVersion(1.3);
         s_gBuilder.registerTypeAdapter(ResponseObject.class, new ResponseObjectTypeAdapter());
         s_gBuilder.registerTypeAdapter(String.class, new EncodedStringTypeAdapter());
-        s_gBuilder.registerTypeAdapter(IdentityProxy.class, new IdentityTypeAdapter());
     }
     public static GsonBuilder getBuilder() {
diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java
index 37be83efc5c..c346a6b86f1 100755
--- a/server/src/com/cloud/api/ApiResponseHelper.java
+++ b/server/src/com/cloud/api/ApiResponseHelper.java
@@ -482,7 +482,7 @@ public class ApiResponseHelper implements ResponseGenerator {
         response.setEndPoint(result.getEndPoint());
         response.setHttpsFlag(result.getHttpsFlag());
         response.setMaxErrorRetry(result.getMaxErrorRetry());
-        response.setObjectId(result.getId());
+        response.setObjectId(result.getUuid());
         response.setSecretKey(result.getSecretKey());
         response.setSocketTimeout(result.getSocketTimeout());
         response.setTemplateBucketName(result.getBucketName());
@@ -1265,6 +1265,7 @@ public class ApiResponseHelper implements ResponseGenerator {
         templateResponse.setFeatured(template.isFeatured());
         templateResponse.setExtractable(template.isExtractable() && !(template.getTemplateType() == TemplateType.SYSTEM));
templateResponse.setPasswordEnabled(template.getEnablePassword()); + templateResponse.setSshKeyEnabled(template.getEnableSshKey()); templateResponse.setCrossZones(template.isCrossZones()); templateResponse.setFormat(template.getFormat()); templateResponse.setDetails(template.getDetails()); @@ -1346,6 +1347,7 @@ public class ApiResponseHelper implements ResponseGenerator { templateResponse.setFeatured(template.isFeatured()); templateResponse.setExtractable(template.isExtractable() && !(template.getTemplateType() == TemplateType.SYSTEM)); templateResponse.setPasswordEnabled(template.getEnablePassword()); + templateResponse.setSshKeyEnabled(template.getEnableSshKey()); templateResponse.setCrossZones(template.isCrossZones()); templateResponse.setFormat(template.getFormat()); if (template.getTemplateType() != null) { @@ -2789,11 +2791,6 @@ public class ApiResponseHelper implements ResponseGenerator { return response; } - @Override - public Long getIdentiyId(String tableName, String token) { - return ApiDispatcher.getIdentiyId(tableName, token); - } - @Override public ResourceTagResponse createResourceTagResponse(ResourceTag resourceTag, boolean keyValueOnly) { ResourceTagJoinVO rto = ApiDBUtils.newResourceTagView(resourceTag); diff --git a/server/src/com/cloud/api/ApiServer.java b/server/src/com/cloud/api/ApiServer.java index 6e13f13fbdf..8a2c6755ef0 100755 --- a/server/src/com/cloud/api/ApiServer.java +++ b/server/src/com/cloud/api/ApiServer.java @@ -45,13 +45,16 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import javax.annotation.PostConstruct; import javax.crypto.Mac; import javax.crypto.spec.SecretKeySpec; import javax.inject.Inject; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; -import org.apache.cloudstack.acl.APIAccessChecker; +import org.apache.cloudstack.acl.APIChecker; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.BaseAsyncCreateCmd; import org.apache.cloudstack.api.BaseCmd; @@ -74,7 +77,6 @@ import org.apache.cloudstack.api.command.user.vmgroup.ListVMGroupsCmd; import org.apache.cloudstack.api.command.user.volume.ListVolumesCmd; import org.apache.cloudstack.api.response.ExceptionResponse; import org.apache.cloudstack.api.response.ListResponse; -import org.apache.cloudstack.discovery.ApiDiscoveryService; import org.apache.commons.codec.binary.Base64; import org.apache.http.ConnectionClosedException; import org.apache.http.HttpException; @@ -104,12 +106,12 @@ import org.apache.http.protocol.ResponseContent; import org.apache.http.protocol.ResponseDate; import org.apache.http.protocol.ResponseServer; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.api.response.ApiResponseSerializer; import com.cloud.async.AsyncJob; import com.cloud.async.AsyncJobManager; import com.cloud.async.AsyncJobVO; -import com.cloud.cluster.StackMaid; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationVO; import com.cloud.configuration.dao.ConfigurationDao; @@ -127,70 +129,61 @@ import com.cloud.user.UserAccount; import com.cloud.user.UserContext; import com.cloud.user.UserVO; import com.cloud.utils.Pair; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.ReflectUtil; import com.cloud.utils.StringUtils; -import com.cloud.utils.component.ComponentContext; import 
com.cloud.utils.component.PluggableService; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CSExceptionErrorCode; -import com.cloud.uuididentity.dao.IdentityDao; +@Component public class ApiServer implements HttpRequestHandler { private static final Logger s_logger = Logger.getLogger(ApiServer.class.getName()); private static final Logger s_accessLogger = Logger.getLogger("apiserver." + ApiServer.class.getName()); public static boolean encodeApiResponse = false; public static String jsonContentType = "text/javascript"; - private ApiDispatcher _dispatcher; + @Inject ApiDispatcher _dispatcher; - @Inject private final AccountManager _accountMgr = null; - @Inject private final DomainManager _domainMgr = null; - @Inject private final AsyncJobManager _asyncMgr = null; + @Inject private AccountManager _accountMgr; + @Inject private DomainManager _domainMgr; + @Inject private AsyncJobManager _asyncMgr; @Inject private ConfigurationDao _configDao; - @Inject protected List _apiAccessCheckers; @Inject List _pluggableServices; - @Inject IdentityDao _identityDao; - protected List _apiDiscoveryServices; + @Inject List _apiAccessCheckers; private Account _systemAccount = null; private User _systemUser = null; private static int _workerCount = 0; private static ApiServer s_instance = null; private static final DateFormat _dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ"); - private final Map> _apiNameCmdClassMap = new HashMap>(); + private static Map> _apiNameCmdClassMap = new HashMap>(); private static ExecutorService _executor = new ThreadPoolExecutor(10, 150, 60, TimeUnit.SECONDS, new LinkedBlockingQueue(), new NamedThreadFactory("ApiServer")); - protected ApiServer() { - super(); + public ApiServer() { } - - public static void initApiServer(String[] apiConfig) { - if (s_instance == null) { - s_instance = new ApiServer(); - s_instance = ComponentContext.inject(s_instance); - s_instance.init(apiConfig); - } + + @PostConstruct + void initComponent() { + s_instance = this; + init(); } public static ApiServer getInstance() { - // Assumption: CloudStartupServlet would initialize ApiServer - if (s_instance == null) { - s_logger.fatal("ApiServer instance failed to initialize"); - } return s_instance; } - public void init(String[] apiConfig) { + public void init() { BaseCmd.setComponents(new ApiResponseHelper()); BaseListCmd.configure(); _systemAccount = _accountMgr.getSystemAccount(); _systemUser = _accountMgr.getSystemUser(); - _dispatcher = ApiDispatcher.getInstance(); Integer apiPort = null; // api port, null by default SearchCriteria sc = _configDao.createSearchCriteria(); @@ -203,13 +196,28 @@ public class ApiServer implements HttpRequestHandler { } } - for (ApiDiscoveryService discoveryService: _apiDiscoveryServices) { - _apiNameCmdClassMap.putAll(discoveryService.getApiNameCmdClassMapping()); + Map configs = _configDao.getConfiguration(); + String strSnapshotLimit = configs.get(Config.ConcurrentSnapshotsThresholdPerHost.key()); + if (strSnapshotLimit != null) { + Long snapshotLimit = NumbersUtil.parseLong(strSnapshotLimit, 1L); + if (snapshotLimit <= 0) { + s_logger.debug("Global config parameter " + Config.ConcurrentSnapshotsThresholdPerHost.toString() + + " is less or equal 0; defaulting to unlimited"); + } else { + _dispatcher.setCreateSnapshotQueueSizeLimit(snapshotLimit); + } } - if (_apiNameCmdClassMap.size() == 0) { - s_logger.fatal("ApiServer failed to generate 
apiname, cmd class mappings." - + "Please check and enable at least one ApiDiscovery adapter."); + Set> cmdClasses = ReflectUtil.getClassesWithAnnotation(APICommand.class, + new String[]{"org.apache.cloudstack.api", "com.cloud.api"}); + + for(Class cmdClass: cmdClasses) { + String apiName = cmdClass.getAnnotation(APICommand.class).name(); + if (_apiNameCmdClassMap.containsKey(apiName)) { + s_logger.error("API Cmd class " + cmdClass.getName() + " has non-unique apiname" + apiName); + continue; + } + _apiNameCmdClassMap.put(apiName, cmdClass); } encodeApiResponse = Boolean.valueOf(_configDao.getValue(Config.EncodeApiResponse.key())); @@ -403,12 +411,12 @@ public class ApiServer implements HttpRequestHandler { // BaseAsyncCmd: cmd is processed and submitted as an AsyncJob, job related info is serialized and returned. if (cmdObj instanceof BaseAsyncCmd) { Long objectId = null; - String objectEntityTable = null; + String objectUuid = null; if (cmdObj instanceof BaseAsyncCreateCmd) { BaseAsyncCreateCmd createCmd = (BaseAsyncCreateCmd) cmdObj; _dispatcher.dispatchCreateCmd(createCmd, params); objectId = createCmd.getEntityId(); - objectEntityTable = createCmd.getEntityTable(); + objectUuid = createCmd.getEntityUuid(); params.put("id", objectId.toString()); } else { ApiDispatcher.processParameters(cmdObj, params); @@ -452,8 +460,8 @@ public class ApiServer implements HttpRequestHandler { } if (objectId != null) { - SerializationContext.current().setUuidTranslation(true); - return ((BaseAsyncCreateCmd) asyncCmd).getResponse(jobId, objectId, objectEntityTable); + String objUuid = (objectUuid == null) ? objectId.toString() : objectUuid; + return ((BaseAsyncCreateCmd) asyncCmd).getResponse(jobId, objUuid); } SerializationContext.current().setUuidTranslation(true); @@ -463,6 +471,7 @@ public class ApiServer implements HttpRequestHandler { // if the command is of the listXXXCommand, we will need to also return the // the job id and status if possible + // For those listXXXCommand which we have already created DB views, this step is not needed since async job is joined in their db views. 
if (cmdObj instanceof BaseListCmd && !(cmdObj instanceof ListVMsCmd) && !(cmdObj instanceof ListRoutersCmd) && !(cmdObj instanceof ListSecurityGroupsCmd) && !(cmdObj instanceof ListTagsCmd) @@ -553,14 +562,14 @@ public class ApiServer implements HttpRequestHandler { if (userId != null) { User user = ApiDBUtils.findUserById(userId); if (!isCommandAvailable(user, commandName)) { - s_logger.warn("The given command:" + commandName + " does not exist or it is not available for user"); + s_logger.debug("The given command:" + commandName + " does not exist or it is not available for user with id:" + userId); throw new ServerApiException(BaseCmd.UNSUPPORTED_ACTION_ERROR, "The given command does not exist or it is not available for user"); } return true; } else { // check against every available command to see if the command exists or not - if (!isCommandAvailable(null, commandName) && !commandName.equals("login") && !commandName.equals("logout")) { - s_logger.warn("The given command:" + commandName + " does not exist or it is not available for user"); + if (!_apiNameCmdClassMap.containsKey(commandName) && !commandName.equals("login") && !commandName.equals("logout")) { + s_logger.debug("The given command:" + commandName + " does not exist or it is not available for user with id:" + userId); throw new ServerApiException(BaseCmd.UNSUPPORTED_ACTION_ERROR, "The given command does not exist or it is not available for user"); } } @@ -604,30 +613,29 @@ public class ApiServer implements HttpRequestHandler { // if api/secret key are passed to the parameters if ((signature == null) || (apiKey == null)) { - if (s_logger.isDebugEnabled()) { - s_logger.info("expired session, missing signature, or missing apiKey -- ignoring request...sig: " + signature + ", apiKey: " + apiKey); - } + s_logger.debug("Expired session, missing signature, or missing apiKey -- ignoring request. Signature: " + signature + ", apiKey: " + apiKey); return false; // no signature, bad request } Date expiresTS = null; + // FIXME: Hard coded signature, why not have an enum if ("3".equals(signatureVersion)) { // New signature authentication. Check for expire parameter and its validity if (expires == null) { - s_logger.info("missing Expires parameter -- ignoring request...sig: " + signature + ", apiKey: " + apiKey); + s_logger.debug("Missing Expires parameter -- ignoring request. 
Signature: " + signature + ", apiKey: " + apiKey); return false; } synchronized (_dateFormat) { try { expiresTS = _dateFormat.parse(expires); } catch (ParseException pe) { - s_logger.info("Incorrect date format for Expires parameter", pe); + s_logger.debug("Incorrect date format for Expires parameter", pe); return false; } } Date now = new Date(System.currentTimeMillis()); if (expiresTS.before(now)) { - s_logger.info("Request expired -- ignoring ...sig: " + signature + ", apiKey: " + apiKey); + s_logger.debug("Request expired -- ignoring ...sig: " + signature + ", apiKey: " + apiKey); return false; } } @@ -638,7 +646,7 @@ public class ApiServer implements HttpRequestHandler { // verify there is a user with this api key Pair userAcctPair = _accountMgr.findUserByApiKey(apiKey); if (userAcctPair == null) { - s_logger.info("apiKey does not map to a valid user -- ignoring request, apiKey: " + apiKey); + s_logger.debug("apiKey does not map to a valid user -- ignoring request, apiKey: " + apiKey); return false; } @@ -654,8 +662,8 @@ public class ApiServer implements HttpRequestHandler { UserContext.updateContext(user.getId(), account, null); if (!isCommandAvailable(user, commandName)) { - s_logger.warn("The given command:" + commandName + " does not exist or it is not available for user"); - throw new ServerApiException(BaseCmd.UNSUPPORTED_ACTION_ERROR, "The given command:" + commandName + " does not exist or it is not available for user"); + s_logger.debug("The given command:" + commandName + " does not exist or it is not available for user"); + throw new ServerApiException(BaseCmd.UNSUPPORTED_ACTION_ERROR, "The given command:" + commandName + " does not exist or it is not available for user with id:" + userId); } // verify secret key exists @@ -682,18 +690,13 @@ public class ApiServer implements HttpRequestHandler { if (ex instanceof ServerApiException && ((ServerApiException) ex).getErrorCode() == BaseCmd.UNSUPPORTED_ACTION_ERROR) { throw (ServerApiException) ex; } - s_logger.error("unable to verifty request signature", ex); + s_logger.error("unable to verify request signature", ex); } return false; } - public Long fetchDomainId(String domainUUID){ - try{ - Long domainId = _identityDao.getIdentityId("domain", domainUUID); - return domainId; - }catch(InvalidParameterValueException ex){ - return null; - } + public Long fetchDomainId(String domainUUID) { + return _domainMgr.getDomain(domainUUID).getId(); } public void loginUser(HttpSession session, String username, String password, Long domainId, String domainPath, String loginIpAddress ,Map requestParameters) throws CloudAuthenticationException { @@ -789,10 +792,16 @@ public class ApiServer implements HttpRequestHandler { return true; } - private boolean isCommandAvailable(User user, String commandName) { - for (APIAccessChecker apiChecker : _apiAccessCheckers) { + private boolean isCommandAvailable(User user, String commandName) throws PermissionDeniedException { + if (user == null) { + throw new PermissionDeniedException("User is null for role based API access check for command" + commandName); + } + + Account account = _accountMgr.getAccount(user.getAccountId()); + RoleType roleType = _accountMgr.getRoleType(account); + for (APIChecker apiChecker : _apiAccessCheckers) { // Fail the checking if any checker fails to verify - if (!apiChecker.canAccessAPI(user, commandName)) + if (!apiChecker.checkAccess(roleType, commandName)) return false; } return true; @@ -907,12 +916,8 @@ public class ApiServer implements HttpRequestHandler { HttpContext 
context = new BasicHttpContext(null); try { while (!Thread.interrupted() && _conn.isOpen()) { - try { - _httpService.handleRequest(_conn, context); - _conn.close(); - } finally { - StackMaid.current().exitCleanup(); - } + _httpService.handleRequest(_conn, context); + _conn.close(); } } catch (ConnectionClosedException ex) { if (s_logger.isTraceEnabled()) { diff --git a/server/src/com/cloud/api/ApiServlet.java b/server/src/com/cloud/api/ApiServlet.java index 19091f25ff2..e535030f26c 100755 --- a/server/src/com/cloud/api/ApiServlet.java +++ b/server/src/com/cloud/api/ApiServlet.java @@ -23,6 +23,7 @@ import java.util.Enumeration; import java.util.HashMap; import java.util.Map; +import javax.inject.Inject; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; @@ -31,51 +32,42 @@ import javax.servlet.http.HttpSession; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.ServerApiException; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; -import com.cloud.cluster.StackMaid; import com.cloud.exception.CloudAuthenticationException; -import com.cloud.server.ManagementServer; import com.cloud.user.Account; import com.cloud.user.AccountService; import com.cloud.user.UserContext; import com.cloud.utils.StringUtils; -import com.cloud.utils.component.ComponentLocator; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.exception.CloudRuntimeException; +@Component("apiServlet") @SuppressWarnings("serial") public class ApiServlet extends HttpServlet { public static final Logger s_logger = Logger.getLogger(ApiServlet.class.getName()); private static final Logger s_accessLogger = Logger.getLogger("apiserver." + ApiServer.class.getName()); - private ApiServer _apiServer = null; - private AccountService _accountMgr = null; + ApiServer _apiServer; + AccountService _accountMgr; public ApiServlet() { super(); _apiServer = ApiServer.getInstance(); + _accountMgr = ComponentContext.getComponent(AccountService.class); if (_apiServer == null) { throw new CloudRuntimeException("ApiServer not initialized"); } - ComponentLocator locator = ComponentLocator.getLocator(ManagementServer.Name); - _accountMgr = locator.getManager(AccountService.class); } @Override protected void doGet(HttpServletRequest req, HttpServletResponse resp) { - try { - processRequest(req, resp); - } finally { - StackMaid.current().exitCleanup(); - } + processRequest(req, resp); } @Override protected void doPost(HttpServletRequest req, HttpServletResponse resp) { - try { - processRequest(req, resp); - } finally { - StackMaid.current().exitCleanup(); - } + processRequest(req, resp); } private void utf8Fixup(HttpServletRequest req, Map params) { @@ -128,7 +120,7 @@ public class ApiServlet extends HttpServlet { reqStr = auditTrailSb.toString() + " " + req.getQueryString(); s_logger.debug("===START=== " + StringUtils.cleanString(reqStr)); } - + try { HttpSession session = req.getSession(false); Object[] responseTypeParam = params.get("response"); @@ -305,7 +297,7 @@ public class ApiServlet extends HttpServlet { auditTrailSb.insert(0, "(userId=" + UserContext.current().getCallerUserId() + " accountId=" + UserContext.current().getCaller().getId() + " sessionId=" + (session != null ? 
session.getId() : null) - + ")"); + + ")"); try { String response = _apiServer.handleRequest(params, false, responseType, auditTrailSb); @@ -386,7 +378,7 @@ public class ApiServlet extends HttpServlet { private String getLoginSuccessResponse(HttpSession session, String responseType) { StringBuffer sb = new StringBuffer(); int inactiveInterval = session.getMaxInactiveInterval(); - + String user_UUID = (String)session.getAttribute("user_UUID"); session.removeAttribute("user_UUID"); diff --git a/server/src/com/cloud/api/IdentityTypeAdapter.java b/server/src/com/cloud/api/IdentityTypeAdapter.java deleted file mode 100644 index 369c2020c24..00000000000 --- a/server/src/com/cloud/api/IdentityTypeAdapter.java +++ /dev/null @@ -1,80 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.api; - -import java.lang.reflect.Type; - -import com.cloud.uuididentity.dao.IdentityDao; -import com.cloud.uuididentity.dao.IdentityDaoImpl; -import com.google.gson.Gson; -import com.google.gson.JsonDeserializationContext; -import com.google.gson.JsonDeserializer; -import com.google.gson.JsonElement; -import com.google.gson.JsonObject; -import com.google.gson.JsonParseException; -import com.google.gson.JsonPrimitive; -import com.google.gson.JsonSerializationContext; -import com.google.gson.JsonSerializer; -import com.cloud.utils.IdentityProxy; - - -public class IdentityTypeAdapter implements JsonSerializer, JsonDeserializer { - - @Override - public JsonElement serialize(IdentityProxy src, Type srcType, JsonSerializationContext context) { - if(SerializationContext.current().getUuidTranslation()) { - assert(src != null); - if(src.getValue() == null) - return context.serialize(null); - - IdentityDao identityDao = new IdentityDaoImpl(); - if(src.getTableName() != null) { - String uuid = identityDao.getIdentityUuid(src.getTableName(), String.valueOf(src.getValue())); - if(uuid == null) - return context.serialize(null); - - // Exceptions set the _idFieldName in the IdentityProxy structure. So if this field is not - // null, prepare a structure of uuid and idFieldName and return the json representation of that. - String idName = src.getidFieldName(); - if (idName != null) { - // Prepare a structure. 
- JsonObject jsonObj = new JsonObject(); - jsonObj.add("uuid", new JsonPrimitive(uuid)); - jsonObj.add("uuidProperty", new JsonPrimitive(idName)); - return jsonObj; - } - return new JsonPrimitive(uuid); - } else { - return new JsonPrimitive(String.valueOf(src.getValue())); - } - } else { - return new Gson().toJsonTree(src); - } - } - - @Override - public IdentityProxy deserialize(JsonElement src, Type srcType, - JsonDeserializationContext context) throws JsonParseException { - - IdentityProxy obj = new IdentityProxy(); - JsonObject json = src.getAsJsonObject(); - obj.setTableName(json.get("_tableName").getAsString()); - if(json.get("_value") != null) - obj.setValue(json.get("_value").getAsLong()); - return obj; - } -} diff --git a/server/src/com/cloud/api/commands/AddTrafficMonitorCmd.java b/server/src/com/cloud/api/commands/AddTrafficMonitorCmd.java index fdbbbe4ca3c..c80a62b3b01 100644 --- a/server/src/com/cloud/api/commands/AddTrafficMonitorCmd.java +++ b/server/src/com/cloud/api/commands/AddTrafficMonitorCmd.java @@ -16,91 +16,93 @@ // under the License. package com.cloud.api.commands; -import org.apache.cloudstack.api.*; +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.TrafficMonitorResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.log4j.Logger; -import org.apache.cloudstack.api.APICommand; import com.cloud.exception.InvalidParameterValueException; import com.cloud.host.Host; import com.cloud.network.NetworkUsageManager; -import com.cloud.server.ManagementService; -import org.apache.cloudstack.api.response.TrafficMonitorResponse; import com.cloud.user.Account; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.exception.CloudRuntimeException; @APICommand(name = "addTrafficMonitor", description="Adds Traffic Monitor Host for Direct Network Usage", responseObject = TrafficMonitorResponse.class) public class AddTrafficMonitorCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(AddTrafficMonitorCmd.class.getName()); - private static final String s_name = "addtrafficmonitorresponse"; - - ///////////////////////////////////////////////////// + public static final Logger s_logger = Logger.getLogger(AddTrafficMonitorCmd.class.getName()); + private static final String s_name = "addtrafficmonitorresponse"; + @Inject NetworkUsageManager networkUsageMgr; + + ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - - @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType = ZoneResponse.class, + + @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType = ZoneResponse.class, required = true, description="Zone in which to add the external firewall appliance.") - private Long zoneId; - - @Parameter(name=ApiConstants.URL, type=CommandType.STRING, required = true, description="URL of the traffic monitor Host") - private String url; + private Long zoneId; - @Parameter(name=ApiConstants.INCL_ZONES, type=CommandType.STRING, description="Traffic going into the listed zones will be metered") - private String inclZones; - - @Parameter(name=ApiConstants.EXCL_ZONES, type=CommandType.STRING, description="Traffic going into the listed 
zones will not be metered") - private String exclZones; - - /////////////////////////////////////////////////// - /////////////////// Accessors /////////////////////// - ///////////////////////////////////////////////////// - - public String getInclZones() { - return inclZones; - } - - public String getExclZones() { - return exclZones; - } + @Parameter(name=ApiConstants.URL, type=CommandType.STRING, required = true, description="URL of the traffic monitor Host") + private String url; - public Long getZoneId() { - return zoneId; - } + @Parameter(name=ApiConstants.INCL_ZONES, type=CommandType.STRING, description="Traffic going into the listed zones will be metered") + private String inclZones; - public String getUrl() { - return url; - } - - ///////////////////////////////////////////////////// - /////////////// API Implementation/////////////////// - ///////////////////////////////////////////////////// + @Parameter(name=ApiConstants.EXCL_ZONES, type=CommandType.STRING, description="Traffic going into the listed zones will not be metered") + private String exclZones; - @Override - public String getCommandName() { - return s_name; - } - - @Override + /////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public String getInclZones() { + return inclZones; + } + + public String getExclZones() { + return exclZones; + } + + public Long getZoneId() { + return zoneId; + } + + public String getUrl() { + return url; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override public long getEntityOwnerId() { return Account.ACCOUNT_ID_SYSTEM; } - - @Override + + @Override public void execute(){ - try { - ComponentLocator locator = ComponentLocator.getLocator(ManagementService.Name); - NetworkUsageManager networkUsageMgr = locator.getManager(NetworkUsageManager.class); - Host trafficMonitor = networkUsageMgr.addTrafficMonitor(this); - TrafficMonitorResponse response = networkUsageMgr.getApiResponse(trafficMonitor); - response.setObjectName("trafficmonitor"); - response.setResponseName(getCommandName()); - this.setResponseObject(response); - } catch (InvalidParameterValueException ipve) { - throw new ServerApiException(BaseCmd.PARAM_ERROR, ipve.getMessage()); - } catch (CloudRuntimeException cre) { - throw new ServerApiException(BaseCmd.INTERNAL_ERROR, cre.getMessage()); - } + try { + Host trafficMonitor = networkUsageMgr.addTrafficMonitor(this); + TrafficMonitorResponse response = networkUsageMgr.getApiResponse(trafficMonitor); + response.setObjectName("trafficmonitor"); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } catch (InvalidParameterValueException ipve) { + throw new ServerApiException(BaseCmd.PARAM_ERROR, ipve.getMessage()); + } catch (CloudRuntimeException cre) { + throw new ServerApiException(BaseCmd.INTERNAL_ERROR, cre.getMessage()); + } } } diff --git a/server/src/com/cloud/api/commands/DeleteTrafficMonitorCmd.java b/server/src/com/cloud/api/commands/DeleteTrafficMonitorCmd.java index 4c7d3a70546..9e84f03d8e9 100644 --- a/server/src/com/cloud/api/commands/DeleteTrafficMonitorCmd.java +++ b/server/src/com/cloud/api/commands/DeleteTrafficMonitorCmd.java @@ -16,71 +16,70 @@ // under the License. 
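// Aside (illustrative sketch, not part of this patch): the command classes in this patch stop
// pulling their managers out of ComponentLocator inside execute() and instead declare them as
// @Inject fields wired by the container at startup. The distilled shape, with a hypothetical
// FooManager standing in for NetworkUsageManager:

import javax.inject.Inject;

class AddFooCmd {
    // wired once by the DI container; execute() no longer performs any lookup
    @Inject
    FooManager _fooMgr;

    public void execute() {
        _fooMgr.addFoo(this);
    }
}

interface FooManager {
    void addFoo(AddFooCmd cmd);
}

// The win: the dependency is visible in the class declaration, can be mocked in unit tests,
// and a missing binding fails at context startup instead of at request time.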
package com.cloud.api.commands; -import org.apache.cloudstack.api.response.HostResponse; -import org.apache.log4j.Logger; +import javax.inject.Inject; +import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd; -import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.HostResponse; import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.log4j.Logger; + import com.cloud.exception.InvalidParameterValueException; import com.cloud.network.NetworkUsageManager; -import com.cloud.server.ManagementService; import com.cloud.user.Account; -import com.cloud.utils.component.ComponentLocator; @APICommand(name = "deleteTrafficMonitor", description="Deletes an traffic monitor host.", responseObject = SuccessResponse.class) public class DeleteTrafficMonitorCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteTrafficMonitorCmd.class.getName()); - private static final String s_name = "deletetrafficmonitorresponse"; - - ///////////////////////////////////////////////////// + public static final Logger s_logger = Logger.getLogger(DeleteTrafficMonitorCmd.class.getName()); + private static final String s_name = "deletetrafficmonitorresponse"; + @Inject NetworkUsageManager _networkUsageMgr; + + ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - - @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType = HostResponse.class, - required = true, description="Id of the Traffic Monitor Host.") - private Long id; - - /////////////////////////////////////////////////// - /////////////////// Accessors /////////////////////// - ///////////////////////////////////////////////////// - - public Long getId() { - return id; - } - - ///////////////////////////////////////////////////// - /////////////// API Implementation/////////////////// - ///////////////////////////////////////////////////// - @Override - public String getCommandName() { - return s_name; - } - - @Override + @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType = HostResponse.class, + required = true, description="Id of the Traffic Monitor Host.") + private Long id; + + /////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override public long getEntityOwnerId() { return Account.ACCOUNT_ID_SYSTEM; } - - @Override + + @Override public void execute(){ - try { - ComponentLocator locator = ComponentLocator.getLocator(ManagementService.Name); - NetworkUsageManager _networkUsageMgr = locator.getManager(NetworkUsageManager.class); - boolean result = _networkUsageMgr.deleteTrafficMonitor(this); - if (result) { - SuccessResponse response = new SuccessResponse(getCommandName()); - response.setResponseName(getCommandName()); - this.setResponseObject(response); - } else { - throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to delete traffic monitor."); - } - } catch 
(InvalidParameterValueException e) { - throw new ServerApiException(BaseCmd.PARAM_ERROR, "Failed to delete traffic monitor."); - } + try { + boolean result = _networkUsageMgr.deleteTrafficMonitor(this); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to delete traffic monitor."); + } + } catch (InvalidParameterValueException e) { + throw new ServerApiException(BaseCmd.PARAM_ERROR, "Failed to delete traffic monitor."); + } } } diff --git a/server/src/com/cloud/api/commands/ListTrafficMonitorsCmd.java b/server/src/com/cloud/api/commands/ListTrafficMonitorsCmd.java index 21ad339137a..645bf3b7307 100644 --- a/server/src/com/cloud/api/commands/ListTrafficMonitorsCmd.java +++ b/server/src/com/cloud/api/commands/ListTrafficMonitorsCmd.java @@ -19,26 +19,28 @@ package com.cloud.api.commands; import java.util.ArrayList; import java.util.List; +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.command.user.offering.ListServiceOfferingsCmd; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.TrafficMonitorResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.log4j.Logger; -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.BaseListCmd; -import org.apache.cloudstack.api.APICommand; -import org.apache.cloudstack.api.Parameter; -import org.apache.cloudstack.api.response.ListResponse; import com.cloud.host.Host; import com.cloud.network.NetworkUsageManager; -import com.cloud.server.ManagementService; -import org.apache.cloudstack.api.response.TrafficMonitorResponse; -import com.cloud.utils.component.ComponentLocator; + @APICommand(name = "listTrafficMonitors", description="List traffic monitor Hosts.", responseObject = TrafficMonitorResponse.class) public class ListTrafficMonitorsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListServiceOfferingsCmd.class.getName()); + public static final Logger s_logger = Logger.getLogger(ListServiceOfferingsCmd.class.getName()); private static final String s_name = "listtrafficmonitorsresponse"; + @Inject NetworkUsageManager networkUsageMgr; ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// @@ -66,17 +68,15 @@ public class ListTrafficMonitorsCmd extends BaseListCmd { @Override public void execute(){ - ComponentLocator locator = ComponentLocator.getLocator(ManagementService.Name); - NetworkUsageManager networkUsageMgr = locator.getManager(NetworkUsageManager.class); - List trafficMonitors = networkUsageMgr.listTrafficMonitors(this); + List trafficMonitors = networkUsageMgr.listTrafficMonitors(this); ListResponse listResponse = new ListResponse(); List responses = new ArrayList(); for (Host trafficMonitor : trafficMonitors) { TrafficMonitorResponse response = networkUsageMgr.getApiResponse(trafficMonitor); - response.setObjectName("trafficmonitor"); - response.setResponseName(getCommandName()); - responses.add(response); + response.setObjectName("trafficmonitor"); + response.setResponseName(getCommandName()); + responses.add(response); 
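// Aside (illustrative sketch, not part of this patch): the list command above follows the
// standard list-API shape -- query the injected manager, map each entity to a response DTO,
// stamp object and response names, then wrap everything in a single list response. A
// self-contained version with invented stand-in types (HostDto, MonitorResponse):

import java.util.ArrayList;
import java.util.List;

class ListAssemblySketch {
    static class HostDto { String name; HostDto(String n) { name = n; } }
    static class MonitorResponse { String objectName; String responseName; String hostName; }

    static List<MonitorResponse> toResponses(List<HostDto> hosts, String responseName) {
        List<MonitorResponse> responses = new ArrayList<MonitorResponse>();
        for (HostDto host : hosts) {
            MonitorResponse r = new MonitorResponse();
            r.hostName = host.name;
            r.objectName = "trafficmonitor";   // element name used in the serialized response
            r.responseName = responseName;     // e.g. "listtrafficmonitorsresponse"
            responses.add(r);
        }
        return responses;
    }
}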
} listResponse.setResponses(responses); diff --git a/server/src/com/cloud/api/query/QueryManagerImpl.java b/server/src/com/cloud/api/query/QueryManagerImpl.java index b61f10a1ade..6760dec2f44 100644 --- a/server/src/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/com/cloud/api/query/QueryManagerImpl.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.Map; import javax.ejb.Local; +import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.api.command.admin.host.ListHostsCmd; @@ -57,6 +58,7 @@ import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.query.QueryService; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.api.query.dao.AccountJoinDao; import com.cloud.api.query.dao.AsyncJobJoinDao; @@ -97,15 +99,13 @@ import com.cloud.ha.HighAvailabilityManager; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.security.SecurityGroupVMMapVO; import com.cloud.network.security.dao.SecurityGroupVMMapDao; -import com.cloud.projects.ProjectInvitation; -import com.cloud.projects.Project.ListProjectResourcesCriteria; import com.cloud.projects.Project; +import com.cloud.projects.Project.ListProjectResourcesCriteria; +import com.cloud.projects.ProjectInvitation; import com.cloud.projects.ProjectManager; import com.cloud.projects.dao.ProjectAccountDao; import com.cloud.projects.dao.ProjectDao; import com.cloud.server.Criteria; -import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.Volume; import com.cloud.user.Account; import com.cloud.user.AccountManager; @@ -115,7 +115,6 @@ import com.cloud.user.dao.AccountDao; import com.cloud.utils.DateUtil; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; -import com.cloud.utils.component.Inject; import com.cloud.utils.component.Manager; import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchBuilder; @@ -130,6 +129,7 @@ import com.cloud.vm.dao.UserVmDao; * @author minc * */ +@Component @Local(value = {QueryService.class }) public class QueryManagerImpl implements QueryService, Manager { @@ -137,7 +137,7 @@ public class QueryManagerImpl implements QueryService, Manager { private String _name; - // public static ViewResponseHelper _responseGenerator; + // public static ViewResponseHelper _responseGenerator; @Inject private AccountManager _accountMgr; @@ -214,8 +214,7 @@ public class QueryManagerImpl implements QueryService, Manager { @Override public boolean configure(String name, Map params) throws ConfigurationException { _name = name; - // _responseGenerator = new ViewResponseHelper(); - return false; + return true; } @Override @@ -717,7 +716,7 @@ public class QueryManagerImpl implements QueryService, Manager { if (tags != null && !tags.isEmpty()) { int count = 0; - for (String key : tags.keySet()) { + for (String key : tags.keySet()) { sc.setParameters("key" + String.valueOf(count), key); sc.setParameters("value" + String.valueOf(count), tags.get(key)); count++; @@ -883,10 +882,10 @@ public class QueryManagerImpl implements QueryService, Manager { if (tags != null && !tags.isEmpty()) { int count = 0; for (String key : tags.keySet()) { - sc.setParameters("key" + String.valueOf(count), key); - sc.setParameters("value" + String.valueOf(count), tags.get(key)); - count++; - } + sc.setParameters("key" + String.valueOf(count), key); + sc.setParameters("value" + 
String.valueOf(count), tags.get(key)); + count++; + } } if (securityGroup != null) { @@ -974,10 +973,10 @@ public class QueryManagerImpl implements QueryService, Manager { //Filter searchFilter = new Filter(DomainRouterJoinVO.class, null, true, cmd.getStartIndex(), cmd.getPageSizeVal()); SearchBuilder sb = _routerJoinDao.createSearchBuilder(); sb.select(null, Func.DISTINCT, sb.entity().getId()); // select distinct - // ids to get - // number of - // records with - // pagination + // ids to get + // number of + // records with + // pagination _accountMgr.buildACLViewSearchBuilder(sb, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria); sb.and("name", sb.entity().getHostName(), SearchCriteria.Op.LIKE); @@ -1095,7 +1094,7 @@ public class QueryManagerImpl implements QueryService, Manager { Filter searchFilter = new Filter(ProjectJoinVO.class, "id", false, startIndex, pageSize); SearchBuilder sb = _projectJoinDao.createSearchBuilder(); sb.select(null, Func.DISTINCT, sb.entity().getId()); // select distinct - // ids + // ids if (_accountMgr.isAdmin(caller.getType())) { if (domainId != null) { @@ -1302,7 +1301,7 @@ public class QueryManagerImpl implements QueryService, Manager { Long startIndex = cmd.getStartIndex(); Long pageSizeVal = cmd.getPageSizeVal(); - //long projectId, String accountName, String role, Long startIndex, Long pageSizeVal) { + //long projectId, String accountName, String role, Long startIndex, Long pageSizeVal) { Account caller = UserContext.current().getCaller(); //check that the project exists @@ -1546,7 +1545,7 @@ public class QueryManagerImpl implements QueryService, Manager { if (tags != null && !tags.isEmpty()) { int count = 0; - for (String key : tags.keySet()) { + for (String key : tags.keySet()) { sc.setParameters("key" + String.valueOf(count), key); sc.setParameters("value" + String.valueOf(count), tags.get(key)); count++; diff --git a/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java index cdfac3a8247..96b91df79f9 100644 --- a/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java @@ -20,21 +20,19 @@ import java.util.ArrayList; import java.util.List; import javax.ejb.Local; +import javax.inject.Inject; +import org.apache.cloudstack.api.response.DomainRouterResponse; +import org.apache.cloudstack.api.response.NicResponse; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.api.ApiResponseHelper; import com.cloud.api.query.vo.DomainRouterJoinVO; import com.cloud.configuration.dao.ConfigurationDao; - -import org.apache.cloudstack.api.response.DomainRouterResponse; -import org.apache.cloudstack.api.response.NicResponse; -import org.springframework.stereotype.Component; - import com.cloud.network.Networks.TrafficType; import com.cloud.network.router.VirtualRouter; import com.cloud.user.Account; -import com.cloud.utils.component.Inject; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -47,9 +45,9 @@ public class DomainRouterJoinDaoImpl extends GenericDaoBase vrSearch; + private final SearchBuilder vrSearch; - private SearchBuilder vrIdSearch; + private final SearchBuilder vrIdSearch; protected DomainRouterJoinDaoImpl() { diff --git a/server/src/com/cloud/api/query/dao/HostJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/HostJoinDaoImpl.java index 
9a7ba468227..fa7618cbd3f 100644 --- a/server/src/com/cloud/api/query/dao/HostJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/HostJoinDaoImpl.java @@ -25,21 +25,19 @@ import java.util.List; import java.util.Set; import javax.ejb.Local; +import javax.inject.Inject; +import org.apache.cloudstack.api.ApiConstants.HostDetails; +import org.apache.cloudstack.api.response.HostResponse; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; import com.cloud.api.query.vo.HostJoinVO; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.host.Host; import com.cloud.host.HostStats; - -import org.apache.cloudstack.api.ApiConstants.HostDetails; -import org.apache.cloudstack.api.response.HostResponse; -import org.springframework.stereotype.Component; - import com.cloud.storage.StorageStats; -import com.cloud.utils.component.Inject; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -52,9 +50,9 @@ public class HostJoinDaoImpl extends GenericDaoBase implements @Inject private ConfigurationDao _configDao; - private SearchBuilder hostSearch; + private final SearchBuilder hostSearch; - private SearchBuilder hostIdSearch; + private final SearchBuilder hostIdSearch; protected HostJoinDaoImpl() { @@ -97,14 +95,14 @@ public class HostJoinDaoImpl extends GenericDaoBase implements if (details.contains(HostDetails.all) || details.contains(HostDetails.capacity) || details.contains(HostDetails.stats) || details.contains(HostDetails.events)) { - hostResponse.setOsCategoryId(host.getOsCategoryUuid()); - hostResponse.setOsCategoryName(host.getOsCategoryName()); - hostResponse.setZoneName(host.getZoneName()); - hostResponse.setPodName(host.getPodName()); - if ( host.getClusterId() > 0) { - hostResponse.setClusterName(host.getClusterName()); - hostResponse.setClusterType(host.getClusterType().toString()); - } + hostResponse.setOsCategoryId(host.getOsCategoryUuid()); + hostResponse.setOsCategoryName(host.getOsCategoryName()); + hostResponse.setZoneName(host.getZoneName()); + hostResponse.setPodName(host.getPodName()); + if ( host.getClusterId() > 0) { + hostResponse.setClusterName(host.getClusterName()); + hostResponse.setClusterType(host.getClusterType().toString()); + } } DecimalFormat decimalFormat = new DecimalFormat("#.##"); diff --git a/server/src/com/cloud/api/query/dao/ProjectJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/ProjectJoinDaoImpl.java index 77f930343bf..5b2a350a56b 100644 --- a/server/src/com/cloud/api/query/dao/ProjectJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/ProjectJoinDaoImpl.java @@ -20,19 +20,17 @@ import java.util.ArrayList; import java.util.List; import javax.ejb.Local; +import javax.inject.Inject; +import org.apache.cloudstack.api.response.ProjectResponse; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; import com.cloud.api.query.vo.ProjectJoinVO; import com.cloud.api.query.vo.ResourceTagJoinVO; import com.cloud.configuration.dao.ConfigurationDao; - -import org.apache.cloudstack.api.response.ProjectResponse; -import org.springframework.stereotype.Component; - import com.cloud.projects.Project; -import com.cloud.utils.component.Inject; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -45,9 +43,9 @@ public class ProjectJoinDaoImpl extends GenericDaoBase impl @Inject private 
ConfigurationDao _configDao; - private SearchBuilder prjSearch; + private final SearchBuilder prjSearch; - private SearchBuilder prjIdSearch; + private final SearchBuilder prjIdSearch; protected ProjectJoinDaoImpl() { diff --git a/server/src/com/cloud/api/query/dao/ResourceTagJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/ResourceTagJoinDaoImpl.java index 2d86ca03d7c..76316577525 100644 --- a/server/src/com/cloud/api/query/dao/ResourceTagJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/ResourceTagJoinDaoImpl.java @@ -20,18 +20,16 @@ import java.util.ArrayList; import java.util.List; import javax.ejb.Local; +import javax.inject.Inject; +import org.apache.cloudstack.api.response.ResourceTagResponse; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.api.ApiResponseHelper; import com.cloud.api.query.vo.ResourceTagJoinVO; import com.cloud.configuration.dao.ConfigurationDao; - -import org.apache.cloudstack.api.response.ResourceTagResponse; -import org.springframework.stereotype.Component; - import com.cloud.server.ResourceTag; -import com.cloud.utils.component.Inject; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -44,9 +42,9 @@ public class ResourceTagJoinDaoImpl extends GenericDaoBase tagSearch; + private final SearchBuilder tagSearch; - private SearchBuilder tagIdSearch; + private final SearchBuilder tagIdSearch; protected ResourceTagJoinDaoImpl() { diff --git a/server/src/com/cloud/api/query/dao/SecurityGroupJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/SecurityGroupJoinDaoImpl.java index f6847aa9d2b..3e579c179e2 100644 --- a/server/src/com/cloud/api/query/dao/SecurityGroupJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/SecurityGroupJoinDaoImpl.java @@ -20,23 +20,21 @@ import java.util.ArrayList; import java.util.List; import javax.ejb.Local; +import javax.inject.Inject; +import org.apache.cloudstack.api.response.SecurityGroupResponse; +import org.apache.cloudstack.api.response.SecurityGroupRuleResponse; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; import com.cloud.api.ApiResponseHelper; import com.cloud.api.query.vo.ResourceTagJoinVO; import com.cloud.api.query.vo.SecurityGroupJoinVO; import com.cloud.configuration.dao.ConfigurationDao; - -import org.apache.cloudstack.api.response.SecurityGroupResponse; -import org.apache.cloudstack.api.response.SecurityGroupRuleResponse; -import org.springframework.stereotype.Component; - import com.cloud.network.security.SecurityGroup; import com.cloud.network.security.SecurityRule.SecurityRuleType; import com.cloud.user.Account; -import com.cloud.utils.component.Inject; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -49,9 +47,9 @@ public class SecurityGroupJoinDaoImpl extends GenericDaoBase sgSearch; + private final SearchBuilder sgSearch; - private SearchBuilder sgIdSearch; + private final SearchBuilder sgIdSearch; protected SecurityGroupJoinDaoImpl() { diff --git a/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java index 53a0ffed445..66aecc212d8 100644 --- a/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java @@ -18,19 +18,19 @@ package com.cloud.api.query.dao; import java.util.ArrayList; 
import java.util.List; -import javax.ejb.Local; +import javax.ejb.Local; +import javax.inject.Inject; + +import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; import com.cloud.api.query.vo.StoragePoolJoinVO; import com.cloud.configuration.dao.ConfigurationDao; -import org.apache.cloudstack.api.response.StoragePoolResponse; -import org.springframework.stereotype.Component; - import com.cloud.storage.StoragePool; import com.cloud.storage.StorageStats; -import com.cloud.utils.component.Inject; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -44,9 +44,9 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase spSearch; + private final SearchBuilder spSearch; - private SearchBuilder spIdSearch; + private final SearchBuilder spIdSearch; protected StoragePoolJoinDaoImpl() { diff --git a/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index ce3d8083767..6f5587f87ea 100644 --- a/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -24,23 +24,21 @@ import java.util.List; import java.util.Set; import javax.ejb.Local; - -import org.apache.log4j.Logger; - -import com.cloud.api.ApiDBUtils; -import com.cloud.api.query.vo.ResourceTagJoinVO; -import com.cloud.api.query.vo.UserVmJoinVO; -import com.cloud.configuration.dao.ConfigurationDao; +import javax.inject.Inject; import org.apache.cloudstack.api.ApiConstants.VMDetails; import org.apache.cloudstack.api.response.NicResponse; import org.apache.cloudstack.api.response.SecurityGroupResponse; import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.api.ApiDBUtils; +import com.cloud.api.query.vo.ResourceTagJoinVO; +import com.cloud.api.query.vo.UserVmJoinVO; +import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.user.Account; import com.cloud.uservm.UserVm; -import com.cloud.utils.component.Inject; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -55,7 +53,7 @@ public class UserVmJoinDaoImpl extends GenericDaoBase implem @Inject private ConfigurationDao _configDao; - private SearchBuilder VmDetailSearch; + private final SearchBuilder VmDetailSearch; protected UserVmJoinDaoImpl() { @@ -68,6 +66,7 @@ public class UserVmJoinDaoImpl extends GenericDaoBase implem } + @Override public UserVmResponse newUserVmResponse(String objectName, UserVmJoinVO userVm, EnumSet details, Account caller) { UserVmResponse userVmResponse = new UserVmResponse(); @@ -216,8 +215,9 @@ public class UserVmJoinDaoImpl extends GenericDaoBase implem userVmResponse.setObjectName(objectName); return userVmResponse; - } + } + @Override public UserVmResponse setUserVmResponse(UserVmResponse userVmData, UserVmJoinVO uvo) { Long securityGroupId = uvo.getSecurityGroupId(); if (securityGroupId != null && securityGroupId.longValue() != 0) { diff --git a/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java index 65ecd1bc57e..495c0ebc18c 100644 --- a/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java @@ -20,26 +20,24 @@ import 
java.util.ArrayList; import java.util.List; import javax.ejb.Local; +import javax.inject.Inject; +import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; import com.cloud.api.ApiResponseHelper; import com.cloud.api.query.vo.ResourceTagJoinVO; import com.cloud.api.query.vo.VolumeJoinVO; import com.cloud.configuration.dao.ConfigurationDao; - -import org.apache.cloudstack.api.response.VolumeResponse; -import org.springframework.stereotype.Component; - import com.cloud.offering.ServiceOffering; import com.cloud.storage.Storage; import com.cloud.storage.VMTemplateHostVO; -import com.cloud.storage.Volume; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import com.cloud.storage.Volume; import com.cloud.user.Account; import com.cloud.user.UserContext; -import com.cloud.utils.component.Inject; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -53,9 +51,9 @@ public class VolumeJoinDaoImpl extends GenericDaoBase implem @Inject private ConfigurationDao _configDao; - private SearchBuilder volSearch; + private final SearchBuilder volSearch; - private SearchBuilder volIdSearch; + private final SearchBuilder volIdSearch; protected VolumeJoinDaoImpl() { @@ -176,12 +174,12 @@ public class VolumeJoinDaoImpl extends GenericDaoBase implem volResponse.setDestroyed(volume.getState() == Volume.State.Destroy); boolean isExtractable = true; if (volume.getVolumeType() != Volume.Type.DATADISK) { // Datadisk dont - // have any - // template - // dependence. + // have any + // template + // dependence. if (volume.getTemplateId() > 0) { // For ISO based volumes template - // = null and we allow extraction - // of all ISO based volumes + // = null and we allow extraction + // of all ISO based volumes isExtractable = volume.isExtractable() && volume.getTemplateType() != Storage.TemplateType.SYSTEM; } } diff --git a/server/src/com/cloud/api/response/ApiResponseSerializer.java b/server/src/com/cloud/api/response/ApiResponseSerializer.java index 470cc5f9587..11aee3d9390 100644 --- a/server/src/com/cloud/api/response/ApiResponseSerializer.java +++ b/server/src/com/cloud/api/response/ApiResponseSerializer.java @@ -37,7 +37,6 @@ import com.cloud.api.ApiResponseGsonHelper; import com.cloud.api.ApiServer; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.ResponseObject; -import com.cloud.utils.IdentityProxy; import com.cloud.utils.encoding.URLEncoder; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.uuididentity.dao.IdentityDao; @@ -226,27 +225,17 @@ public class ApiResponseSerializer { subObj.setObjectName(serializedName.value()); } serializeResponseObjXML(sb, subObj); - } else if (value instanceof IdentityProxy) { - // Only exception reponses carry a list of IdentityProxy objects. - IdentityProxy idProxy = (IdentityProxy)value; - String id = (idProxy.getValue() != null ? String.valueOf(idProxy.getValue()) : ""); - if(!id.isEmpty()) { - IdentityDao identityDao = new IdentityDaoImpl(); - id = identityDao.getIdentityUuid(idProxy.getTableName(), id); - } - if(id != null && !id.isEmpty()) { - // If this is the first IdentityProxy field encountered, put in a uuidList tag. - if (!usedUuidList) { - sb.append("<").append(serializedName.value()).append(">"); - usedUuidList = true; - } - sb.append("").append(id).append(""); - } - // Append the new idFieldName property also. 
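// Aside (illustrative sketch, not part of this patch): the serializer below hand-builds XML with
// a StringBuilder, so anything interpolated into it must be escaped first (the surrounding file
// does this via escapeSpecialXmlChars). A minimal self-contained version of that escaping and of
// the uuid-list assembly the new code performs -- the tag names follow the hunk below, the helper
// names are ours:

class XmlEscapeSketch {
    static String escapeXml(String s) {
        StringBuilder out = new StringBuilder(s.length());
        for (char c : s.toCharArray()) {
            switch (c) {
                case '<':  out.append("&lt;");   break;
                case '>':  out.append("&gt;");   break;
                case '&':  out.append("&amp;");  break;
                case '"':  out.append("&quot;"); break;
                case '\'': out.append("&apos;"); break;
                default:   out.append(c);
            }
        }
        return out.toString();
    }

    // e.g. wrapUuids("errorids", "a-1", "b-2") -> <errorids><uuid>a-1</uuid><uuid>b-2</uuid></errorids>
    static String wrapUuids(String listTag, String... uuids) {
        StringBuilder sb = new StringBuilder();
        sb.append("<").append(listTag).append(">");
        for (String uuid : uuids) {
            sb.append("<uuid>").append(escapeXml(uuid)).append("</uuid>");
        }
        sb.append("</").append(listTag).append(">");
        return sb.toString();
    }
}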
-                        String idFieldName = idProxy.getidFieldName();
-                        if (idFieldName != null) {
-                            sb.append("<uuidProperty>").append(idFieldName).append("</uuidProperty>");
-                        }
+                    } else {
+                        // Only exception responses carry a list of uuid
+                        // strings.
+                        // If this is the first IdentityProxy field
+                        // encountered, put in a uuidList tag.
+                        if (!usedUuidList) {
+                            sb.append("<").append(serializedName.value()).append(">");
+                            usedUuidList = true;
+                        }
+                        sb.append("<uuid>").append(value).append("</uuid>");
+                        // We have removed the uuid property field due to the removal of the IdentityProxy class.
                     }
                 }
                 if (usedUuidList) {
@@ -256,19 +245,6 @@
             } else if (fieldValue instanceof Date) {
                 sb.append("<").append(serializedName.value()).append(">").append(BaseCmd.getDateString((Date) fieldValue)).
                         append("</").append(serializedName.value()).append(">");
-            } else if (fieldValue instanceof IdentityProxy) {
-                IdentityProxy idProxy = (IdentityProxy)fieldValue;
-                String id = (idProxy.getValue() != null ? String.valueOf(idProxy.getValue()) : "");
-                if(!id.isEmpty()) {
-                    IdentityDao identityDao = new IdentityDaoImpl();
-                    if(idProxy.getTableName() != null) {
-                        id = identityDao.getIdentityUuid(idProxy.getTableName(), id);
-                    } else {
-                        s_logger.warn("IdentityProxy sanity check issue, invalid IdentityProxy table name found in class: " + obj.getClass().getName());
-                    }
-                }
-                if(id != null && !id.isEmpty())
-                    sb.append("<").append(serializedName.value()).append(">").append(id).append("</").append(serializedName.value()).append(">");
             } else {
                 String resultString = escapeSpecialXmlChars(fieldValue.toString());
                 if (!(obj instanceof ExceptionResponse)) {
diff --git a/server/src/com/cloud/async/AsyncJobExecutorContextImpl.java b/server/src/com/cloud/async/AsyncJobExecutorContextImpl.java
index ed6441994eb..41814480d64 100644
--- a/server/src/com/cloud/async/AsyncJobExecutorContextImpl.java
+++ b/server/src/com/cloud/async/AsyncJobExecutorContextImpl.java
@@ -36,7 +36,6 @@ import com.cloud.storage.snapshot.SnapshotManager;
 import com.cloud.user.AccountManager;
 import com.cloud.user.dao.AccountDao;
 import com.cloud.user.dao.UserDao;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.vm.UserVmManager;
 import com.cloud.vm.VirtualMachineManager;
 import com.cloud.vm.dao.DomainRouterDao;
@@ -45,8 +44,8 @@ import com.cloud.vm.dao.UserVmDao;
 @Component
 @Local(value={AsyncJobExecutorContext.class})
 public class AsyncJobExecutorContextImpl implements AsyncJobExecutorContext {
-    private String _name;
-
+    private String _name;
+
     @Inject private AgentManager _agentMgr;
     @Inject private NetworkManager _networkMgr;
     @Inject private UserVmManager _vmMgr;
@@ -62,98 +61,98 @@ public class AsyncJobExecutorContextImpl implements AsyncJobExecutorContext {
     @Inject private AsyncJobDao _jobDao;
     @Inject private UserDao _userDao;
     @Inject private VirtualMachineManager _itMgr;
-
-    @Inject private ManagementServer _managementServer;
-
-    @Override
-    public ManagementServer getManagementServer() {
-        return _managementServer;
-    }
-    @Override
-    public AgentManager getAgentMgr() {
-        return _agentMgr;
-    }
-
-    @Override
-    public NetworkManager getNetworkMgr() {
-        return _networkMgr;
-    }
-
-    @Override
-    public UserVmManager getVmMgr() {
-        return _vmMgr;
-    }
-
-    @Override
-    public StorageManager getStorageMgr() {
-        return _storageMgr;
-    }
-
-    /**server/src/com/cloud/async/AsyncJobExecutorContext.java
+    @Inject private ManagementServer _managementServer;
+
+    @Override
+    public ManagementServer getManagementServer() {
+        return _managementServer;
+    }
+
+    @Override
+    public AgentManager getAgentMgr() {
+        return _agentMgr;
+    }
+
+    @Override
+    public NetworkManager
getNetworkMgr() { + return _networkMgr; + } + + @Override + public UserVmManager getVmMgr() { + return _vmMgr; + } + + @Override + public StorageManager getStorageMgr() { + return _storageMgr; + } + + /**server/src/com/cloud/async/AsyncJobExecutorContext.java * @return the _snapMgr */ - @Override + @Override public SnapshotManager getSnapshotMgr() { return _snapMgr; } @Override - public AccountManager getAccountMgr() { - return _accountMgr; - } - - @Override - public EventDao getEventDao() { - return _eventDao; - } - - @Override - public UserVmDao getVmDao() { - return _vmDao; - } - - @Override - public AccountDao getAccountDao() { - return _accountDao; - } - - @Override - public VolumeDao getVolumeDao() { - return _volumeDao; - } + public AccountManager getAccountMgr() { + return _accountMgr; + } - @Override + @Override + public EventDao getEventDao() { + return _eventDao; + } + + @Override + public UserVmDao getVmDao() { + return _vmDao; + } + + @Override + public AccountDao getAccountDao() { + return _accountDao; + } + + @Override + public VolumeDao getVolumeDao() { + return _volumeDao; + } + + @Override public DomainRouterDao getRouterDao() { - return _routerDao; - } - - @Override + return _routerDao; + } + + @Override public IPAddressDao getIpAddressDao() { - return _ipAddressDao; + return _ipAddressDao; } - - @Override + + @Override public AsyncJobDao getJobDao() { - return _jobDao; + return _jobDao; } - - @Override + + @Override public UserDao getUserDao() { - return _userDao; + return _userDao; } - - @Override - public VirtualMachineManager getItMgr() { - return _itMgr; - } - + + @Override + public VirtualMachineManager getItMgr() { + return _itMgr; + } + @Override public boolean configure(String name, Map params) throws ConfigurationException { - _name = name; - return true; + _name = name; + return true; } - + @Override public boolean start() { return true; @@ -163,9 +162,9 @@ public class AsyncJobExecutorContextImpl implements AsyncJobExecutorContext { public boolean stop() { return true; } - + @Override public String getName() { - return _name; + return _name; } } diff --git a/server/src/com/cloud/async/AsyncJobManagerImpl.java b/server/src/com/cloud/async/AsyncJobManagerImpl.java index 25b7e6086fd..1446236445a 100644 --- a/server/src/com/cloud/async/AsyncJobManagerImpl.java +++ b/server/src/com/cloud/async/AsyncJobManagerImpl.java @@ -35,7 +35,11 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.command.user.job.QueryAsyncJobResultCmd; +import org.apache.cloudstack.api.response.ExceptionResponse; import org.apache.log4j.Logger; import org.apache.log4j.NDC; import org.springframework.stereotype.Component; @@ -43,15 +47,10 @@ import org.springframework.stereotype.Component; import com.cloud.api.ApiDispatcher; import com.cloud.api.ApiGsonHelper; import com.cloud.api.ApiSerializerHelper; -import org.apache.cloudstack.api.BaseAsyncCmd; -import org.apache.cloudstack.api.BaseCmd; -import org.apache.cloudstack.api.ServerApiException; -import org.apache.cloudstack.api.response.ExceptionResponse; import com.cloud.async.dao.AsyncJobDao; import com.cloud.cluster.ClusterManager; import com.cloud.cluster.ClusterManagerListener; import com.cloud.cluster.ManagementServerHostVO; -import com.cloud.cluster.StackMaid; import com.cloud.configuration.Config; import 
com.cloud.configuration.dao.ConfigurationDao; import com.cloud.exception.InvalidParameterValueException; @@ -64,7 +63,6 @@ import com.cloud.user.dao.AccountDao; import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.GlobalLock; @@ -80,14 +78,14 @@ import com.google.gson.reflect.TypeToken; @Local(value={AsyncJobManager.class}) public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListener { public static final Logger s_logger = Logger.getLogger(AsyncJobManagerImpl.class.getName()); - private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 3; // 3 seconds - + private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 3; // 3 seconds + private static final int MAX_ONETIME_SCHEDULE_SIZE = 50; private static final int HEARTBEAT_INTERVAL = 2000; private static final int GC_INTERVAL = 10000; // 10 seconds - + private String _name; - + @Inject private AsyncJobExecutorContext _context; @Inject private SyncQueueManager _queueMgr; @Inject private ClusterManager _clusterMgr; @@ -97,201 +95,201 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe @Inject private ConfigurationDao _configDao; private long _jobExpireSeconds = 86400; // 1 day private long _jobCancelThresholdSeconds = 3600; // 1 hour (for cancelling the jobs blocking other jobs) - - private ApiDispatcher _dispatcher; + + @Inject private ApiDispatcher _dispatcher; private final ScheduledExecutorService _heartbeatScheduler = - Executors.newScheduledThreadPool(1, new NamedThreadFactory("AsyncJobMgr-Heartbeat")); + Executors.newScheduledThreadPool(1, new NamedThreadFactory("AsyncJobMgr-Heartbeat")); private ExecutorService _executor; @Override - public AsyncJobExecutorContext getExecutorContext() { - return _context; - } - - @Override - public AsyncJobVO getAsyncJob(long jobId) { - return _jobDao.findById(jobId); + public AsyncJobExecutorContext getExecutorContext() { + return _context; } - + @Override - public AsyncJobVO findInstancePendingAsyncJob(String instanceType, long instanceId) { - return _jobDao.findInstancePendingAsyncJob(instanceType, instanceId); + public AsyncJobVO getAsyncJob(long jobId) { + return _jobDao.findById(jobId); + } + + @Override + public AsyncJobVO findInstancePendingAsyncJob(String instanceType, long instanceId) { + return _jobDao.findInstancePendingAsyncJob(instanceType, instanceId); } - + @Override public List findInstancePendingAsyncJobs(AsyncJob.Type instanceType, Long accountId) { - return _jobDao.findInstancePendingAsyncJobs(instanceType, accountId); + return _jobDao.findInstancePendingAsyncJobs(instanceType, accountId); } - + @Override - public long submitAsyncJob(AsyncJobVO job) { - return submitAsyncJob(job, false); + public long submitAsyncJob(AsyncJobVO job) { + return submitAsyncJob(job, false); } @Override @DB public long submitAsyncJob(AsyncJobVO job, boolean scheduleJobExecutionInContext) { - Transaction txt = Transaction.currentTxn(); - try { - txt.start(); - job.setInitMsid(getMsid()); - _jobDao.persist(job); - txt.commit(); + Transaction txt = Transaction.currentTxn(); + try { + txt.start(); + job.setInitMsid(getMsid()); + _jobDao.persist(job); + txt.commit(); - // no sync source originally - job.setSyncSource(null); - scheduleExecution(job, scheduleJobExecutionInContext); - if(s_logger.isDebugEnabled()) { + 
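// Aside (illustrative sketch, not part of this patch): submitAsyncJob and the completion/update
// methods below all repeat the same manual transaction shape -- start, do work, commit, roll back
// in the catch block. Distilled with a stand-in Tx type (the real class is CloudStack's
// com.cloud.utils.db.Transaction, whose API is not reproduced here):

interface Tx {
    void start();
    void commit();
    void rollback();
}

class TxTemplate {
    // Runs work inside a transaction; any failure rolls back and is rethrown unchanged.
    static void inTransaction(Tx txn, Runnable work) {
        txn.start();
        try {
            work.run();
            txn.commit();
        } catch (RuntimeException e) {
            txn.rollback();
            throw e;
        }
    }
}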
// no sync source originally + job.setSyncSource(null); + scheduleExecution(job, scheduleJobExecutionInContext); + if(s_logger.isDebugEnabled()) { s_logger.debug("submit async job-" + job.getId() + ", details: " + job.toString()); } - return job.getId(); - } catch(Exception e) { - txt.rollback(); - String errMsg = "Unable to schedule async job for command " + job.getCmd() + ", unexpected exception."; + return job.getId(); + } catch(Exception e) { + txt.rollback(); + String errMsg = "Unable to schedule async job for command " + job.getCmd() + ", unexpected exception."; s_logger.warn(errMsg, e); throw new CloudRuntimeException(errMsg); - } + } } @Override @DB public void completeAsyncJob(long jobId, int jobStatus, int resultCode, Object resultObject) { - if(s_logger.isDebugEnabled()) { + if(s_logger.isDebugEnabled()) { s_logger.debug("Complete async job-" + jobId + ", jobStatus: " + jobStatus + - ", resultCode: " + resultCode + ", result: " + resultObject); + ", resultCode: " + resultCode + ", result: " + resultObject); } - - Transaction txt = Transaction.currentTxn(); - try { - txt.start(); - AsyncJobVO job = _jobDao.findById(jobId); - if(job == null) { - if(s_logger.isDebugEnabled()) { + + Transaction txt = Transaction.currentTxn(); + try { + txt.start(); + AsyncJobVO job = _jobDao.findById(jobId); + if(job == null) { + if(s_logger.isDebugEnabled()) { s_logger.debug("job-" + jobId + " no longer exists, we just log completion info here. " + jobStatus + - ", resultCode: " + resultCode + ", result: " + resultObject); + ", resultCode: " + resultCode + ", result: " + resultObject); } - - txt.rollback(); - return; - } + + txt.rollback(); + return; + } - job.setCompleteMsid(getMsid()); - job.setStatus(jobStatus); - job.setResultCode(resultCode); + job.setCompleteMsid(getMsid()); + job.setStatus(jobStatus); + job.setResultCode(resultCode); - // reset attached object - job.setInstanceType(null); - job.setInstanceId(null); + // reset attached object + job.setInstanceType(null); + job.setInstanceId(null); - if (resultObject != null) { + if (resultObject != null) { job.setResult(ApiSerializerHelper.toSerializedStringOld(resultObject)); - } + } - job.setLastUpdated(DateUtil.currentGMTTime()); - _jobDao.update(jobId, job); - txt.commit(); - } catch(Exception e) { - s_logger.error("Unexpected exception while completing async job-" + jobId, e); - txt.rollback(); - } + job.setLastUpdated(DateUtil.currentGMTTime()); + _jobDao.update(jobId, job); + txt.commit(); + } catch(Exception e) { + s_logger.error("Unexpected exception while completing async job-" + jobId, e); + txt.rollback(); + } } @Override @DB public void updateAsyncJobStatus(long jobId, int processStatus, Object resultObject) { - if(s_logger.isDebugEnabled()) { + if(s_logger.isDebugEnabled()) { s_logger.debug("Update async-job progress, job-" + jobId + ", processStatus: " + processStatus + - ", result: " + resultObject); + ", result: " + resultObject); } - - Transaction txt = Transaction.currentTxn(); - try { - txt.start(); - AsyncJobVO job = _jobDao.findById(jobId); - if(job == null) { - if(s_logger.isDebugEnabled()) { + + Transaction txt = Transaction.currentTxn(); + try { + txt.start(); + AsyncJobVO job = _jobDao.findById(jobId); + if(job == null) { + if(s_logger.isDebugEnabled()) { s_logger.debug("job-" + jobId + " no longer exists, we just log progress info here. 
progress status: " + processStatus); } - - txt.rollback(); - return; - } - - job.setProcessStatus(processStatus); - if(resultObject != null) { + + txt.rollback(); + return; + } + + job.setProcessStatus(processStatus); + if(resultObject != null) { job.setResult(ApiSerializerHelper.toSerializedStringOld(resultObject)); } - job.setLastUpdated(DateUtil.currentGMTTime()); - _jobDao.update(jobId, job); - txt.commit(); - } catch(Exception e) { - s_logger.error("Unexpected exception while updating async job-" + jobId + " status: ", e); - txt.rollback(); - } + job.setLastUpdated(DateUtil.currentGMTTime()); + _jobDao.update(jobId, job); + txt.commit(); + } catch(Exception e) { + s_logger.error("Unexpected exception while updating async job-" + jobId + " status: ", e); + txt.rollback(); + } } @Override @DB public void updateAsyncJobAttachment(long jobId, String instanceType, Long instanceId) { - if(s_logger.isDebugEnabled()) { + if(s_logger.isDebugEnabled()) { s_logger.debug("Update async-job attachment, job-" + jobId + ", instanceType: " + instanceType + - ", instanceId: " + instanceId); + ", instanceId: " + instanceId); } - - Transaction txt = Transaction.currentTxn(); - try { - txt.start(); + + Transaction txt = Transaction.currentTxn(); + try { + txt.start(); - AsyncJobVO job = _jobDao.createForUpdate(); - //job.setInstanceType(instanceType); - job.setInstanceId(instanceId); - job.setLastUpdated(DateUtil.currentGMTTime()); - _jobDao.update(jobId, job); + AsyncJobVO job = _jobDao.createForUpdate(); + //job.setInstanceType(instanceType); + job.setInstanceId(instanceId); + job.setLastUpdated(DateUtil.currentGMTTime()); + _jobDao.update(jobId, job); - txt.commit(); - } catch(Exception e) { - s_logger.error("Unexpected exception while updating async job-" + jobId + " attachment: ", e); - txt.rollback(); - } + txt.commit(); + } catch(Exception e) { + s_logger.error("Unexpected exception while updating async job-" + jobId + " attachment: ", e); + txt.rollback(); + } } @Override public void syncAsyncJobExecution(AsyncJob job, String syncObjType, long syncObjId, long queueSizeLimit) { - // This method is re-entrant. If an API developer wants to synchronized on an object, e.g. the router, - // when executing business logic, they will call this method (actually a method in BaseAsyncCmd that calls this). - // This method will get called every time their business logic executes. The first time it exectues for a job - // there will be no sync source, but on subsequent execution there will be a sync souce. If this is the first - // time the job executes we queue the job, otherwise we just return so that the business logic can execute. + // This method is re-entrant. If an API developer wants to synchronized on an object, e.g. the router, + // when executing business logic, they will call this method (actually a method in BaseAsyncCmd that calls this). + // This method will get called every time their business logic executes. The first time it exectues for a job + // there will be no sync source, but on subsequent execution there will be a sync souce. If this is the first + // time the job executes we queue the job, otherwise we just return so that the business logic can execute. if (job.getSyncSource() != null) { return; } - + if(s_logger.isDebugEnabled()) { s_logger.debug("Sync job-" + job.getId() + " execution on object " + syncObjType + "." 
+                syncObjId);
         }

-        SyncQueueVO queue = null;
+        SyncQueueVO queue = null;
-        // to deal with temporary DB exceptions like DB deadlock/Lock-wait time out cased rollbacks
-        // we retry five times until we throw an exception
-        Random random = new Random();
+        // to deal with temporary DB exceptions like DB deadlock/Lock-wait time out caused rollbacks
+        // we retry five times until we throw an exception
+        Random random = new Random();

-        for(int i = 0; i < 5; i++) {
+        for(int i = 0; i < 5; i++) {
             queue = _queueMgr.queue(syncObjType, syncObjId, SyncQueueItem.AsyncJobContentType, job.getId(), queueSizeLimit);
-            if(queue != null) {
+            if(queue != null) {
                 break;
             }
-            try {
-                Thread.sleep(1000 + random.nextInt(5000));
-            } catch (InterruptedException e) {
-            }
-        }
+            try {
+                Thread.sleep(1000 + random.nextInt(5000));
+            } catch (InterruptedException e) {
+            }
+        }

-        if (queue == null) {
+        if (queue == null) {
             throw new CloudRuntimeException("Unable to insert queue item into database, DB is full?");
-        } else {
-            throw new AsyncCommandQueued(queue, "job-" + job.getId() + " queued");
-        }
+        } else {
+            throw new AsyncCommandQueued(queue, "job-" + job.getId() + " queued");
+        }
     }
-
+
     @Override
     public AsyncJob queryAsyncJobResult(QueryAsyncJobResultCmd cmd) {
         Account caller = UserContext.current().getCaller();
@@ -300,10 +298,10 @@
         if (job == null) {
             throw new InvalidParameterValueException("Unable to find a job by id " + cmd.getId());
         }
-
+
         User userJobOwner = _accountMgr.getUserIncludingRemoved(job.getUserId());
         Account jobOwner = _accountMgr.getAccount(userJobOwner.getAccountId());
-
+
         //check permissions
         if (caller.getType() == Account.ACCOUNT_TYPE_NORMAL) {
             //regular user can see only jobs he owns
@@ -313,7 +311,7 @@
         } else if (caller.getType() == Account.ACCOUNT_TYPE_DOMAIN_ADMIN) {
             _accountMgr.checkAccess(caller, null, true, jobOwner);
         }
-
+
         //poll the job
         queryAsyncJobResult(cmd.getId());
         return _jobDao.findById(cmd.getId());
@@ -321,56 +319,56 @@
     @Override @DB
     public AsyncJobResult queryAsyncJobResult(long jobId) {
-        if(s_logger.isTraceEnabled()) {
+        if(s_logger.isTraceEnabled()) {
             s_logger.trace("Query async-job status, job-" + jobId);
         }
-
-        Transaction txt = Transaction.currentTxn();
-        AsyncJobResult jobResult = new AsyncJobResult(jobId);
-
-        try {
-            txt.start();
-            AsyncJobVO job = _jobDao.findById(jobId);
-            if(job != null) {
-                jobResult.setCmdOriginator(job.getCmdOriginator());
-                jobResult.setJobStatus(job.getStatus());
-                jobResult.setProcessStatus(job.getProcessStatus());
-                jobResult.setResult(job.getResult());
-                jobResult.setResultCode(job.getResultCode());
-                jobResult.setUuid(job.getUuid());
-
-                if(job.getStatus() == AsyncJobResult.STATUS_SUCCEEDED ||
-                    job.getStatus() == AsyncJobResult.STATUS_FAILED) {
-
-                    if(s_logger.isDebugEnabled()) {
+
+        Transaction txt = Transaction.currentTxn();
+        AsyncJobResult jobResult = new AsyncJobResult(jobId);
+
+        try {
+            txt.start();
+            AsyncJobVO job = _jobDao.findById(jobId);
+            if(job != null) {
+                jobResult.setCmdOriginator(job.getCmdOriginator());
+                jobResult.setJobStatus(job.getStatus());
+                jobResult.setProcessStatus(job.getProcessStatus());
+                jobResult.setResult(job.getResult());
+                jobResult.setResultCode(job.getResultCode());
+                jobResult.setUuid(job.getUuid());
+
+                if(job.getStatus() == AsyncJobResult.STATUS_SUCCEEDED ||
-
+
     @Override
     public AsyncJob queryAsyncJobResult(QueryAsyncJobResultCmd cmd) {
         Account caller = UserContext.current().getCaller();
@@ -300,10 +298,10 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
         if (job == null) {
             throw new InvalidParameterValueException("Unable to find a job by id " + cmd.getId());
         }
-
+
         User userJobOwner = _accountMgr.getUserIncludingRemoved(job.getUserId());
         Account jobOwner = _accountMgr.getAccount(userJobOwner.getAccountId());
-
+
         //check permissions
         if (caller.getType() == Account.ACCOUNT_TYPE_NORMAL) {
             //regular user can see only jobs he owns
@@ -313,7 +311,7 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
         } else if (caller.getType() == Account.ACCOUNT_TYPE_DOMAIN_ADMIN) {
             _accountMgr.checkAccess(caller, null, true, jobOwner);
         }
-
+
         //poll the job
         queryAsyncJobResult(cmd.getId());
         return _jobDao.findById(cmd.getId());
@@ -321,56 +319,56 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
     @Override @DB
     public AsyncJobResult queryAsyncJobResult(long jobId) {
-        if(s_logger.isTraceEnabled()) {
+        if(s_logger.isTraceEnabled()) {
             s_logger.trace("Query async-job status, job-" + jobId);
         }
-
-        Transaction txt = Transaction.currentTxn();
-        AsyncJobResult jobResult = new AsyncJobResult(jobId);
-
-        try {
-            txt.start();
-            AsyncJobVO job = _jobDao.findById(jobId);
-            if(job != null) {
-                jobResult.setCmdOriginator(job.getCmdOriginator());
-                jobResult.setJobStatus(job.getStatus());
-                jobResult.setProcessStatus(job.getProcessStatus());
-                jobResult.setResult(job.getResult());
-                jobResult.setResultCode(job.getResultCode());
-                jobResult.setUuid(job.getUuid());
-
-                if(job.getStatus() == AsyncJobResult.STATUS_SUCCEEDED ||
-                    job.getStatus() == AsyncJobResult.STATUS_FAILED) {
-
-                    if(s_logger.isDebugEnabled()) {
+
+        Transaction txt = Transaction.currentTxn();
+        AsyncJobResult jobResult = new AsyncJobResult(jobId);
+
+        try {
+            txt.start();
+            AsyncJobVO job = _jobDao.findById(jobId);
+            if(job != null) {
+                jobResult.setCmdOriginator(job.getCmdOriginator());
+                jobResult.setJobStatus(job.getStatus());
+                jobResult.setProcessStatus(job.getProcessStatus());
+                jobResult.setResult(job.getResult());
+                jobResult.setResultCode(job.getResultCode());
+                jobResult.setUuid(job.getUuid());
+
+                if(job.getStatus() == AsyncJobResult.STATUS_SUCCEEDED ||
+                    job.getStatus() == AsyncJobResult.STATUS_FAILED) {
+
+                    if(s_logger.isDebugEnabled()) {
                         s_logger.debug("Async job-" + jobId + " completed");
                     }
-                } else {
-                    job.setLastPolled(DateUtil.currentGMTTime());
-                    _jobDao.update(jobId, job);
-                }
-            } else {
-                if(s_logger.isDebugEnabled()) {
+                } else {
+                    job.setLastPolled(DateUtil.currentGMTTime());
+                    _jobDao.update(jobId, job);
+                }
+            } else {
+                if(s_logger.isDebugEnabled()) {
                     s_logger.debug("Async job-" + jobId + " does not exist, invalid job id?");
                 }
-
-                jobResult.setJobStatus(AsyncJobResult.STATUS_FAILED);
-                jobResult.setResult("job-" + jobId + " does not exist");
-            }
-            txt.commit();
-        } catch(Exception e) {
-            s_logger.error("Unexpected exception while querying async job-" + jobId + " status: ", e);
-
-            jobResult.setJobStatus(AsyncJobResult.STATUS_FAILED);
-            jobResult.setResult("Exception: " + e.toString());
-            txt.rollback();
-        }
-
-        if(s_logger.isTraceEnabled()) {
+
+            jobResult.setJobStatus(AsyncJobResult.STATUS_FAILED);
+            jobResult.setResult("job-" + jobId + " does not exist");
+            }
+            txt.commit();
+        } catch(Exception e) {
+            s_logger.error("Unexpected exception while querying async job-" + jobId + " status: ", e);
+
+            jobResult.setJobStatus(AsyncJobResult.STATUS_FAILED);
+            jobResult.setResult("Exception: " + e.toString());
+            txt.rollback();
+        }
+
+        if(s_logger.isTraceEnabled()) {
             s_logger.trace("Job status: " + jobResult.toString());
         }
-
-        return jobResult;
+
+        return jobResult;
     }

     private void scheduleExecution(final AsyncJobVO job) {
@@ -382,7 +380,7 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
         if (executeInContext) {
             runnable.run();
         } else {
-            _executor.submit(runnable);
+            _executor.submit(runnable);
         }
     }
@@ -392,66 +390,66 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
             public void run() {
                 try {
                     long jobId = 0;
-
+
                     try {
-                        JmxUtil.registerMBean("AsyncJobManager", "Active Job " + job.getId(), new AsyncJobMBeanImpl(job));
+                        JmxUtil.registerMBean("AsyncJobManager", "Active Job " + job.getId(), new AsyncJobMBeanImpl(job));
                     } catch(Exception e) {
-                        s_logger.warn("Unable to register active job " + job.getId() + " to JMX monitoring due to exception " + ExceptionUtil.toString(e));
+                        s_logger.warn("Unable to register active job " + job.getId() + " to JMX monitoring due to exception " + ExceptionUtil.toString(e));
                     }
-
+
                     BaseAsyncCmd cmdObj = null;
                     Transaction txn = Transaction.open(Transaction.CLOUD_DB);
                     try {
                         jobId = job.getId();
                         NDC.push("job-" + jobId);
-
+
                         if(s_logger.isDebugEnabled()) {
                             s_logger.debug("Executing " + job.getCmd() + " for job-" + jobId);
                         }
-
+
                         Class cmdClass = Class.forName(job.getCmd());
                         cmdObj = (BaseAsyncCmd)cmdClass.newInstance();
                         cmdObj.setJob(job);
-
+
                         Type mapType = new TypeToken<Map<String, String>>() {}.getType();
                         Gson gson = ApiGsonHelper.getBuilder().create();
                         Map params = gson.fromJson(job.getCmdInfo(), mapType);
-
+
                         // whenever we deserialize, the UserContext needs to be updated
                         String userIdStr = params.get("ctxUserId");
                         String acctIdStr = params.get("ctxAccountId");
                         Long userId = null;
                         Account accountObject = null;
-
+
                         if (userIdStr != null) {
                             userId = Long.parseLong(userIdStr);
                         }
-
+
                         if (acctIdStr != null) {
                             accountObject = _accountDao.findById(Long.parseLong(acctIdStr));
                         }
-
+
                         UserContext.registerContext(userId, accountObject, null, false);
                         try {
                             // dispatch could ultimately queue the job
                             _dispatcher.dispatch(cmdObj, params);
-
+
                             // serialize this to the async job table
                             completeAsyncJob(jobId, AsyncJobResult.STATUS_SUCCEEDED, 0, cmdObj.getResponseObject());
                         } finally {
                             UserContext.unregisterContext();
                         }
-
+
                         // commands might need to be queued as part of synchronization here, so they just have to be re-dispatched from the queue mechanism...
                         if (job.getSyncSource() != null) {
                             _queueMgr.purgeItem(job.getSyncSource().getId());
                             checkQueue(job.getSyncSource().getQueueId());
                         }
-
+
                         if (s_logger.isDebugEnabled()) {
                             s_logger.debug("Done executing " + job.getCmd() + " for job-" + jobId);
                         }
-
+
                     } catch(Throwable e) {
                         if (e instanceof AsyncCommandQueued) {
                             if (s_logger.isDebugEnabled()) {
@@ -469,16 +467,16 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
                             errorMsg = sApiEx.getDescription();
                             errorCode = sApiEx.getErrorCode();
                         }
-
+
                         ExceptionResponse response = new ExceptionResponse();
                         response.setErrorCode(errorCode);
                         response.setErrorText(errorMsg);
                         response.setResponseName((cmdObj == null) ? "unknowncommandresponse" : cmdObj.getCommandName());
-
+
                         // FIXME: setting resultCode to BaseCmd.INTERNAL_ERROR is not right, usually executors have their exception handling
                         // and we need to preserve that as much as possible here
                         completeAsyncJob(jobId, AsyncJobResult.STATUS_FAILED, BaseCmd.INTERNAL_ERROR, response);
-
+
                         // need to clean up any queue that happened as part of the dispatching and move on to the next item in the queue
                         try {
                             if (job.getSyncSource() != null) {
@@ -490,14 +488,13 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
                             }
                         }
                     } finally {
-
+
                         try {
-                            JmxUtil.unregisterMBean("AsyncJobManager", "Active Job " + job.getId());
+                            JmxUtil.unregisterMBean("AsyncJobManager", "Active Job " + job.getId());
                         } catch(Exception e) {
-                            s_logger.warn("Unable to unregister active job " + job.getId() + " from JMX monitoring");
+                            s_logger.warn("Unable to unregister active job " + job.getId() + " from JMX monitoring");
                         }
-
-                        StackMaid.current().exitCleanup();
+
                         txn.close();
                         NDC.pop();
                     }
@@ -520,17 +517,17 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
                 job.setFromPreviousSession(fromPreviousSession);
                 job.setSyncSource(item);
-
+
                 job.setCompleteMsid(getMsid());
                 _jobDao.update(job.getId(), job);
-
+
                 try {
-                    scheduleExecution(job);
-                } catch(RejectedExecutionException e) {
-                    s_logger.warn("Execution for job-" + job.getId() + " is rejected, return it to the queue for next turn");
-                    _queueMgr.returnItem(item.getId());
-                }
-
+                    scheduleExecution(job);
+                } catch(RejectedExecutionException e) {
+                    s_logger.warn("Execution for job-" + job.getId() + " is rejected, return it to the queue for next turn");
+                    _queueMgr.returnItem(item.getId());
+                }
+
             } else {
                 if(s_logger.isDebugEnabled()) {
                     s_logger.debug("Unable to find related job for queue item: " + item.toString());
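Because a command's parameters are persisted as a JSON string in the job row, the executor must tell Gson the erased generic type of the map through a TypeToken before it can deserialize. A self-contained round trip with plain Gson (the ctx* keys match the code above; ApiGsonHelper is replaced here by a bare Gson instance for illustration):

    import java.lang.reflect.Type;
    import java.util.HashMap;
    import java.util.Map;

    import com.google.gson.Gson;
    import com.google.gson.reflect.TypeToken;

    public class CmdInfoRoundTrip {
        public static void main(String[] args) {
            Gson gson = new Gson();

            Map<String, String> params = new HashMap<String, String>();
            params.put("ctxUserId", "2");
            params.put("ctxAccountId", "2");

            // serialize the way the job's cmd info column would store it
            String cmdInfo = gson.toJson(params);

            // TypeToken captures Map<String, String> so Gson can rebuild it
            Type mapType = new TypeToken<Map<String, String>>() {}.getType();
            Map<String, String> restored = gson.fromJson(cmdInfo, mapType);

            System.out.println(restored.get("ctxUserId")); // prints 2
        }
    }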
@@ -542,121 +539,117 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
     @Override
     public void releaseSyncSource(AsyncJobExecutor executor) {
-        if(executor.getSyncSource() != null) {
-            if(s_logger.isDebugEnabled()) {
+        if(executor.getSyncSource() != null) {
+            if(s_logger.isDebugEnabled()) {
                 s_logger.debug("Release sync source for job-" + executor.getJob().getId() + " sync source: "
-                    + executor.getSyncSource().getContentType() + "-"
-                    + executor.getSyncSource().getContentId());
+                    + executor.getSyncSource().getContentType() + "-"
+                    + executor.getSyncSource().getContentId());
             }
-
-            _queueMgr.purgeItem(executor.getSyncSource().getId());
-            checkQueue(executor.getSyncSource().getQueueId());
-        }
+
+            _queueMgr.purgeItem(executor.getSyncSource().getId());
+            checkQueue(executor.getSyncSource().getQueueId());
+        }
     }
-
+
     private void checkQueue(long queueId) {
-        while(true) {
-            try {
-                SyncQueueItemVO item = _queueMgr.dequeueFromOne(queueId, getMsid());
-                if(item != null) {
-                    if(s_logger.isDebugEnabled()) {
+        while(true) {
+            try {
+                SyncQueueItemVO item = _queueMgr.dequeueFromOne(queueId, getMsid());
+                if(item != null) {
+                    if(s_logger.isDebugEnabled()) {
                         s_logger.debug("Executing sync queue item: " + item.toString());
                     }
-
-                    executeQueueItem(item, false);
-                } else {
-                    break;
-                }
-            } catch(Throwable e) {
-                s_logger.error("Unexpected exception when kicking sync queue-" + queueId, e);
-                break;
-            }
-        }
+
+                    executeQueueItem(item, false);
+                } else {
+                    break;
+                }
+            } catch(Throwable e) {
+                s_logger.error("Unexpected exception when kicking sync queue-" + queueId, e);
+                break;
+            }
+        }
     }
-
-    private Runnable getHeartbeatTask() {
-        return new Runnable() {
-            @Override
+
+    private Runnable getHeartbeatTask() {
+        return new Runnable() {
+            @Override
             public void run() {
-                try {
-                    List l = _queueMgr.dequeueFromAny(getMsid(), MAX_ONETIME_SCHEDULE_SIZE);
-                    if(l != null && l.size() > 0) {
-                        for(SyncQueueItemVO item: l) {
-                            if(s_logger.isDebugEnabled()) {
+                try {
+                    List l = _queueMgr.dequeueFromAny(getMsid(), MAX_ONETIME_SCHEDULE_SIZE);
+                    if(l != null && l.size() > 0) {
+                        for(SyncQueueItemVO item: l) {
+                            if(s_logger.isDebugEnabled()) {
                                 s_logger.debug("Execute sync-queue item: " + item.toString());
                             }
-                            executeQueueItem(item, false);
-                        }
-                    }
-                } catch(Throwable e) {
-                    s_logger.error("Unexpected exception when trying to execute queue item, ", e);
-                } finally {
-                    StackMaid.current().exitCleanup();
-                }
-            }
-        };
-    }
-
-    @DB
-    private Runnable getGCTask() {
-        return new Runnable() {
-            @Override
+                            executeQueueItem(item, false);
+                        }
+                    }
+                } catch(Throwable e) {
+                    s_logger.error("Unexpected exception when trying to execute queue item, ", e);
+                }
+            }
+        };
+    }
+
+    @DB
+    private Runnable getGCTask() {
+        return new Runnable() {
+            @Override
             public void run() {
-                GlobalLock scanLock = GlobalLock.getInternLock("AsyncJobManagerGC");
-                try {
-                    if(scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) {
-                        try {
-                            reallyRun();
-                        } finally {
-                            scanLock.unlock();
-                        }
-                    }
-                } finally {
-                    scanLock.releaseRef();
-                }
-            }
-
+                GlobalLock scanLock = GlobalLock.getInternLock("AsyncJobManagerGC");
+                try {
+                    if(scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) {
+                        try {
+                            reallyRun();
+                        } finally {
+                            scanLock.unlock();
+                        }
+                    }
+                } finally {
+                    scanLock.releaseRef();
+                }
+            }
+
             public void reallyRun() {
-                try {
-                    s_logger.trace("Begin cleanup expired async-jobs");
-
-                    Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - _jobExpireSeconds*1000);
-
-                    // limit to 100 jobs per turn, this gives cleanup throughput as 600 jobs per minute
-                    // hopefully this will be fast enough to balance potential growth of job table
-                    List l = _jobDao.getExpiredJobs(cutTime, 100);
-                    if(l != null && l.size() > 0) {
-                        for(AsyncJobVO job : l) {
+                try {
+                    s_logger.trace("Begin cleanup expired async-jobs");
+
+                    Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - _jobExpireSeconds*1000);
+
+                    // limit to 100 jobs per turn, this gives cleanup throughput as 600 jobs per minute
+                    // hopefully this will be fast enough to balance potential growth of job table
+                    List l = _jobDao.getExpiredJobs(cutTime, 100);
+                    if(l != null && l.size() > 0) {
+                        for(AsyncJobVO job : l) {
                             expungeAsyncJob(job);
-                        }
-                    }
-
+                        }
+                    }
+
                     // forcefully cancel blocking queue items if they've been staying there for too long
-                    List blockItems = _queueMgr.getBlockedQueueItems(_jobCancelThresholdSeconds*1000, false);
-                    if(blockItems != null && blockItems.size() > 0) {
-                        for(SyncQueueItemVO item : blockItems) {
+                    List blockItems = _queueMgr.getBlockedQueueItems(_jobCancelThresholdSeconds*1000, false);
+                    if(blockItems != null && blockItems.size() > 0) {
+                        for(SyncQueueItemVO item : blockItems) {
                             if(item.getContentType().equalsIgnoreCase(SyncQueueItem.AsyncJobContentType)) {
                                 completeAsyncJob(item.getContentId(), AsyncJobResult.STATUS_FAILED, 0, getResetResultResponse("Job is cancelled as it has been blocking others for too long"));
                             }
-
-                            // purge the item and resume queue processing
-                            _queueMgr.purgeItem(item.getId());
-                        }
-                    }
-
-                    s_logger.trace("End cleanup expired async-jobs");
-                } catch(Throwable e) {
-                    s_logger.error("Unexpected exception when trying to execute queue item, ", e);
-                } finally {
-                    StackMaid.current().exitCleanup();
-                }
-            }
-
-        };
-    }
-
+
+                            // purge the item and resume queue processing
+                            _queueMgr.purgeItem(item.getId());
+                        }
+                    }
+
+                    s_logger.trace("End cleanup expired async-jobs");
+                } catch(Throwable e) {
+                    s_logger.error("Unexpected exception when trying to execute queue item, ", e);
+                }
+            }
+
+
+        };
+    }
+
     @DB
     protected void expungeAsyncJob(AsyncJobVO job) {
         Transaction txn = Transaction.currentTxn();
@@ -667,132 +660,129 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe
         txn.commit();
     }

-    private long getMsid() {
-        if(_clusterMgr != null) {
+    private long getMsid() {
+        if(_clusterMgr != null) {
             return _clusterMgr.getManagementNodeId();
         }
-
-        return MacAddress.getMacAddress().toLong();
-    }
-
-    private void cleanupPendingJobs(List l) {
-        if(l != null && l.size() > 0) {
-            for(SyncQueueItemVO item: l) {
-                if(s_logger.isInfoEnabled()) {
+
+        return MacAddress.getMacAddress().toLong();
+    }
+
+    private void cleanupPendingJobs(List l) {
+        if(l != null && l.size() > 0) {
+            for(SyncQueueItemVO item: l) {
+                if(s_logger.isInfoEnabled()) {
                     s_logger.info("Discard left-over queue item: " + item.toString());
                 }
-
-                String contentType = item.getContentType();
+
+                String contentType = item.getContentType();
                 if(contentType != null && contentType.equalsIgnoreCase(SyncQueueItem.AsyncJobContentType)) {
-                    Long jobId = item.getContentId();
-                    if(jobId != null) {
-                        s_logger.warn("Mark job as failed as its correspoding queue-item has been discarded. job id: " + jobId);
-                        completeAsyncJob(jobId, AsyncJobResult.STATUS_FAILED, 0, getResetResultResponse("Execution was cancelled because of server shutdown"));
-                    }
-                }
-                _queueMgr.purgeItem(item.getId());
-            }
-        }
-    }
-
+                    Long jobId = item.getContentId();
+                    if(jobId != null) {
+                        s_logger.warn("Mark job as failed as its corresponding queue-item has been discarded. job id: " + jobId);
+                        completeAsyncJob(jobId, AsyncJobResult.STATUS_FAILED, 0, getResetResultResponse("Execution was cancelled because of server shutdown"));
+                    }
+                }
+                _queueMgr.purgeItem(item.getId());
+            }
+        }
+    }
+
     @Override
     public boolean configure(String name, Map params) throws ConfigurationException {
-        _name = name;
-
-        int expireMinutes = NumbersUtil.parseInt(
-            _configDao.getValue(Config.JobExpireMinutes.key()), 24*60);
-        _jobExpireSeconds = (long)expireMinutes*60;
-
-        _jobCancelThresholdSeconds = NumbersUtil.parseInt(
-            _configDao.getValue(Config.JobCancelThresholdMinutes.key()), 60);
-        _jobCancelThresholdSeconds *= 60;
+        _name = name;

-        _dispatcher = ApiDispatcher.getInstance();
-
+        int expireMinutes = NumbersUtil.parseInt(
+            _configDao.getValue(Config.JobExpireMinutes.key()), 24*60);
+        _jobExpireSeconds = (long)expireMinutes*60;
+
+        _jobCancelThresholdSeconds = NumbersUtil.parseInt(
+            _configDao.getValue(Config.JobCancelThresholdMinutes.key()), 60);
+        _jobCancelThresholdSeconds *= 60;
+
+        try {
+            final File dbPropsFile = PropertiesUtil.findConfigFile("db.properties");
+            final Properties dbProps = new Properties();
+            dbProps.load(new FileInputStream(dbPropsFile));

-        try {
-            final File dbPropsFile = PropertiesUtil.findConfigFile("db.properties");
-            final Properties dbProps = new Properties();
-            dbProps.load(new FileInputStream(dbPropsFile));
-
             final int cloudMaxActive = Integer.parseInt(dbProps.getProperty("db.cloud.maxActive"));
-
+
             int poolSize = (cloudMaxActive * 2) / 3;
-
+
             s_logger.info("Start AsyncJobManager thread pool in size " + poolSize);
             _executor = Executors.newFixedThreadPool(poolSize, new NamedThreadFactory("Job-Executor"));
-        } catch (final Exception e) {
-            throw new ConfigurationException("Unable to load db.properties to configure AsyncJobManagerImpl");
-        }
-
-        return true;
+        } catch (final Exception e) {
+            throw new ConfigurationException("Unable to load db.properties to configure AsyncJobManagerImpl");
+        }
+
+        return true;
     }
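configure() derives the job executor's size from the management server's database pool: two thirds of db.cloud.maxActive, so that concurrently running jobs cannot exhaust the connection pool (each in-flight job may hold a connection). A standalone sketch of the same calculation (the file path and property name follow the code above; everything else is illustrative):

    import java.io.FileInputStream;
    import java.util.Properties;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class JobPoolSizing {
        public static void main(String[] args) throws Exception {
            Properties dbProps = new Properties();
            dbProps.load(new FileInputStream("db.properties")); // path is a placeholder

            // keep the job pool below the DB pool: each running job may hold a connection
            int maxActive = Integer.parseInt(dbProps.getProperty("db.cloud.maxActive"));
            int poolSize = (maxActive * 2) / 3;

            ExecutorService executor = Executors.newFixedThreadPool(poolSize);
            System.out.println("job executor threads: " + poolSize);
            executor.shutdown();
        }
    }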
-
+
     @Override
-    public void onManagementNodeJoined(List nodeList, long selfNodeId) {
+    public void onManagementNodeJoined(List nodeList, long selfNodeId) {
     }
-
+
     @Override
-    public void onManagementNodeLeft(List nodeList, long selfNodeId) {
-        for(ManagementServerHostVO msHost : nodeList) {
-            Transaction txn = Transaction.open(Transaction.CLOUD_DB);
-            try {
-                txn.start();
-                List items = _queueMgr.getActiveQueueItems(msHost.getId(), true);
-                cleanupPendingJobs(items);
-                _jobDao.resetJobProcess(msHost.getId(), BaseCmd.INTERNAL_ERROR, getSerializedErrorMessage("job cancelled because of management server restart"));
-                txn.commit();
-            } catch(Throwable e) {
-                s_logger.warn("Unexpected exception ", e);
-                txn.rollback();
-            } finally {
-                txn.close();
-            }
-        }
+    public void onManagementNodeLeft(List nodeList, long selfNodeId) {
+        for(ManagementServerHostVO msHost : nodeList) {
+            Transaction txn = Transaction.open(Transaction.CLOUD_DB);
+            try {
+                txn.start();
+                List items = _queueMgr.getActiveQueueItems(msHost.getId(), true);
+                cleanupPendingJobs(items);
+                _jobDao.resetJobProcess(msHost.getId(), BaseCmd.INTERNAL_ERROR, getSerializedErrorMessage("job cancelled because of management server restart"));
+                txn.commit();
+            } catch(Throwable e) {
+                s_logger.warn("Unexpected exception ", e);
+                txn.rollback();
+            } finally {
+                txn.close();
+            }
+        }
     }
-
+
     @Override
-    public void onManagementNodeIsolated() {
-    }
+    public void onManagementNodeIsolated() {
+    }

     @Override
     public boolean start() {
-        try {
-            List l = _queueMgr.getActiveQueueItems(getMsid(), false);
-            cleanupPendingJobs(l);
-            _jobDao.resetJobProcess(getMsid(), BaseCmd.INTERNAL_ERROR, getSerializedErrorMessage("job cancelled because of management server restart"));
-        } catch(Throwable e) {
-            s_logger.error("Unexpected exception " + e.getMessage(), e);
-        }
-
-        _heartbeatScheduler.scheduleAtFixedRate(getHeartbeatTask(), HEARTBEAT_INTERVAL,
-            HEARTBEAT_INTERVAL, TimeUnit.MILLISECONDS);
-        _heartbeatScheduler.scheduleAtFixedRate(getGCTask(), GC_INTERVAL,
-            GC_INTERVAL, TimeUnit.MILLISECONDS);
-
+        try {
+            List l = _queueMgr.getActiveQueueItems(getMsid(), false);
+            cleanupPendingJobs(l);
+            _jobDao.resetJobProcess(getMsid(), BaseCmd.INTERNAL_ERROR, getSerializedErrorMessage("job cancelled because of management server restart"));
+        } catch(Throwable e) {
+            s_logger.error("Unexpected exception " + e.getMessage(), e);
+        }
+
+        _heartbeatScheduler.scheduleAtFixedRate(getHeartbeatTask(), HEARTBEAT_INTERVAL,
+            HEARTBEAT_INTERVAL, TimeUnit.MILLISECONDS);
+        _heartbeatScheduler.scheduleAtFixedRate(getGCTask(), GC_INTERVAL,
+            GC_INTERVAL, TimeUnit.MILLISECONDS);
+
         return true;
     }
-
+
     private static ExceptionResponse getResetResultResponse(String errorMessage) {
-        ExceptionResponse resultObject = new ExceptionResponse();
-        resultObject.setErrorCode(BaseCmd.INTERNAL_ERROR);
-        resultObject.setErrorText(errorMessage);
-        return resultObject;
+        ExceptionResponse resultObject = new ExceptionResponse();
+        resultObject.setErrorCode(BaseCmd.INTERNAL_ERROR);
+        resultObject.setErrorText(errorMessage);
+        return resultObject;
     }
-
+
     private static String getSerializedErrorMessage(String errorMessage) {
         return ApiSerializerHelper.toSerializedStringOld(getResetResultResponse(errorMessage));
     }

     @Override
     public boolean stop() {
-        _heartbeatScheduler.shutdown();
-        _executor.shutdown();
+        _heartbeatScheduler.shutdown();
+        _executor.shutdown();
         return true;
     }
-
+
     @Override
     public String getName() {
-        return _name;
+        return _name;
     }
 }
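start() drives both polling loops off one scheduler via scheduleAtFixedRate. One property of that API worth noting: if a scheduled task lets an exception escape, the scheduler silently cancels all further runs of that task, which is why the heartbeat and GC tasks above each swallow Throwable. A minimal sketch of the pattern (intervals and names are illustrative):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class FixedRateTasks {
        public static void main(String[] args) {
            ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);

            Runnable heartbeat = new Runnable() {
                @Override
                public void run() {
                    try {
                        // dequeue ready queue items and execute them, as the
                        // heartbeat task does
                        System.out.println("heartbeat tick");
                    } catch (Throwable t) {
                        // must not propagate: an escaped exception would cancel
                        // every subsequent scheduled run of this task
                        t.printStackTrace();
                    }
                }
            };

            scheduler.scheduleAtFixedRate(heartbeat, 2000, 2000, TimeUnit.MILLISECONDS);
        }
    }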
diff --git a/server/src/com/cloud/async/SyncQueueManagerImpl.java b/server/src/com/cloud/async/SyncQueueManagerImpl.java
index 97ce8a6ccad..4f0daa00bfa 100644
--- a/server/src/com/cloud/async/SyncQueueManagerImpl.java
+++ b/server/src/com/cloud/async/SyncQueueManagerImpl.java
@@ -31,7 +31,6 @@ import org.springframework.stereotype.Component;
 import com.cloud.async.dao.SyncQueueDao;
 import com.cloud.async.dao.SyncQueueItemDao;
 import com.cloud.utils.DateUtil;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.Transaction;
 import com.cloud.utils.exception.CloudRuntimeException;
@@ -40,9 +39,9 @@ import com.cloud.utils.exception.CloudRuntimeException;
 @Local(value={SyncQueueManager.class})
 public class SyncQueueManagerImpl implements SyncQueueManager {
     public static final Logger s_logger = Logger.getLogger(SyncQueueManagerImpl.class.getName());
-
+
     private String _name;
-
+
     @Inject private SyncQueueDao _syncQueueDao;
     @Inject private SyncQueueItemDao _syncQueueItemDao;
@@ -50,203 +49,203 @@ public class SyncQueueManagerImpl implements SyncQueueManager {
     @DB
     public SyncQueueVO queue(String syncObjType, long syncObjId, String itemType, long itemId, long queueSizeLimit) {
         Transaction txn = Transaction.currentTxn();
-        try {
-            txn.start();
-
-            _syncQueueDao.ensureQueue(syncObjType, syncObjId);
-            SyncQueueVO queueVO = _syncQueueDao.find(syncObjType, syncObjId);
-            if(queueVO == null)
-                throw new CloudRuntimeException("Unable to queue item into DB, DB is full?");
+        try {
+            txn.start();

-            queueVO.setQueueSizeLimit(queueSizeLimit);
-            _syncQueueDao.update(queueVO.getId(), queueVO);
-
-            Date dt = DateUtil.currentGMTTime();
-            SyncQueueItemVO item = new SyncQueueItemVO();
-            item.setQueueId(queueVO.getId());
-            item.setContentType(itemType);
-            item.setContentId(itemId);
-            item.setCreated(dt);
-
-            _syncQueueItemDao.persist(item);
-            txn.commit();
-
-            return queueVO;
-        } catch(Exception e) {
-            s_logger.error("Unexpected exception: ", e);
-            txn.rollback();
-        }
-        return null;
+            _syncQueueDao.ensureQueue(syncObjType, syncObjId);
+            SyncQueueVO queueVO = _syncQueueDao.find(syncObjType, syncObjId);
+            if(queueVO == null)
+                throw new CloudRuntimeException("Unable to queue item into DB, DB is full?");
+
+            queueVO.setQueueSizeLimit(queueSizeLimit);
+            _syncQueueDao.update(queueVO.getId(), queueVO);
+
+            Date dt = DateUtil.currentGMTTime();
+            SyncQueueItemVO item = new SyncQueueItemVO();
+            item.setQueueId(queueVO.getId());
+            item.setContentType(itemType);
+            item.setContentId(itemId);
+            item.setCreated(dt);
+
+            _syncQueueItemDao.persist(item);
+            txn.commit();
+
+            return queueVO;
+        } catch(Exception e) {
+            s_logger.error("Unexpected exception: ", e);
+            txn.rollback();
+        }
+        return null;
     }
-
+
     @Override
     @DB
     public SyncQueueItemVO dequeueFromOne(long queueId, Long msid) {
-        Transaction txt = Transaction.currentTxn();
-        try {
-            txt.start();
-
-            SyncQueueVO queueVO = _syncQueueDao.lockRow(queueId, true);
-            if(queueVO == null) {
-                s_logger.error("Sync queue(id: " + queueId + ") does not exist");
-                txt.commit();
-                return null;
-            }
-
-            if(queueReadyToProcess(queueVO)) {
-                SyncQueueItemVO itemVO = _syncQueueItemDao.getNextQueueItem(queueVO.getId());
-                if(itemVO != null) {
-                    Long processNumber = queueVO.getLastProcessNumber();
-                    if(processNumber == null)
-                        processNumber = new Long(1);
-                    else
-                        processNumber = processNumber + 1;
-                    Date dt = DateUtil.currentGMTTime();
-                    queueVO.setLastProcessNumber(processNumber);
-                    queueVO.setLastUpdated(dt);
-                    queueVO.setQueueSize(queueVO.getQueueSize() + 1);
-                    _syncQueueDao.update(queueVO.getId(), queueVO);
-
-                    itemVO.setLastProcessMsid(msid);
-                    itemVO.setLastProcessNumber(processNumber);
-                    itemVO.setLastProcessTime(dt);
-                    _syncQueueItemDao.update(itemVO.getId(), itemVO);
-
-                    txt.commit();
-                    return itemVO;
-                } else {
-                    if(s_logger.isDebugEnabled())
-                        s_logger.debug("Sync queue (" + queueId + ") is currently empty");
-                }
-            } else {
-                if(s_logger.isDebugEnabled())
-                    s_logger.debug("There is a pending process in sync queue(id: " + queueId + ")");
-            }
-            txt.commit();
-        } catch(Exception e) {
-            s_logger.error("Unexpected exception: ", e);
-            txt.rollback();
-        }
-
-        return null;
+        Transaction txt = Transaction.currentTxn();
+        try {
+            txt.start();
+
+            SyncQueueVO queueVO = _syncQueueDao.lockRow(queueId, true);
+            if(queueVO == null) {
+                s_logger.error("Sync queue(id: " + queueId + ") does not exist");
+                txt.commit();
+                return null;
+            }
+
+            if(queueReadyToProcess(queueVO)) {
+                SyncQueueItemVO itemVO = _syncQueueItemDao.getNextQueueItem(queueVO.getId());
+                if(itemVO != null) {
+                    Long processNumber = queueVO.getLastProcessNumber();
+                    if(processNumber == null)
+                        processNumber = new Long(1);
+                    else
+                        processNumber = processNumber + 1;
+                    Date dt = DateUtil.currentGMTTime();
+                    queueVO.setLastProcessNumber(processNumber);
+                    queueVO.setLastUpdated(dt);
+                    queueVO.setQueueSize(queueVO.getQueueSize() + 1);
+                    _syncQueueDao.update(queueVO.getId(), queueVO);
+
+                    itemVO.setLastProcessMsid(msid);
+                    itemVO.setLastProcessNumber(processNumber);
+                    itemVO.setLastProcessTime(dt);
+                    _syncQueueItemDao.update(itemVO.getId(), itemVO);
+
+                    txt.commit();
+                    return itemVO;
+                } else {
+                    if(s_logger.isDebugEnabled())
+                        s_logger.debug("Sync queue (" + queueId + ") is currently empty");
+                }
+            } else {
+                if(s_logger.isDebugEnabled())
+                    s_logger.debug("There is a pending process in sync queue(id: " + queueId + ")");
+            }
+            txt.commit();
+        } catch(Exception e) {
+            s_logger.error("Unexpected exception: ", e);
+            txt.rollback();
+        }
+
+        return null;
     }
-
+
     @Override
     @DB
     public List dequeueFromAny(Long msid, int maxItems) {
-
-        List resultList = new ArrayList();
-        Transaction txt = Transaction.currentTxn();
-        try {
-            txt.start();
-
-            List l = _syncQueueItemDao.getNextQueueItems(maxItems);
-            if(l != null && l.size() > 0) {
-                for(SyncQueueItemVO item : l) {
-                    SyncQueueVO queueVO = _syncQueueDao.lockRow(item.getQueueId(), true);
-                    SyncQueueItemVO itemVO = _syncQueueItemDao.lockRow(item.getId(), true);
-                    if(queueReadyToProcess(queueVO) && itemVO.getLastProcessNumber() == null) {
-                        Long processNumber = queueVO.getLastProcessNumber();
-                        if(processNumber == null)
-                            processNumber = new Long(1);
-                        else
-                            processNumber = processNumber + 1;
-
-                        Date dt = DateUtil.currentGMTTime();
-                        queueVO.setLastProcessNumber(processNumber);
-                        queueVO.setLastUpdated(dt);
-                        queueVO.setQueueSize(queueVO.getQueueSize() + 1);
-                        _syncQueueDao.update(queueVO.getId(), queueVO);
-
-                        itemVO.setLastProcessMsid(msid);
-                        itemVO.setLastProcessNumber(processNumber);
-                        itemVO.setLastProcessTime(dt);
-                        _syncQueueItemDao.update(item.getId(), itemVO);
-
-                        resultList.add(item);
-                    }
-                }
-            }
-            txt.commit();
-            return resultList;
-        } catch(Exception e) {
-            s_logger.error("Unexpected exception: ", e);
-            txt.rollback();
-        }
-        return null;
+
+        List resultList = new ArrayList();
+        Transaction txt = Transaction.currentTxn();
+        try {
+            txt.start();
+
+            List l = _syncQueueItemDao.getNextQueueItems(maxItems);
+            if(l != null && l.size() > 0) {
+                for(SyncQueueItemVO item : l) {
+                    SyncQueueVO queueVO = _syncQueueDao.lockRow(item.getQueueId(), true);
+                    SyncQueueItemVO itemVO = _syncQueueItemDao.lockRow(item.getId(), true);
+                    if(queueReadyToProcess(queueVO) && itemVO.getLastProcessNumber() == null) {
+                        Long processNumber = queueVO.getLastProcessNumber();
+                        if(processNumber == null)
+                            processNumber = new Long(1);
+                        else
+                            processNumber = processNumber + 1;
+
+                        Date dt = DateUtil.currentGMTTime();
+                        queueVO.setLastProcessNumber(processNumber);
+                        queueVO.setLastUpdated(dt);
+                        queueVO.setQueueSize(queueVO.getQueueSize() + 1);
+                        _syncQueueDao.update(queueVO.getId(), queueVO);
+
+                        itemVO.setLastProcessMsid(msid);
+                        itemVO.setLastProcessNumber(processNumber);
+                        itemVO.setLastProcessTime(dt);
+                        _syncQueueItemDao.update(item.getId(), itemVO);
+
+                        resultList.add(item);
+                    }
+                }
+            }
+            txt.commit();
+            return resultList;
+        } catch(Exception e) {
+            s_logger.error("Unexpected exception: ", e);
+            txt.rollback();
+        }
+        return null;
     }
-
+
     @Override
     @DB
     public void purgeItem(long queueItemId) {
-        Transaction txt = Transaction.currentTxn();
-        try {
-            txt.start();
-
-            SyncQueueItemVO itemVO = _syncQueueItemDao.findById(queueItemId);
-            if(itemVO != null) {
-                SyncQueueVO queueVO = _syncQueueDao.lockRow(itemVO.getQueueId(), true);
-
-                _syncQueueItemDao.expunge(itemVO.getId());
-
-                //if item is active, reset queue information
-                if (itemVO.getLastProcessMsid() != null) {
-                    queueVO.setLastUpdated(DateUtil.currentGMTTime());
-                    //decrement the count
-                    assert (queueVO.getQueueSize() > 0) : "Count reduce happens when it's already <= 0!";
-                    queueVO.setQueueSize(queueVO.getQueueSize() - 1);
-                    _syncQueueDao.update(queueVO.getId(), queueVO);
-                }
-            }
-            txt.commit();
-        } catch(Exception e) {
-            s_logger.error("Unexpected exception: ", e);
-            txt.rollback();
-        }
+        Transaction txt = Transaction.currentTxn();
+        try {
+            txt.start();
+
+            SyncQueueItemVO itemVO = _syncQueueItemDao.findById(queueItemId);
+            if(itemVO != null) {
+                SyncQueueVO queueVO = _syncQueueDao.lockRow(itemVO.getQueueId(), true);
+
+                _syncQueueItemDao.expunge(itemVO.getId());
+
+                //if item is active, reset queue information
+                if (itemVO.getLastProcessMsid() != null) {
+                    queueVO.setLastUpdated(DateUtil.currentGMTTime());
+                    //decrement the count
+                    assert (queueVO.getQueueSize() > 0) : "Count reduce happens when it's already <= 0!";
+                    queueVO.setQueueSize(queueVO.getQueueSize() - 1);
+                    _syncQueueDao.update(queueVO.getId(), queueVO);
+                }
+            }
+            txt.commit();
+        } catch(Exception e) {
+            s_logger.error("Unexpected exception: ", e);
+            txt.rollback();
+        }
     }
-
+
     @Override
     @DB
     public void returnItem(long queueItemId) {
-        Transaction txt = Transaction.currentTxn();
-        try {
-            txt.start();
-
-            SyncQueueItemVO itemVO = _syncQueueItemDao.findById(queueItemId);
-            if(itemVO != null) {
-                SyncQueueVO queueVO = _syncQueueDao.lockRow(itemVO.getQueueId(), true);
-
-                itemVO.setLastProcessMsid(null);
-                itemVO.setLastProcessNumber(null);
-                itemVO.setLastProcessTime(null);
-                _syncQueueItemDao.update(queueItemId, itemVO);
-
-                queueVO.setLastUpdated(DateUtil.currentGMTTime());
-                _syncQueueDao.update(queueVO.getId(), queueVO);
-            }
-            txt.commit();
-        } catch(Exception e) {
-            s_logger.error("Unexpected exception: ", e);
-            txt.rollback();
-        }
+        Transaction txt = Transaction.currentTxn();
+        try {
+            txt.start();
+
+            SyncQueueItemVO itemVO = _syncQueueItemDao.findById(queueItemId);
+            if(itemVO != null) {
+                SyncQueueVO queueVO = _syncQueueDao.lockRow(itemVO.getQueueId(), true);
+
+                itemVO.setLastProcessMsid(null);
+                itemVO.setLastProcessNumber(null);
+                itemVO.setLastProcessTime(null);
+                _syncQueueItemDao.update(queueItemId, itemVO);
+
+                queueVO.setLastUpdated(DateUtil.currentGMTTime());
+                _syncQueueDao.update(queueVO.getId(), queueVO);
+            }
+            txt.commit();
+        } catch(Exception e) {
+            s_logger.error("Unexpected exception: ", e);
+            txt.rollback();
+        }
     }
-
+
     @Override
-    public List getActiveQueueItems(Long msid, boolean exclusive) {
-        return _syncQueueItemDao.getActiveQueueItems(msid, exclusive);
+    public List getActiveQueueItems(Long msid, boolean exclusive) {
+        return _syncQueueItemDao.getActiveQueueItems(msid, exclusive);
     }
-
+
     @Override
     public List getBlockedQueueItems(long thresholdMs, boolean exclusive) {
         return _syncQueueItemDao.getBlockedQueueItems(thresholdMs, exclusive);
     }
-
+
     @Override
     public boolean configure(String name, Map params) throws ConfigurationException {
-        _name = name;
-        return true;
+        _name = name;
+        return true;
     }
-
+
     @Override
     public boolean start() {
         return true;
@@ -256,16 +255,16 @@ public class SyncQueueManagerImpl implements SyncQueueManager {
     public boolean stop() {
         return true;
     }
-
+
     @Override
     public String getName() {
-        return _name;
+        return _name;
     }

     private boolean queueReadyToProcess(SyncQueueVO queueVO) {
         return queueVO.getQueueSize() < queueVO.getQueueSizeLimit();
     }
-
+
     @Override
     public void purgeAsyncJobQueueItemId(long asyncJobId) {
         Long itemId = _syncQueueItemDao.getQueueItemIdByContentIdAndType(asyncJobId, SyncQueueItem.AsyncJobContentType);
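Taken together, queue/dequeueFromOne/purgeItem implement a per-object queue whose queue size counts checked-out items and whose size limit gates further dequeues (queueReadyToProcess). A toy in-memory model of that invariant, for illustration only, not the DAO-backed implementation above:

    import java.util.ArrayDeque;
    import java.util.Deque;

    // Toy model of one sync queue: items are admitted FIFO, at most
    // queueSizeLimit of them may be checked out at a time, and purge()
    // frees a slot the way purgeItem() does for an active item.
    public class SyncQueueModel {
        private final Deque<Long> pending = new ArrayDeque<Long>();
        private long queueSize = 0;            // items currently checked out
        private final long queueSizeLimit;

        public SyncQueueModel(long queueSizeLimit) {
            this.queueSizeLimit = queueSizeLimit;
        }

        private boolean readyToProcess() {     // mirrors queueReadyToProcess()
            return queueSize < queueSizeLimit;
        }

        public void enqueue(long itemId) {     // mirrors queue()
            pending.add(itemId);
        }

        public Long dequeue() {                // mirrors dequeueFromOne()
            if (!readyToProcess() || pending.isEmpty()) {
                return null;                   // blocked or empty
            }
            queueSize++;
            return pending.poll();
        }

        public void purge() {                  // item finished: free a slot
            if (queueSize > 0) {
                queueSize--;
            }
        }
    }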
diff --git a/server/src/com/cloud/async/dao/SyncQueueItemDaoImpl.java b/server/src/com/cloud/async/dao/SyncQueueItemDaoImpl.java
index 8ee21f39af5..d2d292976d8 100644
--- a/server/src/com/cloud/async/dao/SyncQueueItemDaoImpl.java
+++ b/server/src/com/cloud/async/dao/SyncQueueItemDaoImpl.java
@@ -33,6 +33,7 @@ import org.springframework.stereotype.Component;

 import com.cloud.async.SyncQueueItemVO;
 import com.cloud.utils.DateUtil;
+import com.cloud.utils.db.DB;
 import com.cloud.utils.db.Filter;
 import com.cloud.utils.db.GenericDaoBase;
 import com.cloud.utils.db.GenericSearchBuilder;
@@ -43,6 +44,7 @@ import com.cloud.utils.db.Transaction;

 @Component
 @Local(value = { SyncQueueItemDao.class })
+@DB
 public class SyncQueueItemDaoImpl extends GenericDaoBase implements SyncQueueItemDao {
     private static final Logger s_logger = Logger.getLogger(SyncQueueItemDaoImpl.class);
     final GenericSearchBuilder queueIdSearch;
diff --git a/server/src/com/cloud/baremetal/BareMetalVmManagerImpl.java b/server/src/com/cloud/baremetal/BareMetalVmManagerImpl.java
index 1ce9b33c009..35983fa90be 100755
--- a/server/src/com/cloud/baremetal/BareMetalVmManagerImpl.java
+++ b/server/src/com/cloud/baremetal/BareMetalVmManagerImpl.java
@@ -40,6 +40,9 @@ import com.cloud.agent.api.baremetal.IpmISetBootDevCommand;
 import com.cloud.agent.api.baremetal.IpmiBootorResetCommand;
 import com.cloud.agent.manager.Commands;
 import org.apache.cloudstack.api.command.user.vm.StartVMCmd;
+import org.springframework.context.annotation.Primary;
+import org.springframework.stereotype.Component;
+
 import com.cloud.baremetal.PxeServerManager.PxeServerType;
 import com.cloud.configuration.Resource.ResourceType;
 import com.cloud.configuration.dao.ConfigurationDao;
@@ -81,7 +84,7 @@ import com.cloud.user.UserContext;
 import com.cloud.uservm.UserVm;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
-import com.cloud.utils.component.Adapters;
+import com.cloud.utils.component.AdapterBase;
 import com.cloud.utils.component.Manager;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.db.DB;
@@ -102,6 +105,8 @@ import com.cloud.vm.VirtualMachineName;
 import com.cloud.vm.VirtualMachineProfile;
 import com.cloud.vm.VirtualMachineProfile.Param;

+@Component
+@Primary
 @Local(value={BareMetalVmManager.class, BareMetalVmService.class})
 public class BareMetalVmManagerImpl extends UserVmManagerImpl implements BareMetalVmManager, BareMetalVmService, Manager, StateListener {
@@ -110,7 +115,6 @@ public class BareMetalVmManagerImpl extends UserVmManagerImpl implements BareMet
     @Inject PxeServerManager _pxeMgr;
     @Inject ResourceManager _resourceMgr;

-    // @com.cloud.utils.component.Inject (adapter=TemplateAdapter.class)
     @Inject protected List _adapters;

     @PostConstruct
@@ -181,7 +185,7 @@ public class BareMetalVmManagerImpl extends UserVmManagerImpl implements BareMet
          * prepare() will check if current account has right for creating
          * template
          */
-        TemplateAdapter adapter = Adapters.getAdapterByName(_adapters, TemplateAdapterType.BareMetal.getName());
+        TemplateAdapter adapter = AdapterBase.getAdapterByName(_adapters, TemplateAdapterType.BareMetal.getName());
         Long userId = UserContext.current().getCallerUserId();
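This hunk and the PxeServerManagerImpl one below swap the removed Adapters helper for AdapterBase.getAdapterByName, a lookup of an injected adapter list by name. A simplified sketch of what such a lookup does (the interface and signature here are stand-ins for illustration, not the real AdapterBase source):

    import java.util.Arrays;
    import java.util.List;

    public class AdapterLookup {
        // Minimal stand-in for an adapter contract: anything with a name.
        interface NamedAdapter {
            String getName();
        }

        // Walk the injected list and return the first adapter whose name
        // matches; callers treat null as "no such adapter" and fail loudly.
        static <T extends NamedAdapter> T getAdapterByName(List<T> adapters, String name) {
            for (T adapter : adapters) {
                if (adapter.getName().equals(name)) {
                    return adapter;
                }
            }
            return null;
        }

        public static void main(String[] args) {
            NamedAdapter bareMetal = new NamedAdapter() {
                @Override
                public String getName() {
                    return "BareMetal";
                }
            };
            System.out.println(getAdapterByName(Arrays.asList(bareMetal), "BareMetal") != null);
        }
    }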
         userId = (userId == null ? User.UID_SYSTEM : userId);
         AccountVO account = _accountDao.findById(vm.getAccountId());
diff --git a/server/src/com/cloud/baremetal/PxeServerManagerImpl.java b/server/src/com/cloud/baremetal/PxeServerManagerImpl.java
index 6e123afc710..7a9a783969f 100755
--- a/server/src/com/cloud/baremetal/PxeServerManagerImpl.java
+++ b/server/src/com/cloud/baremetal/PxeServerManagerImpl.java
@@ -41,7 +41,7 @@ import com.cloud.resource.ResourceStateAdapter;
 import com.cloud.resource.ServerResource;
 import com.cloud.resource.UnableDeleteHostException;
 import com.cloud.uservm.UserVm;
-import com.cloud.utils.component.Adapters;
+import com.cloud.utils.component.AdapterBase;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.vm.ReservationContext;
 import com.cloud.vm.UserVmVO;
@@ -87,7 +87,7 @@ public class PxeServerManagerImpl implements PxeServerManager, ResourceStateAdap
     protected PxeServerService getServiceByType(String type) {
         PxeServerService _service;
-        _service = Adapters.getAdapterByName(_services, type);
+        _service = AdapterBase.getAdapterByName(_services, type);
         if (_service == null) {
             throw new CloudRuntimeException("Cannot find PXE service for " + type);
         }
diff --git a/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java b/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java
index e55bef69e6c..baaf39164cd 100755
--- a/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java
+++ b/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java
@@ -35,10 +35,7 @@ import com.cloud.capacity.CapacityVO;
 import com.cloud.storage.Storage;
 import com.cloud.storage.StoragePoolVO;
 import com.cloud.storage.dao.StoragePoolDao;
-import com.cloud.storage.dao.StoragePoolDaoImpl;
 import com.cloud.utils.Pair;
-import com.cloud.utils.StringUtils;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.utils.db.Filter;
 import com.cloud.utils.db.GenericDaoBase;
 import com.cloud.utils.db.GenericSearchBuilder;
@@ -60,116 +57,116 @@ public class CapacityDaoImpl extends GenericDaoBase implements
     private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART1 = "SELECT DISTINCT capacity.cluster_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster on (cluster.id = capacity.cluster_id AND cluster.removed is NULL) WHERE ";
     private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART2 = " AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ? " +
-        "AND cluster_id IN (SELECT distinct cluster_id FROM `cloud`.`op_host_capacity` WHERE ";
+        "AND cluster_id IN (SELECT distinct cluster_id FROM `cloud`.`op_host_capacity` WHERE ";
     private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART3 = " AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ?) ";
-
+
     private final SearchBuilder _hostIdTypeSearch;
-    private final SearchBuilder _hostOrPoolIdSearch;
+    private final SearchBuilder _hostOrPoolIdSearch;
     protected GenericSearchBuilder SummedCapacitySearch;
-    private SearchBuilder _allFieldsSearch;
+    private final SearchBuilder _allFieldsSearch;
     @Inject protected StoragePoolDao _storagePoolDao;
-
+
     private static final String LIST_HOSTS_IN_CLUSTER_WITH_ENOUGH_CAPACITY = "SELECT a.host_id FROM (host JOIN op_host_capacity a ON host.id = a.host_id AND host.cluster_id = ? AND host.type = ? " +
-        "AND (a.total_capacity * ? - a.used_capacity) >= ? and a.capacity_type = 1) " +
-        "JOIN op_host_capacity b ON a.host_id = b.host_id AND b.total_capacity - b.used_capacity >= ? AND b.capacity_type = 0";
+        "AND (a.total_capacity * ? - a.used_capacity) >= ? and a.capacity_type = 1) " +
+        "JOIN op_host_capacity b ON a.host_id = b.host_id AND b.total_capacity - b.used_capacity >= ? AND b.capacity_type = 0";
+
     private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1 = "SELECT cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) FROM `cloud`.`op_host_capacity` WHERE " ;
     private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2 = " AND capacity_type = ? GROUP BY cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) ASC";
-
+
     private static final String LIST_PODSINZONE_BY_HOST_CAPACITIES = "SELECT DISTINCT capacity.pod_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod " +
-        " ON (pod.id = capacity.pod_id AND pod.removed is NULL) WHERE " +
-        " capacity.data_center_id = ? AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ? " +
-        " AND pod_id IN (SELECT distinct pod_id FROM `cloud`.`op_host_capacity` WHERE " +
-        " capacity.data_center_id = ? AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ?) ";
+        " ON (pod.id = capacity.pod_id AND pod.removed is NULL) WHERE " +
+        " capacity.data_center_id = ? AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ? " +
+        " AND pod_id IN (SELECT distinct pod_id FROM `cloud`.`op_host_capacity` WHERE " +
+        " capacity.data_center_id = ? AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ?) ";

     private static final String ORDER_PODS_BY_AGGREGATE_CAPACITY = "SELECT pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) FROM `cloud`.`op_host_capacity` WHERE data_center_id = ? " +
-        " AND capacity_type = ? GROUP BY pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) ASC";
-
+        " AND capacity_type = ? GROUP BY pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) ASC";
+
     private static final String LIST_CAPACITY_BY_RESOURCE_STATE = "SELECT capacity.data_center_id, sum(capacity.used_capacity), sum(capacity.reserved_quantity), sum(capacity.total_capacity), capacity_capacity_type "+
-        "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`data_center` dc ON (dc.id = capacity.data_center_id AND dc.removed is NULL)"+
-        "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod ON (pod.id = capacity.pod_id AND pod.removed is NULL)"+
-        "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster ON (cluster.id = capacity.cluster_id AND cluster.removed is NULL)"+
-        "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host` host ON (host.id = capacity.host_id AND host.removed is NULL)"+
-        "WHERE dc.allocation_state = ? AND pod.allocation_state = ? AND cluster.allocation_state = ? AND host.resource_state = ? AND capacity_type not in (3,4) ";
+        "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`data_center` dc ON (dc.id = capacity.data_center_id AND dc.removed is NULL)"+
+        "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod ON (pod.id = capacity.pod_id AND pod.removed is NULL)"+
+        "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster ON (cluster.id = capacity.cluster_id AND cluster.removed is NULL)"+
+        "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host` host ON (host.id = capacity.host_id AND host.removed is NULL)"+
+        "WHERE dc.allocation_state = ? AND pod.allocation_state = ? AND cluster.allocation_state = ? AND host.resource_state = ? AND capacity_type not in (3,4) ";
+
     private static final String LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity)), (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end), " +
-        "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+
-        " capacity.capacity_type, capacity.data_center_id "+
-        "FROM `cloud`.`op_host_capacity` capacity "+
-        "WHERE total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled'";
+        "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+
+        " capacity.capacity_type, capacity.data_center_id "+
+        "FROM `cloud`.`op_host_capacity` capacity "+
+        "WHERE total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled'";
     private static final String LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART2 = " GROUP BY data_center_id, capacity_type order by percent desc limit ";
     private static final String LIST_CAPACITY_GROUP_BY_POD_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity)), (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end), " +
-        "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+
-        " capacity.capacity_type, capacity.data_center_id, pod_id "+
-        "FROM `cloud`.`op_host_capacity` capacity "+
-        "WHERE total_capacity > 0 AND pod_id is not null AND capacity_state='Enabled'";
+        "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+
+        " capacity.capacity_type, capacity.data_center_id, pod_id "+
+        "FROM `cloud`.`op_host_capacity` capacity "+
+        "WHERE total_capacity > 0 AND pod_id is not null AND capacity_state='Enabled'";
     private static final String LIST_CAPACITY_GROUP_BY_POD_TYPE_PART2 = " GROUP BY pod_id, capacity_type order by percent desc limit ";
-
+
     private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity)), (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end), " +
-        "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+
-        "capacity.capacity_type, capacity.data_center_id, pod_id, cluster_id "+
-        "FROM `cloud`.`op_host_capacity` capacity "+
-        "WHERE total_capacity > 0 AND cluster_id is not null AND capacity_state='Enabled'";
+        "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+
+        "capacity.capacity_type, capacity.data_center_id, pod_id, cluster_id "+
+        "FROM `cloud`.`op_host_capacity` capacity "+
+        "WHERE total_capacity > 0 AND cluster_id is not null AND capacity_state='Enabled'";
     private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART2 = " GROUP BY cluster_id, capacity_type order by percent desc limit ";
     private static final String UPDATE_CAPACITY_STATE = "UPDATE `cloud`.`op_host_capacity` SET capacity_state = ? WHERE ";

     private static final String LIST_CLUSTERS_CROSSING_THRESHOLD = "SELECT cluster_id " +
-        "FROM (SELECT cluster_id, ( (sum(capacity.used_capacity) + sum(capacity.reserved_capacity) + ?)/sum(total_capacity) ) ratio "+
-        "FROM `cloud`.`op_host_capacity` capacity "+
-        "WHERE capacity.data_center_id = ? AND capacity.capacity_type = ? AND capacity.total_capacity > 0 "+
-        "GROUP BY cluster_id) tmp " +
-        "WHERE tmp.ratio > ? ";
-
-
+        "FROM (SELECT cluster_id, ( (sum(capacity.used_capacity) + sum(capacity.reserved_capacity) + ?)/sum(total_capacity) ) ratio "+
+        "FROM `cloud`.`op_host_capacity` capacity "+
+        "WHERE capacity.data_center_id = ? AND capacity.capacity_type = ? AND capacity.total_capacity > 0 "+
+        "GROUP BY cluster_id) tmp " +
+        "WHERE tmp.ratio > ? ";
+
+
     public CapacityDaoImpl() {
-        _hostIdTypeSearch = createSearchBuilder();
-        _hostIdTypeSearch.and("hostId", _hostIdTypeSearch.entity().getHostOrPoolId(), SearchCriteria.Op.EQ);
-        _hostIdTypeSearch.and("type", _hostIdTypeSearch.entity().getCapacityType(), SearchCriteria.Op.EQ);
-        _hostIdTypeSearch.done();
-
-        _hostOrPoolIdSearch = createSearchBuilder();
-        _hostOrPoolIdSearch.and("hostId", _hostOrPoolIdSearch.entity().getHostOrPoolId(), SearchCriteria.Op.EQ);
-        _hostOrPoolIdSearch.done();
-
-        _allFieldsSearch = createSearchBuilder();
-        _allFieldsSearch.and("id", _allFieldsSearch.entity().getId(), SearchCriteria.Op.EQ);
-        _allFieldsSearch.and("hostId", _allFieldsSearch.entity().getHostOrPoolId(), SearchCriteria.Op.EQ);
-        _allFieldsSearch.and("zoneId", _allFieldsSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
-        _allFieldsSearch.and("podId", _allFieldsSearch.entity().getPodId(), SearchCriteria.Op.EQ);
-        _allFieldsSearch.and("clusterId", _allFieldsSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
-        _allFieldsSearch.and("capacityType", _allFieldsSearch.entity().getCapacityType(), SearchCriteria.Op.EQ);
-        _allFieldsSearch.and("capacityState", _allFieldsSearch.entity().getCapacityState(), SearchCriteria.Op.EQ);
-
-        _allFieldsSearch.done();
+        _hostIdTypeSearch = createSearchBuilder();
+        _hostIdTypeSearch.and("hostId", _hostIdTypeSearch.entity().getHostOrPoolId(), SearchCriteria.Op.EQ);
+        _hostIdTypeSearch.and("type", _hostIdTypeSearch.entity().getCapacityType(), SearchCriteria.Op.EQ);
+        _hostIdTypeSearch.done();
+
+        _hostOrPoolIdSearch = createSearchBuilder();
+        _hostOrPoolIdSearch.and("hostId", _hostOrPoolIdSearch.entity().getHostOrPoolId(), SearchCriteria.Op.EQ);
+        _hostOrPoolIdSearch.done();
+
+        _allFieldsSearch = createSearchBuilder();
+        _allFieldsSearch.and("id", _allFieldsSearch.entity().getId(), SearchCriteria.Op.EQ);
+        _allFieldsSearch.and("hostId", _allFieldsSearch.entity().getHostOrPoolId(), SearchCriteria.Op.EQ);
+        _allFieldsSearch.and("zoneId", _allFieldsSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
+        _allFieldsSearch.and("podId", _allFieldsSearch.entity().getPodId(), SearchCriteria.Op.EQ);
+        _allFieldsSearch.and("clusterId", _allFieldsSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
+        _allFieldsSearch.and("capacityType", _allFieldsSearch.entity().getCapacityType(), SearchCriteria.Op.EQ);
+        _allFieldsSearch.and("capacityState", _allFieldsSearch.entity().getCapacityState(), SearchCriteria.Op.EQ);
+
+        _allFieldsSearch.done();
     }
-
+
     @Override
     public List listClustersCrossingThreshold(short capacityType, Long zoneId, Float disableThreshold, long compute_requested, Float overProvFactor){
-
-        Transaction txn = Transaction.currentTxn();
-        PreparedStatement pstmt = null;
-        List result = new ArrayList();
-        StringBuilder sql = new StringBuilder(LIST_CLUSTERS_CROSSING_THRESHOLD);
-
-
-        try {
-            pstmt = txn.prepareAutoCloseStatement(sql.toString());
-            pstmt.setLong(1, compute_requested);
-            pstmt.setLong(2, zoneId);
-            pstmt.setShort(3, capacityType);
-            pstmt.setFloat(4, disableThreshold*overProvFactor);
-
-            ResultSet rs = pstmt.executeQuery();
-            while (rs.next()) {
-                result.add(rs.getLong(1));
-            }
-            return result;
-        } catch (SQLException e) {
-            throw new CloudRuntimeException("DB Exception on: " + sql, e);
-        } catch (Throwable e) {
-            throw new CloudRuntimeException("Caught: " + sql, e);
-        }
-    }
+
+        Transaction txn = Transaction.currentTxn();
+        PreparedStatement pstmt = null;
+        List result = new ArrayList();
+        StringBuilder sql = new StringBuilder(LIST_CLUSTERS_CROSSING_THRESHOLD);
+
+
+        try {
+            pstmt = txn.prepareAutoCloseStatement(sql.toString());
+            pstmt.setLong(1, compute_requested);
+            pstmt.setLong(2, zoneId);
+            pstmt.setShort(3, capacityType);
+            pstmt.setFloat(4, disableThreshold*overProvFactor);
+
+            ResultSet rs = pstmt.executeQuery();
+            while (rs.next()) {
+                result.add(rs.getLong(1));
+            }
+            return result;
+        } catch (SQLException e) {
+            throw new CloudRuntimeException("DB Exception on: " + sql, e);
+        } catch (Throwable e) {
+            throw new CloudRuntimeException("Caught: " + sql, e);
+        }
+    }

     /*public static String preparePlaceHolders(int length) {
         StringBuilder builder = new StringBuilder();
@@ -188,17 +185,17 @@ public class CapacityDaoImpl extends GenericDaoBase implements
         }
     }*/
-
+
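listClustersCrossingThreshold binds four parameters into the ratio subquery: the capacity being requested, the zone id, the capacity type, and the product of the disable threshold with the over-provisioning factor. A standalone JDBC sketch of running such a parameterized aggregate outside CloudStack's Transaction helper (connection settings and sample values are placeholders):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.util.ArrayList;
    import java.util.List;

    public class ThresholdQuery {
        public static void main(String[] args) throws Exception {
            // URL and credentials are illustrative, not CloudStack's real settings
            Connection conn = DriverManager.getConnection(
                    "jdbc:mysql://localhost:3306/cloud", "cloud", "cloud");

            String sql = "SELECT cluster_id "
                    + "FROM (SELECT cluster_id, ((SUM(used_capacity) + SUM(reserved_capacity) + ?) "
                    + "      / SUM(total_capacity)) ratio "
                    + "      FROM op_host_capacity "
                    + "      WHERE data_center_id = ? AND capacity_type = ? AND total_capacity > 0 "
                    + "      GROUP BY cluster_id) tmp "
                    + "WHERE tmp.ratio > ?";

            List<Long> crossing = new ArrayList<Long>();
            PreparedStatement pstmt = conn.prepareStatement(sql);
            try {
                pstmt.setLong(1, 4096);           // capacity being requested (sample value)
                pstmt.setLong(2, 1L);             // zone id (sample value)
                pstmt.setShort(3, (short) 0);     // capacity type (sample value)
                pstmt.setFloat(4, 0.85f * 1.0f);  // disableThreshold * overProvFactor
                ResultSet rs = pstmt.executeQuery();
                while (rs.next()) {
                    crossing.add(rs.getLong(1));
                }
            } finally {
                pstmt.close();
                conn.close();
            }
            System.out.println("clusters over threshold: " + crossing);
        }
    }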
     @Override
     public List findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId, String resource_state){
-
+
         Transaction txn = Transaction.currentTxn();
         PreparedStatement pstmt = null;
         List result = new ArrayList();
         StringBuilder sql = new StringBuilder(LIST_CAPACITY_BY_RESOURCE_STATE);
         List resourceIdList = new ArrayList();
-
+
         if (zoneId != null){
             sql.append(" AND capacity.data_center_id = ?");
             resourceIdList.add(zoneId);
@@ -237,29 +234,29 @@ public class CapacityDaoImpl extends GenericDaoBase implements
             throw new CloudRuntimeException("Caught: " + sql, e);
         }
     }
-
+
     @Override
     public List listCapacitiesGroupedByLevelAndType(Integer capacityType, Long zoneId, Long podId, Long clusterId, int level, Long limit){
-
+
         StringBuilder finalQuery = new StringBuilder();
         Transaction txn = Transaction.currentTxn();
         PreparedStatement pstmt = null;
         List result = new ArrayList();
-
+
         switch(level){
-        case 1: // List all the capacities grouped by zone, capacity Type
-            finalQuery.append(LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART1);
-            break;
-
-        case 2: // List all the capacities grouped by pod, capacity Type
-            finalQuery.append(LIST_CAPACITY_GROUP_BY_POD_TYPE_PART1);
-            break;
-
-        case 3: // List all the capacities grouped by cluster, capacity Type
-            finalQuery.append(LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART1);
-            break;
+        case 1: // List all the capacities grouped by zone, capacity Type
+            finalQuery.append(LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART1);
+            break;
+
+        case 2: // List all the capacities grouped by pod, capacity Type
+            finalQuery.append(LIST_CAPACITY_GROUP_BY_POD_TYPE_PART1);
+            break;
+
+        case 3: // List all the capacities grouped by cluster, capacity Type
+            finalQuery.append(LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART1);
+            break;
         }
-
+
         if (zoneId != null){
             finalQuery.append(" AND data_center_id="+zoneId);
         }
@@ -272,32 +269,32 @@ public class CapacityDaoImpl extends GenericDaoBase implements
         if (capacityType != null){
             finalQuery.append(" AND capacity_type="+capacityType);
         }
-
+
         switch(level){
         case 1: // List all the capacities grouped by zone, capacity Type
             finalQuery.append(LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART2);
             break;
-
+
         case 2: // List all the capacities grouped by pod, capacity Type
             finalQuery.append(LIST_CAPACITY_GROUP_BY_POD_TYPE_PART2);
             break;
-
+
         case 3: // List all the capacities grouped by cluster, capacity Type
             finalQuery.append(LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART2);
             break;
         }
-
+
         finalQuery.append(limit.toString());
-
+
         try {
             pstmt = txn.prepareAutoCloseStatement(finalQuery.toString());
             ResultSet rs = pstmt.executeQuery();
             while (rs.next()) {
                 SummedCapacity summedCapacity = new SummedCapacity( rs.getLong(1), rs.getLong(2), rs.getFloat(3),
-                    (short)rs.getLong(4), rs.getLong(5),
-                    level != 1 ? rs.getLong(6): null,
-                    level == 3 ? rs.getLong(7): null);
-
+                    (short)rs.getLong(4), rs.getLong(5),
+                    level != 1 ? rs.getLong(6): null,
+                    level == 3 ? rs.getLong(7): null);
+
                 result.add(summedCapacity);
             }
             return result;
@@ -306,61 +303,61 @@ public class CapacityDaoImpl extends GenericDaoBase implements
         } catch (Throwable e) {
             throw new CloudRuntimeException("Caught: " + finalQuery, e);
         }
-
+
     }
-
+
     @Override
     public List findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId){
-
-        SummedCapacitySearch = createSearchBuilder(SummedCapacity.class);
-        SummedCapacitySearch.select("dcId", Func.NATIVE, SummedCapacitySearch.entity().getDataCenterId());
+
+        SummedCapacitySearch = createSearchBuilder(SummedCapacity.class);
+        SummedCapacitySearch.select("dcId", Func.NATIVE, SummedCapacitySearch.entity().getDataCenterId());
         SummedCapacitySearch.select("sumUsed", Func.SUM, SummedCapacitySearch.entity().getUsedCapacity());
         SummedCapacitySearch.select("sumReserved", Func.SUM, SummedCapacitySearch.entity().getReservedCapacity());
         SummedCapacitySearch.select("sumTotal", Func.SUM, SummedCapacitySearch.entity().getTotalCapacity());
         SummedCapacitySearch.select("capacityType", Func.NATIVE, SummedCapacitySearch.entity().getCapacityType());
-
+
         if (zoneId==null && podId==null && clusterId==null){ // List all the capacities grouped by zone, capacity Type
             SummedCapacitySearch.groupBy(SummedCapacitySearch.entity().getDataCenterId(), SummedCapacitySearch.entity().getCapacityType());
         }else {
             SummedCapacitySearch.groupBy(SummedCapacitySearch.entity().getCapacityType());
         }
-
+
         if (zoneId != null){
-            SummedCapacitySearch.and("dcId", SummedCapacitySearch.entity().getDataCenterId(), Op.EQ);
+            SummedCapacitySearch.and("dcId", SummedCapacitySearch.entity().getDataCenterId(), Op.EQ);
         }
         if (podId != null){
-            SummedCapacitySearch.and("podId", SummedCapacitySearch.entity().getPodId(), Op.EQ);
+            SummedCapacitySearch.and("podId", SummedCapacitySearch.entity().getPodId(), Op.EQ);
         }
         if (clusterId != null){
-            SummedCapacitySearch.and("clusterId", SummedCapacitySearch.entity().getClusterId(), Op.EQ);
+            SummedCapacitySearch.and("clusterId", SummedCapacitySearch.entity().getClusterId(), Op.EQ);
         }
         if (capacityType != null){
-            SummedCapacitySearch.and("capacityType", SummedCapacitySearch.entity().getCapacityType(), Op.EQ);
+            SummedCapacitySearch.and("capacityType", SummedCapacitySearch.entity().getCapacityType(), Op.EQ);
         }
         SummedCapacitySearch.done();
-
-
+
+
         SearchCriteria sc = SummedCapacitySearch.create();
         if (zoneId != null){
-            sc.setParameters("dcId", zoneId);
+            sc.setParameters("dcId", zoneId);
         }
         if (podId != null){
-            sc.setParameters("podId", podId);
+            sc.setParameters("podId", podId);
         }
         if (clusterId != null){
-            sc.setParameters("clusterId", clusterId);
+            sc.setParameters("clusterId", clusterId);
         }
         if (capacityType != null){
-            sc.setParameters("capacityType", capacityType);
+            sc.setParameters("capacityType", capacityType);
         }
-
+
         Filter filter = new Filter(CapacityVO.class, null, true, null, null);
         List results = customSearchIncludingRemoved(sc, filter);
         return results;
-
+
     }
-
+
     public void updateAllocated(Long hostId, long allocatedAmount, short capacityType, boolean add) {
         Transaction txn = Transaction.currentTxn();
         PreparedStatement pstmt = null;
@@ -384,75 +381,47 @@ public class CapacityDaoImpl extends GenericDaoBase implements
         }
     }
-
+
     @Override
     public CapacityVO findByHostIdType(Long hostId, short capacityType) {
-        SearchCriteria sc = _hostIdTypeSearch.create();
-        sc.setParameters("hostId", hostId);
-        sc.setParameters("type", capacityType);
-        return findOneBy(sc);
+        SearchCriteria sc = _hostIdTypeSearch.create();
+        sc.setParameters("hostId", hostId);
+        sc.setParameters("type", capacityType);
+        return findOneBy(sc);
     }
-
+
     @Override
     public List listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor){
-        Transaction txn = Transaction.currentTxn();
+        Transaction txn = Transaction.currentTxn();
         PreparedStatement pstmt = null;
         List result = new ArrayList();

         StringBuilder sql = new StringBuilder(LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART1);
-
+
         if(isZone){
-            sql.append("capacity.data_center_id = ?");
+            sql.append("capacity.data_center_id = ?");
         }else{
-            sql.append("capacity.pod_id = ?");
+            sql.append("capacity.pod_id = ?");
         }
         sql.append(LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART2);
         if(isZone){
-            sql.append("capacity.data_center_id = ?");
+            sql.append("capacity.data_center_id = ?");
         }else{
-            sql.append("capacity.pod_id = ?");
+            sql.append("capacity.pod_id = ?");
         }
         sql.append(LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART3);

         try {
             pstmt = txn.prepareAutoCloseStatement(sql.toString());
             pstmt.setLong(1, id);
-            pstmt.setShort(2, CapacityVO.CAPACITY_TYPE_CPU);
-            pstmt.setFloat(3, cpuOverprovisioningFactor);
-            pstmt.setLong(4, requiredCpu);
-            pstmt.setLong(5, id);
-            pstmt.setShort(6, CapacityVO.CAPACITY_TYPE_MEMORY);
-            pstmt.setFloat(7, 1);
-            pstmt.setLong(8, requiredRam);
+            pstmt.setShort(2, CapacityVO.CAPACITY_TYPE_CPU);
+            pstmt.setFloat(3, cpuOverprovisioningFactor);
+            pstmt.setLong(4, requiredCpu);
+            pstmt.setLong(5, id);
+            pstmt.setShort(6, CapacityVO.CAPACITY_TYPE_MEMORY);
+            pstmt.setFloat(7, 1);
+            pstmt.setLong(8, requiredRam);

-            ResultSet rs = pstmt.executeQuery();
-            while (rs.next()) {
-                result.add(rs.getLong(1));
-            }
-            return result;
-        } catch (SQLException e) {
-            throw new CloudRuntimeException("DB Exception on: " + sql, e);
-        } catch (Throwable e) {
-            throw new CloudRuntimeException("Caught: " + sql, e);
-        }
-    }
-
-
-    @Override
-    public List listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType, float cpuOverprovisioningFactor){
-        Transaction txn = Transaction.currentTxn();
-        PreparedStatement pstmt = null;
-        List result = new ArrayList();
-
-        StringBuilder sql = new StringBuilder(LIST_HOSTS_IN_CLUSTER_WITH_ENOUGH_CAPACITY);
-        try {
-            pstmt = txn.prepareAutoCloseStatement(sql.toString());
-            pstmt.setLong(1, clusterId);
-            pstmt.setString(2, hostType);
-            pstmt.setFloat(3, cpuOverprovisioningFactor);
-            pstmt.setLong(4, requiredCpu);
-            pstmt.setLong(5, requiredRam);
-
             ResultSet rs = pstmt.executeQuery();
             while (rs.next()) {
                 result.add(rs.getLong(1));
@@ -464,61 +433,89 @@ public class CapacityDaoImpl extends GenericDaoBase implements
             throw new CloudRuntimeException("Caught: " + sql, e);
         }
     }
-
-    public static class SummedCapacity {
-        public long sumUsed;
-        public long sumReserved;
-        public long sumTotal;
-        public Float percentUsed;
-        public short capacityType;
-        public Long clusterId;
-        public Long podId;
-        public Long dcId;
-        public SummedCapacity() {
-        }
-        public SummedCapacity(long sumUsed, long sumReserved, long sumTotal,
-                short capacityType, Long clusterId, Long podId) {
-            super();
-            this.sumUsed = sumUsed;
-            this.sumReserved = sumReserved;
-            this.sumTotal = sumTotal;
-            this.capacityType = capacityType;
-            this.clusterId = clusterId;
-            this.podId = podId;
-        }
-        public SummedCapacity(long sumUsed, long sumReserved, long sumTotal,
+
+
+    @Override
+ public List listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType, float cpuOverprovisioningFactor){ + Transaction txn = Transaction.currentTxn(); + PreparedStatement pstmt = null; + List result = new ArrayList(); + + StringBuilder sql = new StringBuilder(LIST_HOSTS_IN_CLUSTER_WITH_ENOUGH_CAPACITY); + try { + pstmt = txn.prepareAutoCloseStatement(sql.toString()); + pstmt.setLong(1, clusterId); + pstmt.setString(2, hostType); + pstmt.setFloat(3, cpuOverprovisioningFactor); + pstmt.setLong(4, requiredCpu); + pstmt.setLong(5, requiredRam); + + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + result.add(rs.getLong(1)); + } + return result; + } catch (SQLException e) { + throw new CloudRuntimeException("DB Exception on: " + sql, e); + } catch (Throwable e) { + throw new CloudRuntimeException("Caught: " + sql, e); + } + } + + public static class SummedCapacity { + public long sumUsed; + public long sumReserved; + public long sumTotal; + public Float percentUsed; + public short capacityType; + public Long clusterId; + public Long podId; + public Long dcId; + public SummedCapacity() { + } + public SummedCapacity(long sumUsed, long sumReserved, long sumTotal, + short capacityType, Long clusterId, Long podId) { + super(); + this.sumUsed = sumUsed; + this.sumReserved = sumReserved; + this.sumTotal = sumTotal; + this.capacityType = capacityType; + this.clusterId = clusterId; + this.podId = podId; + } + public SummedCapacity(long sumUsed, long sumReserved, long sumTotal, short capacityType, Long clusterId, Long podId, Long zoneId) { - this(sumUsed, sumReserved, sumTotal, capacityType, clusterId, podId); - this.dcId = zoneId; - } - - public SummedCapacity(long sumUsed, long sumTotal, float percentUsed, short capacityType, Long zoneId, Long podId, Long clusterId) { - super(); - this.sumUsed = sumUsed; - this.sumTotal = sumTotal; - this.percentUsed = percentUsed; - this.capacityType = capacityType; + this(sumUsed, sumReserved, sumTotal, capacityType, clusterId, podId); + this.dcId = zoneId; + } + + public SummedCapacity(long sumUsed, long sumTotal, float percentUsed, short capacityType, Long zoneId, Long podId, Long clusterId) { + super(); + this.sumUsed = sumUsed; + this.sumTotal = sumTotal; + this.percentUsed = percentUsed; + this.capacityType = capacityType; this.clusterId = clusterId; this.podId = podId; this.dcId = zoneId; } - - public Short getCapacityType() { - return capacityType; - } - public Long getUsedCapacity() { - return sumUsed; - } - public long getReservedCapacity() { - return sumReserved; - } - public Long getTotalCapacity() { - return sumTotal; - } - public Long getDataCenterId() { + + public Short getCapacityType() { + return capacityType; + } + public Long getUsedCapacity() { + return sumUsed; + } + public long getReservedCapacity() { + return sumReserved; + } + public Long getTotalCapacity() { + return sumTotal; + } + public Long getDataCenterId() { return dcId; } - public Long getClusterId() { + public Long getClusterId() { return clusterId; } public Long getPodId() { @@ -527,110 +524,111 @@ public class CapacityDaoImpl extends GenericDaoBase implements public Float getPercentUsed() { return percentUsed; } - } - public List findByClusterPodZone(Long zoneId, Long podId, Long clusterId){ + } + @Override + public List findByClusterPodZone(Long zoneId, Long podId, Long clusterId){ - SummedCapacitySearch = createSearchBuilder(SummedCapacity.class); + SummedCapacitySearch = createSearchBuilder(SummedCapacity.class); 
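// Editor's note: SummedCapacity above is a plain holder that GenericDaoBase fills from the
// SELECT list built via SearchBuilder.select(..., Func.SUM / Func.NATIVE, ...). A minimal,
// hedged sketch of how a caller might consume the aggregated rows; the dao variable and the
// free-capacity arithmetic are illustrative, not part of this patch:
//
//     List<SummedCapacity> rows = capacityDao.findCapacityBy((int) CapacityVO.CAPACITY_TYPE_CPU, zoneId, null, null);
//     for (SummedCapacity row : rows) {
//         long free = row.getTotalCapacity() - row.getUsedCapacity() - row.getReservedCapacity();
//         // one row per capacity_type (and per zone when no scope is given)
//     }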
       SummedCapacitySearch.select("sumUsed", Func.SUM, SummedCapacitySearch.entity().getUsedCapacity());
       SummedCapacitySearch.select("sumTotal", Func.SUM, SummedCapacitySearch.entity().getTotalCapacity());
       SummedCapacitySearch.select("capacityType", Func.NATIVE, SummedCapacitySearch.entity().getCapacityType());
       SummedCapacitySearch.groupBy(SummedCapacitySearch.entity().getCapacityType());
-
+
       if(zoneId != null){
-          SummedCapacitySearch.and("zoneId", SummedCapacitySearch.entity().getDataCenterId(), Op.EQ);
+          SummedCapacitySearch.and("zoneId", SummedCapacitySearch.entity().getDataCenterId(), Op.EQ);
       }
       if (podId != null){
-          SummedCapacitySearch.and("podId", SummedCapacitySearch.entity().getPodId(), Op.EQ);
+          SummedCapacitySearch.and("podId", SummedCapacitySearch.entity().getPodId(), Op.EQ);
       }
       if (clusterId != null){
-          SummedCapacitySearch.and("clusterId", SummedCapacitySearch.entity().getClusterId(), Op.EQ);
+          SummedCapacitySearch.and("clusterId", SummedCapacitySearch.entity().getClusterId(), Op.EQ);
       }
       SummedCapacitySearch.done();
-
-
+
+
       SearchCriteria sc = SummedCapacitySearch.create();
       if (zoneId != null){
-          sc.setParameters("zoneId", zoneId);
+          sc.setParameters("zoneId", zoneId);
       }
       if (podId != null){
-          sc.setParameters("podId", podId);
+          sc.setParameters("podId", podId);
       }
       if (clusterId != null){
-          sc.setParameters("clusterId", clusterId);
+          sc.setParameters("clusterId", clusterId);
       }
-
-      return customSearchIncludingRemoved(sc, null);
-  }
-
-  @Override
-  public List findNonSharedStorageForClusterPodZone(Long zoneId, Long podId, Long clusterId){
-      SummedCapacitySearch = createSearchBuilder(SummedCapacity.class);
+
+      return customSearchIncludingRemoved(sc, null);
+  }
+
+  @Override
+  public List findNonSharedStorageForClusterPodZone(Long zoneId, Long podId, Long clusterId){
+
+      SummedCapacitySearch = createSearchBuilder(SummedCapacity.class);
      SummedCapacitySearch.select("sumUsed", Func.SUM, SummedCapacitySearch.entity().getUsedCapacity());
      SummedCapacitySearch.select("sumTotal", Func.SUM, SummedCapacitySearch.entity().getTotalCapacity());
      SummedCapacitySearch.select("capacityType", Func.NATIVE, SummedCapacitySearch.entity().getCapacityType());
      SummedCapacitySearch.and("capacityType", SummedCapacitySearch.entity().getCapacityType(), Op.EQ);
-
-      SearchBuilder nonSharedStorage = _storagePoolDao.createSearchBuilder();
-      nonSharedStorage.and("poolTypes", nonSharedStorage.entity().getPoolType(), SearchCriteria.Op.IN);
-      SummedCapacitySearch.join("nonSharedStorage", nonSharedStorage, nonSharedStorage.entity().getId(), SummedCapacitySearch.entity().getHostOrPoolId(), JoinType.INNER);
-      nonSharedStorage.done();
-
+
+      SearchBuilder nonSharedStorage = _storagePoolDao.createSearchBuilder();
+      nonSharedStorage.and("poolTypes", nonSharedStorage.entity().getPoolType(), SearchCriteria.Op.IN);
+      SummedCapacitySearch.join("nonSharedStorage", nonSharedStorage, nonSharedStorage.entity().getId(), SummedCapacitySearch.entity().getHostOrPoolId(), JoinType.INNER);
+      nonSharedStorage.done();
+
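// Editor's note: the INNER JOIN just above is how this DAO layer correlates two entity
// tables: the join key pairs storage-pool ids with capacity.host_id (getHostOrPoolId()),
// and the "poolTypes" IN-clause is bound later via sc.setJoinParameters(...). The same
// idiom, sketched with explicit generics for readability (this file itself uses raw types):
//
//     SearchBuilder<StoragePoolVO> pool = _storagePoolDao.createSearchBuilder();
//     pool.and("poolTypes", pool.entity().getPoolType(), SearchCriteria.Op.IN);
//     capacitySearch.join("pool", pool, pool.entity().getId(),
//             capacitySearch.entity().getHostOrPoolId(), JoinType.INNER);
//     pool.done();
//     // at query time:
//     sc.setJoinParameters("pool", "poolTypes", Storage.getNonSharedStoragePoolTypes().toArray());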
      if(zoneId != null){
-          SummedCapacitySearch.and("zoneId", SummedCapacitySearch.entity().getDataCenterId(), Op.EQ);
+          SummedCapacitySearch.and("zoneId", SummedCapacitySearch.entity().getDataCenterId(), Op.EQ);
      }
      if (podId != null){
-          SummedCapacitySearch.and("podId", SummedCapacitySearch.entity().getPodId(), Op.EQ);
+          SummedCapacitySearch.and("podId", SummedCapacitySearch.entity().getPodId(), Op.EQ);
      }
      if (clusterId != null){
-          SummedCapacitySearch.and("clusterId", SummedCapacitySearch.entity().getClusterId(), Op.EQ);
+          SummedCapacitySearch.and("clusterId", SummedCapacitySearch.entity().getClusterId(), Op.EQ);
      }
      SummedCapacitySearch.done();
-
-
+
+
      SearchCriteria sc = SummedCapacitySearch.create();
      sc.setJoinParameters("nonSharedStorage", "poolTypes", Storage.getNonSharedStoragePoolTypes().toArray());
      sc.setParameters("capacityType", Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED);
      if (zoneId != null){
-          sc.setParameters("zoneId", zoneId);
+          sc.setParameters("zoneId", zoneId);
      }
      if (podId != null){
-          sc.setParameters("podId", podId);
+          sc.setParameters("podId", podId);
      }
      if (clusterId != null){
-          sc.setParameters("clusterId", clusterId);
+          sc.setParameters("clusterId", clusterId);
      }
-
+
      return customSearchIncludingRemoved(sc, null);
-  }
-
+  }
+
  @Override
  public boolean removeBy(Short capacityType, Long zoneId, Long podId, Long clusterId, Long hostId) {
      SearchCriteria sc = _allFieldsSearch.create();
-
+
      if (capacityType != null) {
          sc.setParameters("capacityType", capacityType);
      }
-
+
      if (zoneId != null) {
          sc.setParameters("zoneId", zoneId);
      }
-
+
      if (podId != null) {
          sc.setParameters("podId", podId);
      }
-
+
      if (clusterId != null) {
          sc.setParameters("clusterId", clusterId);
      }
-
+
      if (hostId != null) {
          sc.setParameters("hostId", hostId);
      }
-
+
      return remove(sc) > 0;
  }
-
+
  @Override
  public Pair, Map> orderClustersByAggregateCapacity(long id, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor){
      Transaction txn = Transaction.currentTxn();
@@ -639,7 +637,7 @@
      Map clusterCapacityMap = new HashMap();
 
      StringBuilder sql = new StringBuilder(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1);
-
+
      if(isZone){
          sql.append("data_center_id = ?");
      }else{
@@ -708,13 +706,13 @@
      PreparedStatement pstmt = null;
      List result = new ArrayList();
      Map podCapacityMap = new HashMap();
-
+
      StringBuilder sql = new StringBuilder(ORDER_PODS_BY_AGGREGATE_CAPACITY);
      try {
          pstmt = txn.prepareAutoCloseStatement(sql.toString());
          pstmt.setLong(2, zoneId);
          pstmt.setShort(3, capacityTypeForOrdering);
-
+
          if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){
              pstmt.setFloat(1, cpuOverprovisioningFactor);
              pstmt.setFloat(4, cpuOverprovisioningFactor);
@@ -722,7 +720,7 @@
              pstmt.setFloat(1, 1);
              pstmt.setFloat(4, 1);
          }
-
+
          ResultSet rs = pstmt.executeQuery();
          while (rs.next()) {
              Long podId = rs.getLong(1);
@@ -736,13 +734,13 @@
          throw new CloudRuntimeException("Caught: " + sql, e);
      }
  }
-
+
  @Override
  public void updateCapacityState(Long dcId, Long podId, Long clusterId, Long hostId, String capacityState) {
      Transaction txn = Transaction.currentTxn();
      StringBuilder sql = new StringBuilder(UPDATE_CAPACITY_STATE);
      List resourceIdList = new ArrayList();
-
+
      if (dcId != null){
          sql.append(" data_center_id = ?");
          resourceIdList.add(dcId);
@@ -759,7 +757,7 @@
          sql.append(" host_id = ?");
          resourceIdList.add(hostId);
      }
-
+
      PreparedStatement pstmt = null;
      try {
          pstmt = txn.prepareAutoCloseStatement(sql.toString());
diff --git a/server/src/com/cloud/cluster/CheckPointManager.java b/server/src/com/cloud/cluster/CheckPointManager.java
deleted file mode 100644
index b6333e6c4fa..00000000000
--- a/server/src/com/cloud/cluster/CheckPointManager.java
+++ /dev/null
@@ -1,52 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.cluster;
-
-
-/**
- * TaskManager helps business logic deal with clustering failover.
- * Say you're writing code that introduces an inconsistent state over
- * several steps. What happens if the server dies in the middle
- * of your operation? Who will come back to cleanup this state? TaskManager
- * does: you push a check point before the operation and update it
- * with different content during your process. If the server dies, TaskManager
- * hands the check point to a surviving peer so the cleanup ends up
- * running elsewhere. If there are no clustered servers, then TaskManager will
- * cleanup when the dead server resumes.
- *
- */
-public interface CheckPointManager {
-    /**
-     * pushes a check point whose context is
-     * responsible for cleaning up.
-     *
-     * @param context context information to be stored.
-     * @return Check point id.
-     */
-    long pushCheckPoint(CleanupMaid context);
-
-    /**
-     * update the task with new context
-     * @param taskId
-     * @param updatedContext new updated context.
-     */
-    void updateCheckPointState(long taskId, CleanupMaid updatedContext);
-
-
-    /**
-     * removes the task as it is completed.
-     *
-     * @param taskId
-     */
-    void popCheckPoint(long taskId);
-}
diff --git a/server/src/com/cloud/cluster/CheckPointManagerImpl.java b/server/src/com/cloud/cluster/CheckPointManagerImpl.java
deleted file mode 100644
index e02a2f0dcc2..00000000000
--- a/server/src/com/cloud/cluster/CheckPointManagerImpl.java
+++ /dev/null
@@ -1,247 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
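// Editor's note: the contract deleted above brackets a multi-step operation between
// pushCheckPoint() and popCheckPoint(); if the server dies in between, a surviving peer
// replays the recorded CleanupMaid. A sketch of the intended call pattern (the maid class
// and the operation are hypothetical, for illustration only):
//
//     long checkPointId = _checkPointMgr.pushCheckPoint(new VolumeCleanupMaid(volumeId));
//     createVolumeOnBackend(volumeId);            // the step that may be interrupted
//     _checkPointMgr.popCheckPoint(checkPointId); // completed: nothing left to clean up
//     // on failure the check point stays in place, so cleanup() runs after takeover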
-package com.cloud.cluster;
-
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import javax.ejb.Local;
-import javax.inject.Inject;
-import javax.naming.ConfigurationException;
-
-import org.apache.log4j.Logger;
-import org.springframework.stereotype.Component;
-
-import com.cloud.cluster.dao.StackMaidDao;
-import com.cloud.configuration.Config;
-import com.cloud.configuration.dao.ConfigurationDao;
-import com.cloud.serializer.SerializerHelper;
-import com.cloud.utils.DateUtil;
-import com.cloud.utils.NumbersUtil;
-import com.cloud.utils.component.ComponentLocator;
-import com.cloud.utils.component.Manager;
-import com.cloud.utils.concurrency.NamedThreadFactory;
-import com.cloud.utils.db.DB;
-import com.cloud.utils.db.GlobalLock;
-
-@Component
-@Local(value=CheckPointManager.class)
-public class CheckPointManagerImpl implements CheckPointManager, Manager, ClusterManagerListener {
-    private static final Logger s_logger = Logger.getLogger(CheckPointManagerImpl.class);
-
-    private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 3; // 3 seconds
-    private int _cleanupRetryInterval;
-
-    private String _name;
-
-    @Inject
-    private StackMaidDao _maidDao;
-
-    @Inject
-    private ClusterManager _clusterMgr;
-
-    @Inject ConfigurationDao _configDao;
-
-    long _msId;
-
-    private final ScheduledExecutorService _cleanupScheduler = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Task-Cleanup"));
-
-    protected CheckPointManagerImpl() {
-    }
-
-    @Override
-    public boolean configure(String name, Map xmlParams) throws ConfigurationException {
-        _name = name;
-
-        if (s_logger.isInfoEnabled()) {
-            s_logger.info("Start configuring StackMaidManager : " + name);
-        }
-
-        StackMaid.init(ManagementServerNode.getManagementServerId());
-        _msId = ManagementServerNode.getManagementServerId();
-
-        _clusterMgr.registerListener(this);
-
-        Map params = _configDao.getConfiguration(xmlParams);
-
-        _cleanupRetryInterval = NumbersUtil.parseInt(params.get(Config.TaskCleanupRetryInterval.key()), 600);
-        _maidDao.takeover(_msId, _msId);
-        return true;
-    }
-
-    private void cleanupLeftovers(List l) {
-        for (CheckPointVO maid : l) {
-            if (StackMaid.doCleanup(maid)) {
-                _maidDao.expunge(maid.getId());
-            }
-        }
-    }
-
-    @Override
-    public void onManagementNodeIsolated() {
-    }
-
-    @DB
-    private Runnable getGCTask() {
-        return new Runnable() {
-            @Override
-            public void run() {
-                GlobalLock scanLock = GlobalLock.getInternLock("StackMaidManagerGC");
-                try {
-                    if (scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) {
-                        try {
-                            reallyRun();
-                        } finally {
-                            scanLock.unlock();
-                        }
-                    }
-                } finally {
-                    scanLock.releaseRef();
-                }
-            }
-
-            public void reallyRun() {
-                try {
-                    Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - 7200000);
-                    List l = _maidDao.listLeftoversByCutTime(cutTime);
-                    cleanupLeftovers(l);
-                } catch (Throwable e) {
-                    s_logger.error("Unexpected exception when trying to execute queue item, ", e);
-                }
-            }
-        };
-    }
-
-    @Override
-    public boolean start() {
-        _cleanupScheduler.schedule(new CleanupTask(), _cleanupRetryInterval > 0 ? _cleanupRetryInterval : 600, TimeUnit.SECONDS);
-        return true;
-    }
-
-    @Override
-    public boolean stop() {
-        return true;
-    }
-
-    @Override
-    public String getName() {
-        return _name;
-    }
-
-    @Override
-    public void onManagementNodeJoined(List nodeList, long selfNodeId) {
-        // Nothing to do
-    }
-
-    @Override
-    public void onManagementNodeLeft(List nodeList, long selfNodeId) {
-        for (ManagementServerHostVO node : nodeList) {
-            if (_maidDao.takeover(node.getMsid(), selfNodeId)) {
-                s_logger.info("Taking over from " + node.getMsid());
-                _cleanupScheduler.execute(new CleanupTask());
-            }
-        }
-    }
-
-    @Override
-    @DB
-    public long pushCheckPoint(CleanupMaid context) {
-        long seq = _maidDao.pushCleanupDelegate(_msId, 0, context.getClass().getName(), context);
-        return seq;
-    }
-
-    @Override
-    @DB
-    public void updateCheckPointState(long taskId, CleanupMaid updatedContext) {
-        CheckPointVO task = _maidDao.createForUpdate();
-        task.setDelegate(updatedContext.getClass().getName());
-        task.setContext(SerializerHelper.toSerializedStringOld(updatedContext));
-        _maidDao.update(taskId, task);
-    }
-
-    @Override
-    @DB
-    public void popCheckPoint(long taskId) {
-        _maidDao.remove(taskId);
-    }
-
-    protected boolean cleanup(CheckPointVO task) {
-        s_logger.info("Cleaning up " + task);
-        CleanupMaid delegate = (CleanupMaid)SerializerHelper.fromSerializedString(task.getContext());
-        assert delegate.getClass().getName().equals(task.getDelegate()) : "Deserializer says " + delegate.getClass().getName() + " but it's suppose to be " + task.getDelegate();
-
-        int result = delegate.cleanup(this);
-        if (result <= 0) {
-            if (result == 0) {
-                s_logger.info("Successfully cleaned up " + task.getId());
-            } else {
-                s_logger.warn("Unsuccessful in cleaning up " + task + ". Procedure to cleanup manaully: " + delegate.getCleanupProcedure());
-            }
-            popCheckPoint(task.getId());
-            return true;
-        } else {
-            s_logger.error("Unable to cleanup " + task.getId());
-            return false;
-        }
-    }
-
-    class CleanupTask implements Runnable {
-        private Date _curDate;
-        public CleanupTask() {
-            _curDate = new Date();
-        }
-
-        @Override
-        public void run() {
-            try {
-                List tasks = _maidDao.listLeftoversByCutTime(_curDate, _msId);
-                tasks.addAll(_maidDao.listCleanupTasks(_msId));
-
-                List retries = new ArrayList();
-
-                for (CheckPointVO task : tasks) {
-                    try {
-                        if (!cleanup(task)) {
-                            retries.add(task);
-                        }
-                    } catch (Exception e) {
-                        s_logger.warn("Unable to clean up " + task, e);
-
-                    }
-                }
-
-                if (retries.size() > 0) {
-                    if (_cleanupRetryInterval > 0) {
-                        _cleanupScheduler.schedule(this, _cleanupRetryInterval, TimeUnit.SECONDS);
-                    } else {
-                        for (CheckPointVO task : retries) {
-                            s_logger.warn("Cleanup procedure for " + task + ": " + ((CleanupMaid)SerializerHelper.fromSerializedString(task.getContext())).getCleanupProcedure());
-                        }
-                    }
-                }
-
-            } catch (Exception e) {
-                s_logger.error("Unable to cleanup all of the tasks for " + _msId, e);
-            }
-        }
-    }
-}
diff --git a/server/src/com/cloud/cluster/ClusterManagerImpl.java b/server/src/com/cloud/cluster/ClusterManagerImpl.java
index 013034f08a5..f71a8665951 100755
--- a/server/src/com/cloud/cluster/ClusterManagerImpl.java
+++ b/server/src/com/cloud/cluster/ClusterManagerImpl.java
@@ -29,7 +29,6 @@ import java.sql.SQLException;
 import java.sql.SQLRecoverableException;
 import java.util.ArrayList;
 import java.util.Date;
-import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -75,8 +74,6 @@ import com.cloud.utils.DateUtil;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Profiler;
 import com.cloud.utils.PropertiesUtil;
-import com.cloud.utils.component.Adapters;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.db.ConnectionConcierge;
 import com.cloud.utils.db.DB;
@@ -123,7 +120,7 @@ public class ClusterManagerImpl implements ClusterManager {
     private final ExecutorService _executor;
 
     private ClusterServiceAdapter _currentServiceAdapter;
-
+
     @Inject
     private List _serviceAdapters;
 
@@ -149,11 +146,11 @@ public class ClusterManagerImpl implements ClusterManager {
     private boolean _agentLBEnabled = false;
     private double _connectedAgentsThreshold = 0.7;
     private static boolean _agentLbHappened = false;
-
-    private List _clusterPduOutgoingQueue = new ArrayList();
-    private List _clusterPduIncomingQueue = new ArrayList();
-    private Map _outgoingPdusWaitingForAck = new HashMap();
-
+
+    private final List _clusterPduOutgoingQueue = new ArrayList();
+    private final List _clusterPduIncomingQueue = new ArrayList();
+    private final Map _outgoingPdusWaitingForAck = new HashMap();
+
     public ClusterManagerImpl() {
         _clusterPeers = new HashMap();
 
@@ -164,13 +161,13 @@ public class ClusterManagerImpl implements ClusterManager {
         // _executor = Executors.newCachedThreadPool(new NamedThreadFactory("Cluster-Worker"));
     }
-
+
     private void registerRequestPdu(ClusterServiceRequestPdu pdu) {
         synchronized(_outgoingPdusWaitingForAck) {
             _outgoingPdusWaitingForAck.put(pdu.getSequenceId(), pdu);
         }
     }
-
+
     private ClusterServiceRequestPdu popRequestPdu(long ackSequenceId) {
         synchronized(_outgoingPdusWaitingForAck) {
             if(_outgoingPdusWaitingForAck.get(ackSequenceId) != null) {
@@ -179,10 +176,10 @@ public class ClusterManagerImpl implements ClusterManager {
                 return pdu;
             }
         }
-
+
         return null;
     }
-
+
     private void cancelClusterRequestToPeer(String strPeer) {
         List candidates = new ArrayList();
         synchronized(_outgoingPdusWaitingForAck) {
@@ -195,7 +192,7 @@ public class ClusterManagerImpl implements ClusterManager {
                 _outgoingPdusWaitingForAck.remove(pdu.getSequenceId());
             }
         }
-
+
         for(ClusterServiceRequestPdu pdu : candidates) {
             s_logger.warn("Cancel cluster request PDU to peer: " + strPeer + ", pdu: " + _gson.toJson(pdu));
             synchronized(pdu) {
@@ -203,76 +200,78 @@ public class ClusterManagerImpl implements ClusterManager {
             }
         }
     }
-
+
     private void addOutgoingClusterPdu(ClusterServicePdu pdu) {
-        synchronized(_clusterPduOutgoingQueue) {
-            _clusterPduOutgoingQueue.add(pdu);
-            _clusterPduOutgoingQueue.notifyAll();
-        }
+        synchronized(_clusterPduOutgoingQueue) {
+            _clusterPduOutgoingQueue.add(pdu);
+            _clusterPduOutgoingQueue.notifyAll();
+        }
     }
-
+
     private ClusterServicePdu popOutgoingClusterPdu(long timeoutMs) {
-        synchronized(_clusterPduOutgoingQueue) {
-            try {
-                _clusterPduOutgoingQueue.wait(timeoutMs);
-            } catch (InterruptedException e) {
-            }
-
-            if(_clusterPduOutgoingQueue.size() > 0) {
-                ClusterServicePdu pdu = _clusterPduOutgoingQueue.get(0);
-                _clusterPduOutgoingQueue.remove(0);
-                return pdu;
-            }
-        }
-        return null;
+        synchronized(_clusterPduOutgoingQueue) {
+            try {
+                _clusterPduOutgoingQueue.wait(timeoutMs);
+            } catch (InterruptedException e) {
+            }
+
+            if(_clusterPduOutgoingQueue.size() > 0) {
+                ClusterServicePdu pdu = _clusterPduOutgoingQueue.get(0);
+                _clusterPduOutgoingQueue.remove(0);
+                return pdu;
+            }
+        }
+        return null;
     }
 
     private void addIncomingClusterPdu(ClusterServicePdu pdu) {
-        synchronized(_clusterPduIncomingQueue) {
-            _clusterPduIncomingQueue.add(pdu);
-            _clusterPduIncomingQueue.notifyAll();
-        }
+        synchronized(_clusterPduIncomingQueue) {
+            _clusterPduIncomingQueue.add(pdu);
+            _clusterPduIncomingQueue.notifyAll();
+        }
     }
-
+
     private ClusterServicePdu popIncomingClusterPdu(long timeoutMs) {
-        synchronized(_clusterPduIncomingQueue) {
-            try {
-                _clusterPduIncomingQueue.wait(timeoutMs);
-            } catch (InterruptedException e) {
-            }
-
-            if(_clusterPduIncomingQueue.size() > 0) {
-                ClusterServicePdu pdu = _clusterPduIncomingQueue.get(0);
-                _clusterPduIncomingQueue.remove(0);
-                return pdu;
-            }
-        }
-        return null;
+        synchronized(_clusterPduIncomingQueue) {
+            try {
+                _clusterPduIncomingQueue.wait(timeoutMs);
+            } catch (InterruptedException e) {
+            }
+
+            if(_clusterPduIncomingQueue.size() > 0) {
+                ClusterServicePdu pdu = _clusterPduIncomingQueue.get(0);
+                _clusterPduIncomingQueue.remove(0);
+                return pdu;
+            }
+        }
+        return null;
     }
-
+
     private Runnable getClusterPduSendingTask() {
         return new Runnable() {
+            @Override
             public void run() {
                 onSendingClusterPdu();
             }
         };
     }
-
+
     private Runnable getClusterPduNotificationTask() {
         return new Runnable() {
+            @Override
             public void run() {
                 onNotifyingClusterPdu();
             }
         };
     }
-
+
     private void onSendingClusterPdu() {
         while(true) {
             try {
                 ClusterServicePdu pdu = popOutgoingClusterPdu(1000);
                 if(pdu == null)
-                    continue;
-
+                    continue;
+
                 ClusterService peerService = null;
                 for(int i = 0; i < 2; i++) {
                     try {
@@ -285,20 +284,20 @@ public class ClusterManagerImpl implements ClusterManager {
                     try {
                         if(s_logger.isDebugEnabled()) {
                             s_logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + ". agent: " + pdu.getAgentId()
-                                + ", pdu seq: " + pdu.getSequenceId() + ", pdu ack seq: " + pdu.getAckSequenceId() + ", json: " + pdu.getJsonPackage());
+                                    + ", pdu seq: " + pdu.getSequenceId() + ", pdu ack seq: " + pdu.getAckSequenceId() + ", json: " + pdu.getJsonPackage());
                         }
 
                         long startTick = System.currentTimeMillis();
                         String strResult = peerService.execute(pdu);
                         if(s_logger.isDebugEnabled()) {
                             s_logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + " completed. time: " +
-                                (System.currentTimeMillis() - startTick) + "ms. agent: " + pdu.getAgentId()
-                                + ", pdu seq: " + pdu.getSequenceId() + ", pdu ack seq: " + pdu.getAckSequenceId() + ", json: " + pdu.getJsonPackage());
+                                    (System.currentTimeMillis() - startTick) + "ms. agent: " + pdu.getAgentId()
+                                    + ", pdu seq: " + pdu.getSequenceId() + ", pdu ack seq: " + pdu.getAckSequenceId() + ", json: " + pdu.getJsonPackage());
                         }
-
+
                         if("true".equals(strResult))
                             break;
-
+
                     } catch (RemoteException e) {
                         invalidatePeerService(pdu.getDestPeer());
                         if(s_logger.isInfoEnabled()) {
@@ -313,50 +312,51 @@ public class ClusterManagerImpl implements ClusterManager {
            }
        }
    }
-
+
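// Editor's note: the add*/pop* helpers above hand-roll a bounded-wait queue with
// synchronized + wait()/notifyAll(). The sketch below shows the equivalent idiom with
// java.util.concurrent, for comparison only; it is not what this patch adopts:
//
//     private final BlockingQueue<ClusterServicePdu> queue = new LinkedBlockingQueue<ClusterServicePdu>();
//
//     private void add(ClusterServicePdu pdu) {
//         queue.offer(pdu);                      // never blocks on an unbounded queue
//     }
//
//     private ClusterServicePdu pop(long timeoutMs) throws InterruptedException {
//         return queue.poll(timeoutMs, TimeUnit.MILLISECONDS); // null on timeout, like the code above
//     }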
    private void onNotifyingClusterPdu() {
        while(true) {
            try {
                final ClusterServicePdu pdu = popIncomingClusterPdu(1000);
                if(pdu == null)
-                    continue;
+                    continue;
 
                _executor.execute(new Runnable() {
-                    public void run() {
-                        if(pdu.getPduType() == ClusterServicePdu.PDU_TYPE_RESPONSE) {
-                            ClusterServiceRequestPdu requestPdu = popRequestPdu(pdu.getAckSequenceId());
-                            if(requestPdu != null) {
-                                requestPdu.setResponseResult(pdu.getJsonPackage());
-                                synchronized(requestPdu) {
-                                    requestPdu.notifyAll();
-                                }
-                            } else {
-                                s_logger.warn("Original request has already been cancelled. pdu: " + _gson.toJson(pdu));
-                            }
-                        } else {
-                            String result = dispatchClusterServicePdu(pdu);
-                            if(result == null)
-                                result = "";
-
-                            if(pdu.getPduType() == ClusterServicePdu.PDU_TYPE_REQUEST) {
-                                ClusterServicePdu responsePdu = new ClusterServicePdu();
-                                responsePdu.setPduType(ClusterServicePdu.PDU_TYPE_RESPONSE);
-                                responsePdu.setSourcePeer(pdu.getDestPeer());
-                                responsePdu.setDestPeer(pdu.getSourcePeer());
-                                responsePdu.setAckSequenceId(pdu.getSequenceId());
-                                responsePdu.setJsonPackage(result);
-
-                                addOutgoingClusterPdu(responsePdu);
-                            }
-                        }
-                    }
+                    @Override
+                    public void run() {
+                        if(pdu.getPduType() == ClusterServicePdu.PDU_TYPE_RESPONSE) {
+                            ClusterServiceRequestPdu requestPdu = popRequestPdu(pdu.getAckSequenceId());
+                            if(requestPdu != null) {
+                                requestPdu.setResponseResult(pdu.getJsonPackage());
+                                synchronized(requestPdu) {
+                                    requestPdu.notifyAll();
+                                }
+                            } else {
+                                s_logger.warn("Original request has already been cancelled. pdu: " + _gson.toJson(pdu));
+                            }
+                        } else {
+                            String result = dispatchClusterServicePdu(pdu);
+                            if(result == null)
+                                result = "";
+
+                            if(pdu.getPduType() == ClusterServicePdu.PDU_TYPE_REQUEST) {
+                                ClusterServicePdu responsePdu = new ClusterServicePdu();
+                                responsePdu.setPduType(ClusterServicePdu.PDU_TYPE_RESPONSE);
+                                responsePdu.setSourcePeer(pdu.getDestPeer());
+                                responsePdu.setDestPeer(pdu.getSourcePeer());
+                                responsePdu.setAckSequenceId(pdu.getSequenceId());
+                                responsePdu.setJsonPackage(result);
+
+                                addOutgoingClusterPdu(responsePdu);
+                            }
+                        }
+                    }
                });
            } catch(Throwable e) {
                s_logger.error("Unexcpeted exception: ", e);
            }
        }
    }
-
+
    private String dispatchClusterServicePdu(ClusterServicePdu pdu) {
 
        if(s_logger.isDebugEnabled()) {
@@ -370,7 +370,7 @@
            assert(false);
            s_logger.error("Excection in gson decoding : ", e);
        }
-
+
        if (cmds.length == 1 && cmds[0] instanceof ChangeAgentCommand) {  //intercepted
            ChangeAgentCommand cmd = (ChangeAgentCommand)cmds[0];
@@ -416,22 +416,22 @@
            answers[0] = new Answer(cmd, result, null);
            return _gson.toJson(answers);
        } else if (cmds.length == 1 && cmds[0] instanceof PropagateResourceEventCommand ) {
-            PropagateResourceEventCommand cmd = (PropagateResourceEventCommand) cmds[0];
-
-            s_logger.debug("Intercepting command to propagate event " + cmd.getEvent().name() + " for host " + cmd.getHostId());
-
-            boolean result = false;
-            try {
-                result = executeResourceUserRequest(cmd.getHostId(), cmd.getEvent());
-                s_logger.debug("Result is " + result);
-            } catch (AgentUnavailableException ex) {
-                s_logger.warn("Agent is unavailable", ex);
-                return null;
-            }
-
-            Answer[] answers = new Answer[1];
-            answers[0] = new Answer(cmd, result, null);
-            return _gson.toJson(answers);
+            PropagateResourceEventCommand cmd = (PropagateResourceEventCommand) cmds[0];
+
+            s_logger.debug("Intercepting command to propagate event " + cmd.getEvent().name() + " for host " + cmd.getHostId());
+
+            boolean result = false;
+            try {
+                result = executeResourceUserRequest(cmd.getHostId(), cmd.getEvent());
+                s_logger.debug("Result is " + result);
+            } catch (AgentUnavailableException ex) {
+                s_logger.warn("Agent is unavailable", ex);
+                return null;
+            }
+
+            Answer[] answers = new Answer[1];
+            answers[0] = new Answer(cmd, result, null);
+            return _gson.toJson(answers);
        }
 
        try {
@@ -461,14 +461,15 @@
        } catch (OperationTimedoutException e) {
            s_logger.warn("Timed Out", e);
        }
-
+
        return null;
    }
 
+    @Override
    public void OnReceiveClusterServicePdu(ClusterServicePdu pdu) {
-        addIncomingClusterPdu(pdu);
+        addIncomingClusterPdu(pdu);
    }
-
+
    @Override
    public Answer[] sendToAgent(Long hostId, Command[] cmds, boolean stopOnError) throws AgentUnavailableException, OperationTimedoutException {
        Commands commands = new Commands(stopOnError ? OnError.Stop : OnError.Continue);
@@ -558,7 +559,7 @@
            s_logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " " +
                _gson.toJson(cmds, Command[].class));
        }
-
+
        ClusterServiceRequestPdu pdu = new ClusterServiceRequestPdu();
        pdu.setSourcePeer(getSelfPeerName());
        pdu.setDestPeer(strPeer);
@@ -567,7 +568,7 @@
        pdu.setStopOnError(stopOnError);
        registerRequestPdu(pdu);
        addOutgoingClusterPdu(pdu);
-
+
        synchronized(pdu) {
            try {
                pdu.wait();
@@ -577,9 +578,9 @@
 
        if(s_logger.isDebugEnabled()) {
            s_logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " completed. result: " +
-                pdu.getResponseResult());
+                    pdu.getResponseResult());
        }
-
+
        if(pdu.getResponseResult() != null && pdu.getResponseResult().length() > 0) {
            try {
                return _gson.fromJson(pdu.getResponseResult(), Answer[].class);
@@ -590,7 +591,7 @@
 
        return null;
    }
-
+
    @Override
    public String getPeerName(long agentHostId) {
 
@@ -625,18 +626,18 @@
        // Note : we don't check duplicates
        synchronized (_listeners) {
-            s_logger.info("register cluster listener " + listener.getClass());
-
-            _listeners.add(listener);
+            s_logger.info("register cluster listener " + listener.getClass());
+
+            _listeners.add(listener);
        }
    }
 
    @Override
    public void unregisterListener(ClusterManagerListener listener) {
        synchronized(_listeners) {
-            s_logger.info("unregister cluster listener " + listener.getClass());
-
-            _listeners.remove(listener);
+            s_logger.info("unregister cluster listener " + listener.getClass());
+
+            _listeners.remove(listener);
        }
    }
@@ -663,7 +664,7 @@
        if(s_logger.isDebugEnabled()) {
            s_logger.debug("Notify management server node left to listeners.");
        }
-
+
        for(ManagementServerHostVO mshost : nodeList) {
            if(s_logger.isDebugEnabled())
                s_logger.debug("Leaving node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid());
@@ -731,32 +732,32 @@
        Profiler profilerHeartbeatUpdate = new Profiler();
        Profiler profilerPeerScan = new Profiler();
        Profiler profilerAgentLB = new Profiler();
-
+
        try {
            profiler.start();
-
+
            profilerHeartbeatUpdate.start();
            txn.transitToUserManagedConnection(getHeartbeatConnection());
            if(s_logger.isTraceEnabled()) {
                s_logger.trace("Cluster manager heartbeat update, id:" + _mshostId);
            }
-
+
            _mshostDao.update(_mshostId, getCurrentRunId(), DateUtil.currentGMTTime());
            profilerHeartbeatUpdate.stop();
-
+
            profilerPeerScan.start();
            if (s_logger.isTraceEnabled()) {
                s_logger.trace("Cluster manager peer-scan, id:" + _mshostId);
            }
-
+
            if (!_peerScanInited) {
                _peerScanInited = true;
                initPeerScan();
            }
-
+
            peerScan();
            profilerPeerScan.stop();
-
+
            profilerAgentLB.start();
            //initiate agent lb task will be scheduled and executed only once, and only when number of agents loaded exceeds _connectedAgentsThreshold
            if (_agentLBEnabled && !_agentLbHappened) {
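// Editor's note: sendToAgent()/executeAsync above park the caller on the request PDU's
// monitor (registerRequestPdu + pdu.wait()) until the notification thread matches the ack
// sequence id and calls notifyAll(). The same rendezvous expressed with
// java.util.concurrent, shown only as a comparison sketch (names are illustrative):
//
//     private final ConcurrentMap<Long, CompletableFuture<String>> pending = new ConcurrentHashMap<>();
//
//     String request(ClusterServicePdu pdu) throws Exception {
//         CompletableFuture<String> reply = new CompletableFuture<>();
//         pending.put(pdu.getSequenceId(), reply);
//         addOutgoingClusterPdu(pdu);
//         return reply.get();                    // completes when the response arrives
//     }
//
//     void onResponse(ClusterServicePdu response) {
//         CompletableFuture<String> reply = pending.remove(response.getAckSequenceId());
//         if (reply != null) reply.complete(response.getJsonPackage());
//     }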
@@ -764,7 +765,7 @@ public class ClusterManagerImpl implements ClusterManager {
                sc.addAnd(sc.getEntity().getManagementServerId(), Op.NNULL);
                sc.addAnd(sc.getEntity().getType(), Op.EQ, Host.Type.Routing);
                List allManagedRoutingAgents = sc.list();
-
+
                sc = SearchCriteria2.create(HostVO.class);
                sc.addAnd(sc.getEntity().getType(), Op.EQ, Host.Type.Routing);
                List allAgents = sc.list();
@@ -784,16 +785,16 @@
            profilerAgentLB.stop();
        } finally {
            profiler.stop();
-
+
            if(profiler.getDuration() >= _heartbeatInterval) {
                if(s_logger.isDebugEnabled())
                    s_logger.debug("Management server heartbeat takes too long to finish. profiler: " + profiler.toString() +
-                        ", profilerHeartbeatUpdate: " + profilerHeartbeatUpdate.toString() +
-                        ", profilerPeerScan: " + profilerPeerScan.toString() +
-                        ", profilerAgentLB: " + profilerAgentLB.toString());
+                            ", profilerHeartbeatUpdate: " + profilerHeartbeatUpdate.toString() +
+                            ", profilerPeerScan: " + profilerPeerScan.toString() +
+                            ", profilerAgentLB: " + profilerAgentLB.toString());
            }
        }
-
+
    } catch(CloudRuntimeException e) {
        s_logger.error("Runtime DB exception ", e.getCause());
@@ -933,33 +934,33 @@
            this._notificationMsgs.add(msg);
            this._notificationMsgs.notifyAll();
        }
-
+
        switch(msg.getMessageType()) {
        case nodeAdded:
-            {
-                List l = msg.getNodes();
-                if(l != null && l.size() > 0) {
-                    for(ManagementServerHostVO mshost: l) {
-                        _mshostPeerDao.updatePeerInfo(_mshostId, mshost.getId(), mshost.getRunid(), ManagementServerHost.State.Up);
-                    }
+            {
+                List l = msg.getNodes();
+                if(l != null && l.size() > 0) {
+                    for(ManagementServerHostVO mshost: l) {
+                        _mshostPeerDao.updatePeerInfo(_mshostId, mshost.getId(), mshost.getRunid(), ManagementServerHost.State.Up);
                    }
                }
-            break;
-
+            }
+            break;
+
        case nodeRemoved:
-            {
-                List l = msg.getNodes();
-                if(l != null && l.size() > 0) {
-                    for(ManagementServerHostVO mshost: l) {
-                        _mshostPeerDao.updatePeerInfo(_mshostId, mshost.getId(), mshost.getRunid(), ManagementServerHost.State.Down);
-                    }
+            {
+                List l = msg.getNodes();
+                if(l != null && l.size() > 0) {
+                    for(ManagementServerHostVO mshost: l) {
+                        _mshostPeerDao.updatePeerInfo(_mshostId, mshost.getId(), mshost.getRunid(), ManagementServerHost.State.Down);
                    }
                }
-            break;
-
+            }
+            break;
+
        default :
            break;
-
+
        }
    }
@@ -978,39 +979,39 @@
        // missed cleanup
        Date cutTime = DateUtil.currentGMTTime();
        List inactiveList = _mshostDao.getInactiveList(new Date(cutTime.getTime() - _heartbeatThreshold));
-
+
        // We don't have foreign key constraints to enforce the mgmt_server_id integrity in host table, when user manually
        // remove records from mshost table, this will leave orphan mgmt_serve_id reference in host table.
        List orphanList = _mshostDao.listOrphanMsids();
        if(orphanList.size() > 0) {
-            for(Long orphanMsid : orphanList) {
-                // construct fake ManagementServerHostVO based on orphan MSID
-                s_logger.info("Add orphan management server msid found in host table to initial clustering notification, orphan msid: " + orphanMsid);
-                inactiveList.add(new ManagementServerHostVO(orphanMsid, 0, "orphan", 0, new Date()));
-            }
-        } else {
-            s_logger.info("We are good, no orphan management server msid in host table is found");
-        }
-
-        if(inactiveList.size() > 0) {
-            if(s_logger.isInfoEnabled()) {
-                s_logger.info("Found " + inactiveList.size() + " inactive management server node based on timestamp");
-                for(ManagementServerHostVO host : inactiveList)
-                    s_logger.info("management server node msid: " + host.getMsid() + ", name: " + host.getName() + ", service ip: " + host.getServiceIP() + ", version: " + host.getVersion());
-            }
-
-            List downHostList = new ArrayList();
-            for(ManagementServerHostVO host : inactiveList) {
-                if(!pingManagementNode(host)) {
-                    s_logger.warn("Management node " + host.getId() + " is detected inactive by timestamp and also not pingable");
-                    downHostList.add(host);
-                }
+            for(Long orphanMsid : orphanList) {
+                // construct fake ManagementServerHostVO based on orphan MSID
+                s_logger.info("Add orphan management server msid found in host table to initial clustering notification, orphan msid: " + orphanMsid);
+                inactiveList.add(new ManagementServerHostVO(orphanMsid, 0, "orphan", 0, new Date()));
            }
-
-            if(downHostList.size() > 0)
-                this.queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeRemoved, downHostList));
        } else {
-            s_logger.info("No inactive management server node found");
+            s_logger.info("We are good, no orphan management server msid in host table is found");
+        }
+
+        if(inactiveList.size() > 0) {
+            if(s_logger.isInfoEnabled()) {
+                s_logger.info("Found " + inactiveList.size() + " inactive management server node based on timestamp");
+                for(ManagementServerHostVO host : inactiveList)
+                    s_logger.info("management server node msid: " + host.getMsid() + ", name: " + host.getName() + ", service ip: " + host.getServiceIP() + ", version: " + host.getVersion());
+            }
+
+            List downHostList = new ArrayList();
+            for(ManagementServerHostVO host : inactiveList) {
+                if(!pingManagementNode(host)) {
+                    s_logger.warn("Management node " + host.getId() + " is detected inactive by timestamp and also not pingable");
+                    downHostList.add(host);
+                }
+            }
+
+            if(downHostList.size() > 0)
+                this.queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeRemoved, downHostList));
+        } else {
+            s_logger.info("No inactive management server node found");
        }
    }
@@ -1019,7 +1020,7 @@
 
        Profiler profiler = new Profiler();
        profiler.start();
-
+
        Profiler profilerQueryActiveList = new Profiler();
        profilerQueryActiveList.start();
        List currentList = _mshostDao.getActiveList(new Date(cutTime.getTime() - _heartbeatThreshold));
@@ -1031,13 +1032,13 @@
        List invalidatedNodeList = new ArrayList();
 
        if(_mshostId != null) {
-
+
            if(_mshostPeerDao.countStateSeenInPeers(_mshostId, _runId, ManagementServerHost.State.Down) > 0) {
                String msg = "We have detected that at least one management server peer reports that this management server is down, perform active fencing to avoid split-brain situation";
                s_logger.error(msg);
                throw new ActiveFencingException(msg);
            }
-
+
            // only if we have already attached to cluster, will we start to check leaving nodes
            for(Map.Entry entry : _activePeers.entrySet()) {
@@ -1070,7 +1071,7 @@
            }
        }
        profilerSyncClusterInfo.stop();
-
+
        Profiler profilerInvalidatedNodeList = new Profiler();
        profilerInvalidatedNodeList.start();
        // process invalidated node list
@@ -1134,16 +1135,16 @@
        if(newNodeList.size() > 0) {
            this.queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeAdded, newNodeList));
        }
-
+
        profiler.stop();
-
+
        if(profiler.getDuration() >= this._heartbeatInterval) {
            if(s_logger.isDebugEnabled())
                s_logger.debug("Peer scan takes too long to finish. profiler: " + profiler.toString()
-                    + ", profilerQueryActiveList: " + profilerQueryActiveList.toString()
-                    + ", profilerSyncClusterInfo: " + profilerSyncClusterInfo.toString()
-                    + ", profilerInvalidatedNodeList: " + profilerInvalidatedNodeList.toString()
-                    + ", profilerRemovedList: " + profilerRemovedList.toString());
+                        + ", profilerQueryActiveList: " + profilerQueryActiveList.toString()
+                        + ", profilerSyncClusterInfo: " + profilerSyncClusterInfo.toString()
+                        + ", profilerInvalidatedNodeList: " + profilerInvalidatedNodeList.toString()
+                        + ", profilerRemovedList: " + profilerRemovedList.toString());
        }
    }
@@ -1206,7 +1207,7 @@
        if (s_logger.isInfoEnabled()) {
            s_logger.info("Management server (host id : " + _mshostId + ") is being started at " + _clusterNodeIP + ":" + _currentServiceAdapter.getServicePort());
        }
-
+
        _mshostPeerDao.clearPeerInfo(_mshostId);
 
        // use seperate thread for heartbeat updates
@@ -1294,8 +1295,8 @@
        }
 
        for(int i = 0; i < DEFAULT_OUTGOING_WORKERS; i++)
-            _executor.execute(getClusterPduSendingTask());
-
+            _executor.execute(getClusterPduSendingTask());
+
        // notification task itself in turn works as a task dispatcher
        _executor.execute(getClusterPduNotificationTask());
@@ -1309,9 +1310,9 @@
        }
 
        _agentLBEnabled = Boolean.valueOf(_configDao.getValue(Config.AgentLbEnable.key()));
-
+
        String connectedAgentsThreshold = configs.get("agent.load.threshold");
-
+
        if (connectedAgentsThreshold != null) {
            _connectedAgentsThreshold = Double.parseDouble(connectedAgentsThreshold);
        }
@@ -1365,7 +1366,7 @@
            s_logger.info("ping management node cluster service can not be performed on self");
            return false;
        }
-
+
        int retry = 10;
        while (--retry > 0) {
            SocketChannel sch = null;
@@ -1381,7 +1382,7 @@
            } catch (IOException e) {
                if (e instanceof ConnectException) {
                    s_logger.error("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " due to ConnectException", e);
-                    return false;
+                    return false;
                }
            } finally {
                if (sch != null) {
@@ -1397,7 +1398,7 @@
            } catch (InterruptedException ex) {
            }
        }
-
+
        s_logger.error("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " after retries");
        return false;
    }
@@ -1455,7 +1456,7 @@
    public boolean isAgentRebalanceEnabled() {
        return _agentLBEnabled;
    }
-
+
    @Override
    public Boolean propagateResourceEvent(long agentId, ResourceState.Event event) throws AgentUnavailableException {
        final String msPeer = getPeerName(agentId);
@@ -1480,7 +1481,7 @@
 
        return answers[0].getResult();
    }
-
+
    @Override
    public boolean executeResourceUserRequest(long hostId, ResourceState.Event event) throws AgentUnavailableException {
        return _resourceMgr.executeUserRequest(hostId, event);
diff --git a/server/src/com/cloud/cluster/StackMaid.java b/server/src/com/cloud/cluster/StackMaid.java
deleted file mode 100644
index b84d73d4219..00000000000
--- a/server/src/com/cloud/cluster/StackMaid.java
+++ /dev/null
@@ -1,153 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.cluster;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.log4j.Logger;
-
-import com.cloud.cluster.dao.StackMaidDao;
-import com.cloud.cluster.dao.StackMaidDaoImpl;
-import com.cloud.serializer.SerializerHelper;
-import com.cloud.utils.CleanupDelegate;
-import com.cloud.utils.db.Transaction;
-
-public class StackMaid {
-    protected final static Logger s_logger = Logger.getLogger(StackMaid.class);
-
-    private static ThreadLocal threadMaid = new ThreadLocal();
-
-    private static long msid_setby_manager = 0;
-
-    private StackMaidDao maidDao = new StackMaidDaoImpl();
-    private int currentSeq = 0;
-    private Map context = new HashMap();
-
-    public static void init(long msid) {
-        msid_setby_manager = msid;
-    }
-
-    public static StackMaid current() {
-        StackMaid maid = threadMaid.get();
-        if(maid == null) {
-            maid = new StackMaid();
-            threadMaid.set(maid);
-        }
-        return maid;
-    }
-
-    public void registerContext(String key, Object contextObject) {
-        assert(!context.containsKey(key)) : "Context key has already been registered";
-        context.put(key, contextObject);
-    }
-
-    public Object getContext(String key) {
-        return context.get(key);
-    }
-
-    public void expungeMaidItem(long maidId) {
-        // this is a bit ugly, but when it is not loaded by component locator, this is just a workable way for now
-        Transaction txn = Transaction.open(Transaction.CLOUD_DB);
-        try {
-            maidDao.expunge(maidId);
-        } finally {
-            txn.close();
-        }
-    }
-
-    public int push(String delegateClzName, Object context) {
-        assert(msid_setby_manager != 0) : "Fatal, make sure StackMaidManager is loaded";
-        if(msid_setby_manager == 0)
-            s_logger.error("Fatal, make sure StackMaidManager is loaded");
-
-        return push(msid_setby_manager, delegateClzName, context);
-    }
-
-    public int push(long currentMsid, String delegateClzName, Object context) {
-        int savePoint = currentSeq;
-        maidDao.pushCleanupDelegate(currentMsid, currentSeq++, delegateClzName, context);
-        return savePoint;
-    }
-
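// Editor's note: push() records a cleanup delegate and returns the save point it occupied,
// so a caller can unwind exactly its own entries. A sketch of the usage this supported
// (delegate class and work item are hypothetical, for illustration only):
//
//     StackMaid maid = StackMaid.current();
//     int savePoint = maid.push(NicCleanupDelegate.class.getName(), nicContext);
//     try {
//         allocateNic(nicContext);   // work that may be interrupted mid-way
//         maid.pop(savePoint);       // success: discard the recorded delegate
//     } catch (Exception e) {
//         maid.exitCleanup();        // failure: run all recorded delegates now
//         throw e;
//     }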
-    public void pop(int savePoint) {
-        assert(msid_setby_manager != 0) : "Fatal, make sure StackMaidManager is loaded";
-        if(msid_setby_manager == 0)
-            s_logger.error("Fatal, make sure StackMaidManager is loaded");
-
-        pop(msid_setby_manager, savePoint);
-    }
-
-    public void pop() {
-        if(currentSeq > 0)
-            pop(currentSeq -1);
-    }
-
-    /**
-     * must be called within thread context
-     * @param currentMsid
-     */
-    public void pop(long currentMsid, int savePoint) {
-        while(currentSeq > savePoint) {
-            maidDao.popCleanupDelegate(currentMsid);
-            currentSeq--;
-        }
-    }
-
-    public void exitCleanup() {
-        exitCleanup(msid_setby_manager);
-    }
-
-    public void exitCleanup(long currentMsid) {
-        if(currentSeq > 0) {
-            CheckPointVO maid = null;
-            while((maid = maidDao.popCleanupDelegate(currentMsid)) != null) {
-                doCleanup(maid);
-            }
-            currentSeq = 0;
-        }
-
-        context.clear();
-    }
-
-    public static boolean doCleanup(CheckPointVO maid) {
-        if(maid.getDelegate() != null) {
-            try {
-                Class clz = Class.forName(maid.getDelegate());
-                Object delegate = clz.newInstance();
-                if(delegate instanceof CleanupDelegate) {
-                    return ((CleanupDelegate)delegate).cleanup(SerializerHelper.fromSerializedString(maid.getContext()), maid);
-                } else {
-                    assert(false);
-                }
-            } catch (final ClassNotFoundException e) {
-                s_logger.error("Unable to load StackMaid delegate class: " + maid.getDelegate(), e);
-            } catch (final SecurityException e) {
-                s_logger.error("Security excetion when loading resource: " + maid.getDelegate());
-            } catch (final IllegalArgumentException e) {
-                s_logger.error("Illegal argument excetion when loading resource: " + maid.getDelegate());
-            } catch (final InstantiationException e) {
-                s_logger.error("Instantiation excetion when loading resource: " + maid.getDelegate());
-            } catch (final IllegalAccessException e) {
-                s_logger.error("Illegal access exception when loading resource: " + maid.getDelegate());
-            }
-
-            return false;
-        }
-        return true;
-    }
-}
diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
index bb943c96600..a8c1743be63 100755
--- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
+++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
@@ -39,30 +39,32 @@ import javax.naming.NamingException;
 import javax.naming.directory.DirContext;
 import javax.naming.directory.InitialDirContext;
 
+import org.apache.cloudstack.acl.SecurityChecker;
+import org.apache.cloudstack.api.ApiConstants.LDAPParams;
 import org.apache.cloudstack.api.command.admin.config.UpdateCfgCmd;
 import org.apache.cloudstack.api.command.admin.ldap.LDAPConfigCmd;
 import org.apache.cloudstack.api.command.admin.ldap.LDAPRemoveCmd;
-import org.apache.cloudstack.api.command.admin.network.DeleteNetworkOfferingCmd;
 import org.apache.cloudstack.api.command.admin.network.CreateNetworkOfferingCmd;
+import org.apache.cloudstack.api.command.admin.network.DeleteNetworkOfferingCmd;
 import org.apache.cloudstack.api.command.admin.network.UpdateNetworkOfferingCmd;
 import org.apache.cloudstack.api.command.admin.offering.CreateDiskOfferingCmd;
-import org.apache.cloudstack.api.command.admin.offering.*;
+import org.apache.cloudstack.api.command.admin.offering.CreateServiceOfferingCmd;
+import org.apache.cloudstack.api.command.admin.offering.DeleteDiskOfferingCmd;
+import org.apache.cloudstack.api.command.admin.offering.DeleteServiceOfferingCmd;
+import org.apache.cloudstack.api.command.admin.offering.UpdateDiskOfferingCmd;
+import org.apache.cloudstack.api.command.admin.offering.UpdateServiceOfferingCmd;
 import org.apache.cloudstack.api.command.admin.pod.DeletePodCmd;
 import org.apache.cloudstack.api.command.admin.pod.UpdatePodCmd;
 import org.apache.cloudstack.api.command.admin.vlan.CreateVlanIpRangeCmd;
+import org.apache.cloudstack.api.command.admin.vlan.DeleteVlanIpRangeCmd;
 import org.apache.cloudstack.api.command.admin.zone.CreateZoneCmd;
 import org.apache.cloudstack.api.command.admin.zone.DeleteZoneCmd;
 import org.apache.cloudstack.api.command.admin.zone.UpdateZoneCmd;
-import org.apache.cloudstack.api.command.admin.offering.CreateServiceOfferingCmd;
-import org.apache.cloudstack.api.command.admin.offering.DeleteServiceOfferingCmd;
-import org.apache.cloudstack.api.command.admin.vlan.DeleteVlanIpRangeCmd;
 import org.apache.cloudstack.api.command.user.network.ListNetworkOfferingsCmd;
 import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
-import org.apache.cloudstack.acl.SecurityChecker;
 import com.cloud.alert.AlertManager;
-import org.apache.cloudstack.api.ApiConstants.LDAPParams;
 import com.cloud.api.ApiDBUtils;
 import com.cloud.capacity.dao.CapacityDao;
 import com.cloud.configuration.Resource.ResourceType;
@@ -153,8 +155,6 @@ import com.cloud.user.UserContext;
 import com.cloud.user.dao.AccountDao;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.StringUtils;
-import com.cloud.utils.component.Adapters;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.utils.crypt.DBEncryptionUtil;
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.Filter;
@@ -216,7 +216,7 @@
    // @com.cloud.utils.component.Inject(adapter = SecurityChecker.class)
    @Inject
    List _secChecker;
-
+
    @Inject
    CapacityDao _capacityDao;
    @Inject
@@ -438,7 +438,7 @@
        String name = cmd.getCfgName();
        String value = cmd.getValue();
        UserContext.current().setEventDetails(" Name: " + name + " New Value: " + (((name.toLowerCase()).contains("password")) ? "*****" :
-            (((value == null) ? "" : value))));
+                (((value == null) ? "" : value))));
        // check if config value exists
        ConfigurationVO config = _configDao.findByName(name);
        if (config == null) {
@@ -1454,9 +1454,9 @@
        }
 
        if (internalDns2 == null) {
-            internalDns2 = zone.getInternalDns2();
+            internalDns2 = zone.getInternalDns2();
        }
-
+
        if (guestCidr == null) {
            guestCidr = zone.getGuestNetworkCidr();
        }
@@ -1915,8 +1915,8 @@
        String description = cmd.getDisplayText();
        Long numGibibytes = cmd.getDiskSize();
        boolean isCustomized = cmd.isCustomized() != null ? cmd.isCustomized() : false; // false
-        // by
-        // default
+                                                                                       // by
+                                                                                       // default
        String tags = cmd.getTags();
        // Long domainId = cmd.getDomainId() != null ? cmd.getDomainId() :
        // Long.valueOf(DomainVO.ROOT_DOMAIN); // disk offering
@@ -2106,7 +2106,7 @@
                physicalNetworkId = network.getPhysicalNetworkId();
            }
        }
-
+
        // Verify that zone exists
        DataCenterVO zone = _zoneDao.findById(zoneId);
        if (zone == null) {
@@ -2148,8 +2148,8 @@
                }
            }
        }
-
-
+
+
        // Check if zone is enabled
        Account caller = UserContext.current().getCaller();
        if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getType())) {
@@ -2234,7 +2234,7 @@
                //check resource limits
                _resourceLimitMgr.checkResourceLimit(vlanOwner, ResourceType.public_ip, accountIpRange);
-
+
                associateIpRangeToAccount = true;
            }
        }
@@ -2272,24 +2272,24 @@
    public Vlan createVlanAndPublicIpRange(long zoneId, long networkId, long physicalNetworkId, boolean forVirtualNetwork, Long podId, String startIP, String endIP,
            String vlanGateway, String vlanNetmask, String vlanId, Account vlanOwner) {
-
-
+
+
        Network network = _networkMgr.getNetwork(networkId);
-
+
        //Validate the zone
        DataCenterVO zone = _zoneDao.findById(zoneId);
        if (zone == null) {
            throw new InvalidParameterValueException("Please specify a valid zone.");
        }
-
+
        // ACL check
        checkZoneAccess(UserContext.current().getCaller(), zone);
-
+
        //Validate the physical network
        if (_physicalNetworkDao.findById(physicalNetworkId) == null) {
            throw new InvalidParameterValueException("Please specify a valid physical network id");
        }
-
+
        //Validate the pod
        if (podId != null) {
            Pod pod = _podDao.findById(podId);
@@ -2302,10 +2302,10 @@
            //pod vlans can be created in basic zone only
            if (zone.getNetworkType() != NetworkType.Basic || network.getTrafficType() != TrafficType.Guest) {
                throw new InvalidParameterValueException("Pod id can be specified only for the networks of type "
-                    + TrafficType.Guest + " in zone of type " + NetworkType.Basic);
+                        + TrafficType.Guest + " in zone of type " + NetworkType.Basic);
            }
        }
-
+
        //1) if vlan is specified for the guest network range, it should be the same as network's vlan
        //2) if vlan is missing, default it to the guest network's vlan
        if (network.getTrafficType() == TrafficType.Guest) {
@@ -2315,7 +2315,7 @@
                String[] vlan = uri.toString().split("vlan:\\/\\/");
                networkVlanId = vlan[1];
            }
-
+
            if (vlanId != null) {
                // if vlan is specified, throw an error if it's not equal to network's vlanId
                if (networkVlanId != null && !networkVlanId.equalsIgnoreCase(vlanId)) {
@@ -2328,14 +2328,14 @@
            //vlan id is required for public network
            throw new InvalidParameterValueException("Vlan id is required when add ip range to the public network");
        }
-
+
        if (vlanId == null) {
            vlanId = Vlan.UNTAGGED;
        }
VlanType.VirtualNetwork : VlanType.DirectAttached; - - + + if (vlanOwner != null && zone.getNetworkType() != NetworkType.Advanced) { throw new InvalidParameterValueException("Vlan owner can be defined only in the zone of type " + NetworkType.Advanced); } @@ -2484,7 +2484,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura if (vlan == null) { throw new InvalidParameterValueException("Please specify a valid IP range id."); } - + boolean isAccountSpecific = false; List acctVln = _accountVlanMapDao.listAccountVlanMapsByVlan(vlan.getId()); // Check for account wide pool. It will have an entry for account_vlan_map. @@ -2502,25 +2502,25 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura if (vlan == null) { throw new CloudRuntimeException("Unable to acquire vlan configuration: " + vlanDbId); } - + if (s_logger.isDebugEnabled()) { s_logger.debug("lock vlan " + vlanDbId + " is acquired"); } - + List ips = _publicIpAddressDao.listByVlanId(vlanDbId); - + for (IPAddressVO ip : ips) { if (ip.isOneToOneNat()) { throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId + " as ip " + ip + " belonging to the range is used for static nat purposes. Cleanup the rules first"); } - + if (ip.isSourceNat() && _networkMgr.getNetwork(ip.getAssociatedWithNetworkId()) != null) { throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId + " as ip " + ip + " belonging to the range is a source nat ip for the network id=" + ip.getSourceNetworkId() + ". IP range with the source nat ip address can be removed either as a part of Network, or account removal"); } - + if (_firewallDao.countRulesByIpId(ip.getId()) > 0) { throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId + " as ip " + ip + " belonging to the range has firewall rules applied. 
Cleanup the rules first"); @@ -2613,7 +2613,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura return true; } - + @DB protected boolean savePublicIPRange(String startIP, String endIP, long zoneId, long vlanDbId, long sourceNetworkid, long physicalNetworkId) { @@ -2816,7 +2816,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura } } - + private boolean validPod(long podId) { return (_podDao.findById(podId) != null); } @@ -3021,7 +3021,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura // in Acton, don't allow to specify more than 1 provider per service if (svcPrv.get(serviceStr) != null && svcPrv.get(serviceStr).size() > 1) { throw new InvalidParameterValueException("In the current release only one provider can be " + - "specified for the service"); + "specified for the service"); } for (String prvNameStr : svcPrv.get(serviceStr)) { // check if provider is supported @@ -3033,7 +3033,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura if (provider == Provider.JuniperSRX) { firewallProvider = Provider.JuniperSRX; } - + if ((service == Service.PortForwarding || service == Service.StaticNat) && provider == Provider.VirtualRouter){ firewallProvider = Provider.VirtualRouter; } @@ -3053,7 +3053,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura serviceProviderMap.put(service, providers); } else { throw new InvalidParameterValueException("Service " + serviceStr + " is not enabled for the network " + - "offering, can't add a provider to it"); + "offering, can't add a provider to it"); } } } @@ -3202,7 +3202,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura String multicastRateStr = _configDao.getValue("multicast.throttling.rate"); int multicastRate = ((multicastRateStr == null) ? 
10 : Integer.parseInt(multicastRateStr)); tags = cleanupTags(tags); - + if (specifyVlan != specifyIpRanges) { throw new InvalidParameterValueException("SpecifyVlan should be equal to specifyIpRanges which is " + specifyIpRanges); } @@ -3211,11 +3211,11 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura if (!specifyVlan && type == GuestType.Shared) { throw new InvalidParameterValueException("SpecifyVlan should be true if network offering's type is " + type); } - + //specifyIpRanges should always be false for Isolated offering with Source nat service enabled if (specifyVlan && type == GuestType.Isolated && serviceProviderMap.containsKey(Service.SourceNat)) { throw new InvalidParameterValueException("SpecifyVlan should be false if the network offering type is " - + type + " and service " + Service.SourceNat.getName() + " is supported"); + + type + " and service " + Service.SourceNat.getName() + " is supported"); } // validate availability value @@ -3235,7 +3235,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura } } - + boolean dedicatedLb = false; boolean elasticLb = false; boolean sharedSourceNat = false; @@ -3245,7 +3245,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura boolean inline = false; if (serviceCapabilityMap != null && !serviceCapabilityMap.isEmpty()) { Map lbServiceCapabilityMap = serviceCapabilityMap.get(Service.Lb); - + if ((lbServiceCapabilityMap != null) && (!lbServiceCapabilityMap.isEmpty())) { String isolationCapability = lbServiceCapabilityMap.get(Capability.SupportedLBIsolation); if (isolationCapability != null) { @@ -3259,7 +3259,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura if (param != null) { elasticLb = param.contains("true"); } - + String inlineMode = lbServiceCapabilityMap.get(Capability.InlineMode); if (inlineMode != null) { _networkMgr.checkCapabilityForProvider(serviceProviderMap.get(Service.Lb), Service.Lb, Capability.InlineMode, inlineMode); @@ -3326,7 +3326,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura _ntwkOffServiceMapDao.persist(offService); s_logger.trace("Added service for the network offering: " + offService + " with provider " + provider.getName()); } - + if (vpcOff) { List supportedSvcs = new ArrayList(); supportedSvcs.addAll(serviceProviderMap.keySet()); @@ -3547,7 +3547,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura if (sourceNatSupported != null) { addOffering = addOffering && (_networkMgr.areServicesSupportedByNetworkOffering(offering.getId(), Network.Service.SourceNat) == sourceNatSupported); } - + if (forVpc != null) { addOffering = addOffering && (isOfferingForVpc(offering) == forVpc.booleanValue()); } else if (network != null){ @@ -3666,14 +3666,14 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura } if (availability == null) { throw new InvalidParameterValueException("Invalid value for Availability. 
Supported types: " - + Availability.Required + ", " + Availability.Optional); + + Availability.Required + ", " + Availability.Optional); } else { if (availability == NetworkOffering.Availability.Required) { boolean canOffBeRequired = (offeringToUpdate.getGuestType() == GuestType.Isolated && _networkMgr.areServicesSupportedByNetworkOffering(offeringToUpdate.getId(), Service.SourceNat)); if (!canOffBeRequired) { throw new InvalidParameterValueException("Availability can be " + - NetworkOffering.Availability.Required + " only for networkOfferings of type " + GuestType.Isolated + " and with " + NetworkOffering.Availability.Required + " only for networkOfferings of type " + GuestType.Isolated + " and with " + Service.SourceNat.getName() + " enabled"); } @@ -3681,7 +3681,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura List offerings = _networkOfferingDao.listByAvailability(Availability.Required, false); if (!offerings.isEmpty() && offerings.get(0).getId() != offeringToUpdate.getId()) { throw new InvalidParameterValueException("System already has network offering id=" + - offerings.get(0).getId() + " with availability " + Availability.Required); + offerings.get(0).getId() + " with availability " + Availability.Required); } } offering.setAvailability(availability); @@ -3697,12 +3697,12 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura @Override @ActionEvent(eventType = EventTypes.EVENT_ACCOUNT_MARK_DEFAULT_ZONE, eventDescription = "Marking account with the " + - "default zone", async=true) + "default zone", async=true) public AccountVO markDefaultZone(String accountName, long domainId, long defaultZoneId) { - - // Check if the account exists - Account account = _accountDao.findEnabledAccount(accountName, domainId); - if (account == null) { + + // Check if the account exists + Account account = _accountDao.findEnabledAccount(accountName, domainId); + if (account == null) { s_logger.error("Unable to find account by name: " + accountName + " in domain " + domainId); throw new InvalidParameterValueException("Account by name: " + accountName + " doesn't exist in domain " + domainId); } @@ -3710,20 +3710,20 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura // Don't allow modification of system account if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { throw new InvalidParameterValueException("Can not modify system account"); - } + } - AccountVO acctForUpdate = _accountDao.findById(account.getId()); - - acctForUpdate.setDefaultZoneId(defaultZoneId); - - if (_accountDao.update(account.getId(), acctForUpdate)) { - UserContext.current().setEventDetails("Default zone id= " + defaultZoneId); - return _accountDao.findById(account.getId()); - } else { - return null; - } + AccountVO acctForUpdate = _accountDao.findById(account.getId()); + + acctForUpdate.setDefaultZoneId(defaultZoneId); + + if (_accountDao.update(account.getId(), acctForUpdate)) { + UserContext.current().setEventDetails("Default zone id= " + defaultZoneId); + return _accountDao.findById(account.getId()); + } else { + return null; + } } - + // Note: This method will be used for entity name validations in the coming // releases (place holder for now) private void validateEntityName(String str) { @@ -3851,31 +3851,31 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura public ClusterVO getCluster(long id) { return _clusterDao.findById(id); } - + @Override public AllocationState 
findClusterAllocationState(ClusterVO cluster){ - - if(cluster.getAllocationState() == AllocationState.Disabled){ - return AllocationState.Disabled; - }else if(ApiDBUtils.findPodById(cluster.getPodId()).getAllocationState() == AllocationState.Disabled){ - return AllocationState.Disabled; - }else { - DataCenterVO zone = ApiDBUtils.findZoneById(cluster.getDataCenterId()); - return zone.getAllocationState(); - } + + if(cluster.getAllocationState() == AllocationState.Disabled){ + return AllocationState.Disabled; + }else if(ApiDBUtils.findPodById(cluster.getPodId()).getAllocationState() == AllocationState.Disabled){ + return AllocationState.Disabled; + }else { + DataCenterVO zone = ApiDBUtils.findZoneById(cluster.getDataCenterId()); + return zone.getAllocationState(); + } } @Override public AllocationState findPodAllocationState(HostPodVO pod){ - - if(pod.getAllocationState() == AllocationState.Disabled){ - return AllocationState.Disabled; - }else { - DataCenterVO zone = ApiDBUtils.findZoneById(pod.getDataCenterId()); - return zone.getAllocationState(); - } + + if(pod.getAllocationState() == AllocationState.Disabled){ + return AllocationState.Disabled; + }else { + DataCenterVO zone = ApiDBUtils.findZoneById(pod.getDataCenterId()); + return zone.getAllocationState(); + } } - + private boolean allowIpRangeOverlap(VlanVO vlan, boolean forVirtualNetwork, long networkId) { // FIXME - delete restriction for virtual network in the future if (vlan.getVlanType() == VlanType.DirectAttached && !forVirtualNetwork) { diff --git a/server/src/com/cloud/configuration/DefaultComponentLibrary.java b/server/src/com/cloud/configuration/DefaultComponentLibrary.java deleted file mode 100755 index 2bb54094a82..00000000000 --- a/server/src/com/cloud/configuration/DefaultComponentLibrary.java +++ /dev/null @@ -1,495 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
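// The file deleted below (DefaultComponentLibrary) registered every DAO, manager, and
// adapter with the legacy ComponentLocator by hand via addDao()/addManager()/addAdapter().
// A minimal sketch of the annotation-driven wiring the rest of this changeset moves toward;
// ExampleDao/ExampleManager are illustrative placeholders, not CloudStack classes:
import javax.inject.Inject;
import org.springframework.stereotype.Component;

interface ExampleDao { }
interface ExampleManager { }

@Component // discovered by Spring classpath scanning; no hand-maintained registry entry needed
class ExampleManagerImpl implements ExampleManager {
    @Inject // field injection replaces ComponentLocator.inject(...) and locator lookups
    ExampleDao _exampleDao;
}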
-package com.cloud.configuration; - -import java.io.Serializable; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import com.cloud.agent.manager.ClusteredAgentManagerImpl; -import com.cloud.alert.AlertManagerImpl; -import com.cloud.alert.dao.AlertDaoImpl; -import com.cloud.api.query.QueryManagerImpl; -import com.cloud.api.query.dao.AccountJoinDaoImpl; -import com.cloud.api.query.dao.AsyncJobJoinDaoImpl; -import com.cloud.api.query.dao.DomainRouterJoinDaoImpl; -import com.cloud.api.query.dao.InstanceGroupJoinDaoImpl; -import com.cloud.api.query.dao.ProjectAccountJoinDaoImpl; -import com.cloud.api.query.dao.ProjectInvitationJoinDaoImpl; -import com.cloud.api.query.dao.ProjectJoinDaoImpl; -import com.cloud.api.query.dao.ResourceTagJoinDaoImpl; -import com.cloud.api.query.dao.SecurityGroupJoinDaoImpl; -import com.cloud.api.query.dao.StoragePoolJoinDaoImpl; -import com.cloud.api.query.dao.UserAccountJoinDaoImpl; -import com.cloud.api.query.dao.UserVmJoinDaoImpl; -import com.cloud.api.query.dao.HostJoinDaoImpl; -import com.cloud.api.query.dao.VolumeJoinDaoImpl; -import com.cloud.async.AsyncJobExecutorContextImpl; -import com.cloud.async.AsyncJobManagerImpl; -import com.cloud.async.SyncQueueManagerImpl; -import com.cloud.async.dao.AsyncJobDaoImpl; -import com.cloud.async.dao.SyncQueueDaoImpl; -import com.cloud.async.dao.SyncQueueItemDaoImpl; -import com.cloud.capacity.CapacityManagerImpl; -import com.cloud.capacity.dao.CapacityDaoImpl; -import com.cloud.certificate.dao.CertificateDaoImpl; -import com.cloud.cluster.CheckPointManagerImpl; -import com.cloud.cluster.ClusterFenceManagerImpl; -import com.cloud.cluster.ClusterManagerImpl; -import com.cloud.cluster.agentlb.dao.HostTransferMapDaoImpl; -import com.cloud.cluster.dao.ManagementServerHostDaoImpl; -import com.cloud.cluster.dao.ManagementServerHostPeerDaoImpl; -import com.cloud.cluster.dao.StackMaidDaoImpl; -import com.cloud.configuration.dao.ConfigurationDaoImpl; -import com.cloud.configuration.dao.ResourceCountDaoImpl; -import com.cloud.configuration.dao.ResourceLimitDaoImpl; -import com.cloud.consoleproxy.ConsoleProxyManagerImpl; -import com.cloud.dao.EntityManager; -import com.cloud.dao.EntityManagerImpl; -import com.cloud.dc.ClusterDetailsDaoImpl; -import com.cloud.dc.dao.AccountVlanMapDaoImpl; -import com.cloud.dc.dao.ClusterDaoImpl; -import com.cloud.dc.dao.ClusterVSMMapDaoImpl; -import com.cloud.dc.dao.DataCenterDaoImpl; -import com.cloud.dc.dao.DataCenterIpAddressDaoImpl; -import com.cloud.dc.dao.DcDetailsDaoImpl; -import com.cloud.dc.dao.HostPodDaoImpl; -import com.cloud.dc.dao.PodVlanMapDaoImpl; -import com.cloud.dc.dao.StorageNetworkIpAddressDaoImpl; -import com.cloud.dc.dao.StorageNetworkIpRangeDaoImpl; -import com.cloud.dc.dao.VlanDaoImpl; -import com.cloud.domain.dao.DomainDaoImpl; -import com.cloud.event.dao.EventDaoImpl; -import com.cloud.event.dao.UsageEventDaoImpl; -import com.cloud.ha.HighAvailabilityManagerImpl; -import com.cloud.ha.dao.HighAvailabilityDaoImpl; -import com.cloud.host.dao.HostDaoImpl; -import com.cloud.host.dao.HostDetailsDaoImpl; -import com.cloud.host.dao.HostTagsDaoImpl; -import com.cloud.hypervisor.HypervisorGuruManagerImpl; -import com.cloud.hypervisor.dao.HypervisorCapabilitiesDaoImpl; -import com.cloud.keystore.KeystoreDaoImpl; -import com.cloud.keystore.KeystoreManagerImpl; -import com.cloud.maint.UpgradeManagerImpl; -import com.cloud.maint.dao.AgentUpgradeDaoImpl; -import com.cloud.network.ExternalLoadBalancerUsageManagerImpl; -import 
com.cloud.network.NetworkManagerImpl; -import com.cloud.network.StorageNetworkManagerImpl; -import com.cloud.network.as.AutoScaleManagerImpl; -import com.cloud.network.as.dao.AutoScalePolicyConditionMapDaoImpl; -import com.cloud.network.as.dao.AutoScalePolicyDaoImpl; -import com.cloud.network.as.dao.AutoScaleVmGroupDaoImpl; -import com.cloud.network.as.dao.AutoScaleVmGroupPolicyMapDaoImpl; -import com.cloud.network.as.dao.AutoScaleVmProfileDaoImpl; -import com.cloud.network.as.dao.ConditionDaoImpl; -import com.cloud.network.as.dao.CounterDaoImpl; -import com.cloud.network.dao.ExternalFirewallDeviceDaoImpl; -import com.cloud.network.dao.ExternalLoadBalancerDeviceDaoImpl; -import com.cloud.network.dao.FirewallRulesCidrsDaoImpl; -import com.cloud.network.dao.FirewallRulesDaoImpl; -import com.cloud.network.dao.IPAddressDaoImpl; -import com.cloud.network.dao.InlineLoadBalancerNicMapDaoImpl; -import com.cloud.network.dao.LBStickinessPolicyDaoImpl; -import com.cloud.network.dao.LoadBalancerDaoImpl; -import com.cloud.network.dao.LoadBalancerVMMapDaoImpl; -import com.cloud.network.dao.NetworkDaoImpl; -import com.cloud.network.dao.NetworkDomainDaoImpl; -import com.cloud.network.dao.NetworkExternalFirewallDaoImpl; -import com.cloud.network.dao.NetworkExternalLoadBalancerDaoImpl; -import com.cloud.network.dao.NetworkRuleConfigDaoImpl; -import com.cloud.network.dao.NetworkServiceMapDaoImpl; -import com.cloud.network.dao.PhysicalNetworkDaoImpl; -import com.cloud.network.dao.PhysicalNetworkServiceProviderDaoImpl; -import com.cloud.network.dao.PhysicalNetworkTrafficTypeDaoImpl; -import com.cloud.network.dao.PortProfileDaoImpl; -import com.cloud.network.dao.RemoteAccessVpnDaoImpl; -import com.cloud.network.dao.Site2SiteCustomerGatewayDaoImpl; -import com.cloud.network.dao.Site2SiteVpnConnectionDaoImpl; -import com.cloud.network.dao.Site2SiteVpnGatewayDaoImpl; -import com.cloud.network.dao.VirtualRouterProviderDaoImpl; -import com.cloud.network.dao.VpnUserDaoImpl; -import com.cloud.network.element.VirtualRouterElement; -import com.cloud.network.element.VirtualRouterElementService; -import com.cloud.network.firewall.FirewallManagerImpl; -import com.cloud.network.lb.LoadBalancingRulesManagerImpl; -import com.cloud.network.router.VpcVirtualNetworkApplianceManagerImpl; -import com.cloud.network.rules.RulesManagerImpl; -import com.cloud.network.rules.dao.PortForwardingRulesDaoImpl; -import com.cloud.network.security.SecurityGroupManagerImpl2; -import com.cloud.network.security.dao.SecurityGroupDaoImpl; -import com.cloud.network.security.dao.SecurityGroupRuleDaoImpl; -import com.cloud.network.security.dao.SecurityGroupRulesDaoImpl; -import com.cloud.network.security.dao.SecurityGroupVMMapDaoImpl; -import com.cloud.network.security.dao.SecurityGroupWorkDaoImpl; -import com.cloud.network.security.dao.VmRulesetLogDaoImpl; -import com.cloud.network.vpc.NetworkACLManagerImpl; -import com.cloud.network.vpc.VpcManagerImpl; -import com.cloud.network.vpc.dao.PrivateIpDaoImpl; -import com.cloud.network.vpc.dao.StaticRouteDaoImpl; -import com.cloud.network.vpc.dao.VpcDaoImpl; -import com.cloud.network.vpc.dao.VpcGatewayDaoImpl; -import com.cloud.network.vpc.dao.VpcOfferingDaoImpl; -import com.cloud.network.vpc.dao.VpcOfferingServiceMapDaoImpl; -import com.cloud.network.vpn.RemoteAccessVpnManagerImpl; -import com.cloud.network.vpn.Site2SiteVpnManagerImpl; -import com.cloud.offerings.dao.NetworkOfferingDaoImpl; -import com.cloud.offerings.dao.NetworkOfferingServiceMapDaoImpl; -import com.cloud.projects.ProjectManagerImpl; 
-import com.cloud.projects.dao.ProjectAccountDaoImpl; -import com.cloud.projects.dao.ProjectDaoImpl; -import com.cloud.projects.dao.ProjectInvitationDaoImpl; -import com.cloud.resource.ResourceManagerImpl; -import com.cloud.resourcelimit.ResourceLimitManagerImpl; -import com.cloud.service.dao.ServiceOfferingDaoImpl; -import com.cloud.storage.OCFS2ManagerImpl; -import com.cloud.storage.StorageManagerImpl; -import com.cloud.storage.dao.DiskOfferingDaoImpl; -import com.cloud.storage.dao.GuestOSCategoryDaoImpl; -import com.cloud.storage.dao.GuestOSDaoImpl; -import com.cloud.storage.dao.LaunchPermissionDaoImpl; -import com.cloud.storage.dao.S3DaoImpl; -import com.cloud.storage.dao.SnapshotDaoImpl; -import com.cloud.storage.dao.SnapshotPolicyDaoImpl; -import com.cloud.storage.dao.SnapshotScheduleDaoImpl; -import com.cloud.storage.dao.StoragePoolDaoImpl; -import com.cloud.storage.dao.StoragePoolHostDaoImpl; -import com.cloud.storage.dao.StoragePoolWorkDaoImpl; -import com.cloud.storage.dao.SwiftDaoImpl; -import com.cloud.storage.dao.UploadDaoImpl; -import com.cloud.storage.dao.VMTemplateDaoImpl; -import com.cloud.storage.dao.VMTemplateDetailsDaoImpl; -import com.cloud.storage.dao.VMTemplateHostDaoImpl; -import com.cloud.storage.dao.VMTemplatePoolDaoImpl; -import com.cloud.storage.dao.VMTemplateS3DaoImpl; -import com.cloud.storage.dao.VMTemplateSwiftDaoImpl; -import com.cloud.storage.dao.VMTemplateZoneDaoImpl; -import com.cloud.storage.dao.VolumeDaoImpl; -import com.cloud.storage.dao.VolumeHostDaoImpl; -import com.cloud.storage.download.DownloadMonitorImpl; -import com.cloud.storage.s3.S3ManagerImpl; -import com.cloud.storage.secondary.SecondaryStorageManagerImpl; -import com.cloud.storage.snapshot.SnapshotManagerImpl; -import com.cloud.storage.snapshot.SnapshotSchedulerImpl; -import com.cloud.storage.swift.SwiftManagerImpl; -import com.cloud.storage.upload.UploadMonitorImpl; -import com.cloud.tags.TaggedResourceManagerImpl; -import com.cloud.tags.dao.ResourceTagsDaoImpl; -import com.cloud.template.HyervisorTemplateAdapter; -import com.cloud.template.TemplateAdapter; -import com.cloud.template.TemplateAdapter.TemplateAdapterType; -import com.cloud.template.TemplateManagerImpl; -import com.cloud.user.AccountDetailsDaoImpl; -import com.cloud.user.AccountManagerImpl; -import com.cloud.user.DomainManagerImpl; -import com.cloud.user.dao.AccountDaoImpl; -import com.cloud.user.dao.SSHKeyPairDaoImpl; -import com.cloud.user.dao.UserAccountDaoImpl; -import com.cloud.user.dao.UserDaoImpl; -import com.cloud.user.dao.UserStatisticsDaoImpl; -import com.cloud.user.dao.UserStatsLogDaoImpl; -import com.cloud.utils.component.Adapter; -import com.cloud.utils.component.ComponentLibrary; -import com.cloud.utils.component.ComponentLibraryBase; -import com.cloud.utils.component.LegacyComponentLocator.ComponentInfo; -import com.cloud.utils.component.Manager; -import com.cloud.utils.component.PluggableService; -import com.cloud.utils.db.GenericDao; -import com.cloud.uuididentity.IdentityServiceImpl; -import com.cloud.uuididentity.dao.IdentityDaoImpl; -import com.cloud.vm.ClusteredVirtualMachineManagerImpl; -import com.cloud.vm.ItWorkDaoImpl; -import com.cloud.vm.UserVmManagerImpl; -import com.cloud.vm.dao.ConsoleProxyDaoImpl; -import com.cloud.vm.dao.DomainRouterDaoImpl; -import com.cloud.vm.dao.InstanceGroupDaoImpl; -import com.cloud.vm.dao.InstanceGroupVMMapDaoImpl; -import com.cloud.vm.dao.NicDaoImpl; -import com.cloud.vm.dao.SecondaryStorageVmDaoImpl; -import com.cloud.vm.dao.UserVmDaoImpl; -import 
com.cloud.vm.dao.UserVmDetailsDaoImpl; -import com.cloud.vm.dao.VMInstanceDaoImpl; -import com.cloud.event.dao.EventJoinDaoImpl; - - - -public class DefaultComponentLibrary extends ComponentLibraryBase implements ComponentLibrary { - protected void populateDaos() { - addDao("StackMaidDao", StackMaidDaoImpl.class); - addDao("VMTemplateZoneDao", VMTemplateZoneDaoImpl.class); - addDao("VMTemplateDetailsDao", VMTemplateDetailsDaoImpl.class); - addDao("DomainRouterDao", DomainRouterDaoImpl.class); - addDao("HostDao", HostDaoImpl.class); - addDao("VMInstanceDao", VMInstanceDaoImpl.class); - addDao("UserVmDao", UserVmDaoImpl.class); - ComponentInfo> info = addDao("ServiceOfferingDao", ServiceOfferingDaoImpl.class); - info.addParameter("cache.size", "50"); - info.addParameter("cache.time.to.live", "600"); - info = addDao("DiskOfferingDao", DiskOfferingDaoImpl.class); - info.addParameter("cache.size", "50"); - info.addParameter("cache.time.to.live", "600"); - info = addDao("DataCenterDao", DataCenterDaoImpl.class); - info.addParameter("cache.size", "50"); - info.addParameter("cache.time.to.live", "600"); - info = addDao("HostPodDao", HostPodDaoImpl.class); - info.addParameter("cache.size", "50"); - info.addParameter("cache.time.to.live", "600"); - addDao("IPAddressDao", IPAddressDaoImpl.class); - info = addDao("VlanDao", VlanDaoImpl.class); - info.addParameter("cache.size", "30"); - info.addParameter("cache.time.to.live", "3600"); - addDao("PodVlanMapDao", PodVlanMapDaoImpl.class); - addDao("AccountVlanMapDao", AccountVlanMapDaoImpl.class); - addDao("VolumeDao", VolumeDaoImpl.class); - addDao("EventDao", EventDaoImpl.class); - info = addDao("UserDao", UserDaoImpl.class); - info.addParameter("cache.size", "5000"); - info.addParameter("cache.time.to.live", "300"); - addDao("UserStatisticsDao", UserStatisticsDaoImpl.class); - addDao("UserStatsLogDao", UserStatsLogDaoImpl.class); - addDao("FirewallRulesDao", FirewallRulesDaoImpl.class); - addDao("LoadBalancerDao", LoadBalancerDaoImpl.class); - addDao("NetworkRuleConfigDao", NetworkRuleConfigDaoImpl.class); - addDao("LoadBalancerVMMapDao", LoadBalancerVMMapDaoImpl.class); - addDao("LBStickinessPolicyDao", LBStickinessPolicyDaoImpl.class); - addDao("CounterDao", CounterDaoImpl.class); - addDao("ConditionDao", ConditionDaoImpl.class); - addDao("AutoScalePolicyDao", AutoScalePolicyDaoImpl.class); - addDao("AutoScalePolicyConditionMapDao", AutoScalePolicyConditionMapDaoImpl.class); - addDao("AutoScaleVmProfileDao", AutoScaleVmProfileDaoImpl.class); - addDao("AutoScaleVmGroupDao", AutoScaleVmGroupDaoImpl.class); - addDao("AutoScaleVmGroupPolicyMapDao", AutoScaleVmGroupPolicyMapDaoImpl.class); - addDao("DataCenterIpAddressDao", DataCenterIpAddressDaoImpl.class); - addDao("SecurityGroupDao", SecurityGroupDaoImpl.class); - addDao("SecurityGroupRuleDao", SecurityGroupRuleDaoImpl.class); - addDao("SecurityGroupVMMapDao", SecurityGroupVMMapDaoImpl.class); - addDao("SecurityGroupRulesDao", SecurityGroupRulesDaoImpl.class); - addDao("SecurityGroupWorkDao", SecurityGroupWorkDaoImpl.class); - addDao("VmRulesetLogDao", VmRulesetLogDaoImpl.class); - addDao("AlertDao", AlertDaoImpl.class); - addDao("CapacityDao", CapacityDaoImpl.class); - addDao("DomainDao", DomainDaoImpl.class); - addDao("AccountDao", AccountDaoImpl.class); - addDao("ResourceLimitDao", ResourceLimitDaoImpl.class); - addDao("ResourceCountDao", ResourceCountDaoImpl.class); - addDao("UserAccountDao", UserAccountDaoImpl.class); - addDao("VMTemplateHostDao", VMTemplateHostDaoImpl.class); - 
addDao("VolumeHostDao", VolumeHostDaoImpl.class); - addDao("VMTemplateSwiftDao", VMTemplateSwiftDaoImpl.class); - addDao("VMTemplateS3Dao", VMTemplateS3DaoImpl.class); - addDao("UploadDao", UploadDaoImpl.class); - addDao("VMTemplatePoolDao", VMTemplatePoolDaoImpl.class); - addDao("LaunchPermissionDao", LaunchPermissionDaoImpl.class); - addDao("ConfigurationDao", ConfigurationDaoImpl.class); - info = addDao("VMTemplateDao", VMTemplateDaoImpl.class); - info.addParameter("cache.size", "100"); - info.addParameter("cache.time.to.live", "600"); - info.addParameter("routing.uniquename", "routing"); - addDao("HighAvailabilityDao", HighAvailabilityDaoImpl.class); - addDao("ConsoleProxyDao", ConsoleProxyDaoImpl.class); - addDao("SecondaryStorageVmDao", SecondaryStorageVmDaoImpl.class); - addDao("ManagementServerHostDao", ManagementServerHostDaoImpl.class); - addDao("ManagementServerHostPeerDao", ManagementServerHostPeerDaoImpl.class); - addDao("AgentUpgradeDao", AgentUpgradeDaoImpl.class); - addDao("SnapshotDao", SnapshotDaoImpl.class); - addDao("AsyncJobDao", AsyncJobDaoImpl.class); - addDao("SyncQueueDao", SyncQueueDaoImpl.class); - addDao("SyncQueueItemDao", SyncQueueItemDaoImpl.class); - addDao("GuestOSDao", GuestOSDaoImpl.class); - addDao("GuestOSCategoryDao", GuestOSCategoryDaoImpl.class); - addDao("StoragePoolDao", StoragePoolDaoImpl.class); - addDao("StoragePoolHostDao", StoragePoolHostDaoImpl.class); - addDao("DetailsDao", HostDetailsDaoImpl.class); - addDao("SnapshotPolicyDao", SnapshotPolicyDaoImpl.class); - addDao("SnapshotScheduleDao", SnapshotScheduleDaoImpl.class); - addDao("ClusterDao", ClusterDaoImpl.class); - addDao("CertificateDao", CertificateDaoImpl.class); - addDao("NetworkConfigurationDao", NetworkDaoImpl.class); - addDao("NetworkOfferingDao", NetworkOfferingDaoImpl.class); - addDao("NicDao", NicDaoImpl.class); - addDao("InstanceGroupDao", InstanceGroupDaoImpl.class); - addDao("InstanceGroupJoinDao", InstanceGroupJoinDaoImpl.class); - addDao("InstanceGroupVMMapDao", InstanceGroupVMMapDaoImpl.class); - addDao("RemoteAccessVpnDao", RemoteAccessVpnDaoImpl.class); - addDao("VpnUserDao", VpnUserDaoImpl.class); - addDao("ItWorkDao", ItWorkDaoImpl.class); - addDao("FirewallRulesDao", FirewallRulesDaoImpl.class); - addDao("PortForwardingRulesDao", PortForwardingRulesDaoImpl.class); - addDao("FirewallRulesCidrsDao", FirewallRulesCidrsDaoImpl.class); - addDao("SSHKeyPairDao", SSHKeyPairDaoImpl.class); - addDao("UsageEventDao", UsageEventDaoImpl.class); - addDao("ClusterDetailsDao", ClusterDetailsDaoImpl.class); - addDao("UserVmDetailsDao", UserVmDetailsDaoImpl.class); - addDao("StoragePoolWorkDao", StoragePoolWorkDaoImpl.class); - addDao("HostTagsDao", HostTagsDaoImpl.class); - addDao("NetworkDomainDao", NetworkDomainDaoImpl.class); - addDao("KeystoreDao", KeystoreDaoImpl.class); - addDao("DcDetailsDao", DcDetailsDaoImpl.class); - addDao("SwiftDao", SwiftDaoImpl.class); - addDao("S3Dao", S3DaoImpl.class); - addDao("AgentTransferMapDao", HostTransferMapDaoImpl.class); - addDao("ProjectDao", ProjectDaoImpl.class); - addDao("InlineLoadBalancerNicMapDao", InlineLoadBalancerNicMapDaoImpl.class); - addDao("ProjectsAccountDao", ProjectAccountDaoImpl.class); - addDao("ProjectInvitationDao", ProjectInvitationDaoImpl.class); - addDao("IdentityDao", IdentityDaoImpl.class); - addDao("AccountDetailsDao", AccountDetailsDaoImpl.class); - addDao("NetworkOfferingServiceMapDao", NetworkOfferingServiceMapDaoImpl.class); - info = addDao("HypervisorCapabilitiesDao",HypervisorCapabilitiesDaoImpl.class); - 
info.addParameter("cache.size", "100"); - info.addParameter("cache.time.to.live", "600"); - addDao("PhysicalNetworkDao", PhysicalNetworkDaoImpl.class); - addDao("PhysicalNetworkServiceProviderDao", PhysicalNetworkServiceProviderDaoImpl.class); - addDao("VirtualRouterProviderDao", VirtualRouterProviderDaoImpl.class); - addDao("ExternalLoadBalancerDeviceDao", ExternalLoadBalancerDeviceDaoImpl.class); - addDao("ExternalFirewallDeviceDao", ExternalFirewallDeviceDaoImpl.class); - addDao("NetworkExternalLoadBalancerDao", NetworkExternalLoadBalancerDaoImpl.class); - addDao("NetworkExternalFirewallDao", NetworkExternalFirewallDaoImpl.class); - addDao("ClusterVSMMapDao", ClusterVSMMapDaoImpl.class); - addDao("PortProfileDao", PortProfileDaoImpl.class); - addDao("PhysicalNetworkTrafficTypeDao", PhysicalNetworkTrafficTypeDaoImpl.class); - addDao("NetworkServiceMapDao", NetworkServiceMapDaoImpl.class); - addDao("StorageNetworkIpAddressDao", StorageNetworkIpAddressDaoImpl.class); - addDao("StorageNetworkIpRangeDao", StorageNetworkIpRangeDaoImpl.class); - addDao("VpcDao", VpcDaoImpl.class); - addDao("VpcOfferingDao", VpcOfferingDaoImpl.class); - addDao("VpcOfferingServiceMapDao", VpcOfferingServiceMapDaoImpl.class); - addDao("PrivateIpDao", PrivateIpDaoImpl.class); - addDao("VpcGatewayDao", VpcGatewayDaoImpl.class); - addDao("StaticRouteDao", StaticRouteDaoImpl.class); - addDao("TagsDao", ResourceTagsDaoImpl.class); - addDao("Site2SiteVpnGatewayDao", Site2SiteVpnGatewayDaoImpl.class); - addDao("Site2SiteCustomerGatewayDao", Site2SiteCustomerGatewayDaoImpl.class); - addDao("Site2SiteVpnConnnectionDao", Site2SiteVpnConnectionDaoImpl.class); - - addDao("UserVmJoinDao", UserVmJoinDaoImpl.class); - addDao("DomainRouterJoinDao", DomainRouterJoinDaoImpl.class); - addDao("SecurityGroupJoinDao", SecurityGroupJoinDaoImpl.class); - addDao("ResourceTagJoinDao", ResourceTagJoinDaoImpl.class); - addDao("EventJoinDao", EventJoinDaoImpl.class); - addDao("UserAccountJoinDao", UserAccountJoinDaoImpl.class); - addDao("ProjectJoinDao", ProjectJoinDaoImpl.class); - addDao("ProjectAccountJoinDao", ProjectAccountJoinDaoImpl.class); - addDao("ProjectInvitationJoinDao", ProjectInvitationJoinDaoImpl.class); - addDao("HostJoinDao", HostJoinDaoImpl.class); - addDao("VolumeJoinDao", VolumeJoinDaoImpl.class); - addDao("AccountJoinDao", AccountJoinDaoImpl.class); - addDao("AsyncJobJoinDao", AsyncJobJoinDaoImpl.class); - addDao("StoragePoolJoinDao", StoragePoolJoinDaoImpl.class); - } - - @Override - public synchronized Map>> getDaos() { - if (_daos.size() == 0) { - populateDaos(); - } - //FIXME: Incorrect method return definition - return _daos; - } - - protected void populateManagers() { - addManager("StackMaidManager", CheckPointManagerImpl.class); - addManager("Cluster Manager", ClusterManagerImpl.class); - addManager("ClusterFenceManager", ClusterFenceManagerImpl.class); - addManager("ClusteredAgentManager", ClusteredAgentManagerImpl.class); - addManager("SyncQueueManager", SyncQueueManagerImpl.class); - addManager("AsyncJobManager", AsyncJobManagerImpl.class); - addManager("AsyncJobExecutorContext", AsyncJobExecutorContextImpl.class); - addManager("configuration manager", ConfigurationManagerImpl.class); - addManager("account manager", AccountManagerImpl.class); - addManager("domain manager", DomainManagerImpl.class); - addManager("resource limit manager", ResourceLimitManagerImpl.class); - addManager("network manager", NetworkManagerImpl.class); - addManager("download manager", DownloadMonitorImpl.class); - addManager("upload 
manager", UploadMonitorImpl.class); - addManager("keystore manager", KeystoreManagerImpl.class); - addManager("secondary storage vm manager", SecondaryStorageManagerImpl.class); - addManager("vm manager", UserVmManagerImpl.class); - addManager("upgrade manager", UpgradeManagerImpl.class); - addManager("StorageManager", StorageManagerImpl.class); - addManager("Alert Manager", AlertManagerImpl.class); - addManager("Template Manager", TemplateManagerImpl.class); - addManager("Snapshot Manager", SnapshotManagerImpl.class); - addManager("SnapshotScheduler", SnapshotSchedulerImpl.class); - addManager("SecurityGroupManager", SecurityGroupManagerImpl2.class); - addManager("EntityManager", EntityManagerImpl.class); - addManager("LoadBalancingRulesManager", LoadBalancingRulesManagerImpl.class); - addManager("AutoScaleManager", AutoScaleManagerImpl.class); - addManager("RulesManager", RulesManagerImpl.class); - addManager("RemoteAccessVpnManager", RemoteAccessVpnManagerImpl.class); - addManager("Capacity Manager", CapacityManagerImpl.class); - addManager("VirtualMachineManager", ClusteredVirtualMachineManagerImpl.class); - addManager("HypervisorGuruManager", HypervisorGuruManagerImpl.class); - addManager("ResourceManager", ResourceManagerImpl.class); - addManager("IdentityManager", IdentityServiceImpl.class); - addManager("OCFS2Manager", OCFS2ManagerImpl.class); - addManager("FirewallManager", FirewallManagerImpl.class); - ComponentInfo info = addManager("ConsoleProxyManager", ConsoleProxyManagerImpl.class); - info.addParameter("consoleproxy.sslEnabled", "true"); - addManager("ProjectManager", ProjectManagerImpl.class); - addManager("SwiftManager", SwiftManagerImpl.class); - addManager("S3Manager", S3ManagerImpl.class); - addManager("StorageNetworkManager", StorageNetworkManagerImpl.class); - addManager("ExternalLoadBalancerUsageManager", ExternalLoadBalancerUsageManagerImpl.class); - addManager("HA Manager", HighAvailabilityManagerImpl.class); - addManager("VPC Manager", VpcManagerImpl.class); - addManager("VpcVirtualRouterManager", VpcVirtualNetworkApplianceManagerImpl.class); - addManager("NetworkACLManager", NetworkACLManagerImpl.class); - addManager("TaggedResourcesManager", TaggedResourceManagerImpl.class); - addManager("Site2SiteVpnManager", Site2SiteVpnManagerImpl.class); - addManager("QueryManager", QueryManagerImpl.class); - } - - @Override - public synchronized Map> getManagers() { - if (_managers.size() == 0) { - populateManagers(); - } - return _managers; - } - - protected void populateAdapters() { - addAdapter(TemplateAdapter.class, TemplateAdapterType.Hypervisor.getName(), HyervisorTemplateAdapter.class); - } - - @Override - public synchronized Map>> getAdapters() { - if (_adapters.size() == 0) { - populateAdapters(); - } - return _adapters; - } - - @Override - public synchronized Map, Class> getFactories() { - HashMap, Class> factories = new HashMap, Class>(); - factories.put(EntityManager.class, EntityManagerImpl.class); - return factories; - } - - protected void populateServices() { - addService("VirtualRouterElementService", VirtualRouterElementService.class, VirtualRouterElement.class); - } - - @Override - public synchronized Map> getPluggableServices() { - if (_pluggableServices.size() == 0) { - populateServices(); - } - return _pluggableServices; - } -} diff --git a/server/src/com/cloud/configuration/PremiumComponentLibrary.java b/server/src/com/cloud/configuration/PremiumComponentLibrary.java deleted file mode 100755 index b25f462f4d0..00000000000 --- 
a/server/src/com/cloud/configuration/PremiumComponentLibrary.java +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.configuration; - -import java.util.ArrayList; -import java.util.List; - -import com.cloud.baremetal.BareMetalPingServiceImpl; -import com.cloud.baremetal.BareMetalTemplateAdapter; -import com.cloud.baremetal.BareMetalVmManagerImpl; -import com.cloud.baremetal.ExternalDhcpManagerImpl; -import com.cloud.baremetal.PxeServerManager.PxeServerType; -import com.cloud.baremetal.PxeServerManagerImpl; -import com.cloud.baremetal.PxeServerService; -import com.cloud.ha.HighAvailabilityManagerExtImpl; -import com.cloud.network.ExternalNetworkDeviceManagerImpl; -import com.cloud.network.NetworkUsageManagerImpl; -import com.cloud.secstorage.CommandExecLogDaoImpl; -import com.cloud.secstorage.PremiumSecondaryStorageManagerImpl; -import com.cloud.template.TemplateAdapter; -import com.cloud.template.TemplateAdapter.TemplateAdapterType; -import com.cloud.upgrade.PremiumDatabaseUpgradeChecker; -import com.cloud.usage.dao.UsageDaoImpl; -import com.cloud.usage.dao.UsageIPAddressDaoImpl; -import com.cloud.usage.dao.UsageJobDaoImpl; -import com.cloud.utils.component.SystemIntegrityChecker; - -public class PremiumComponentLibrary extends DefaultComponentLibrary { - @Override - protected void populateDaos() { - super.populateDaos(); - addDao("UsageJobDao", UsageJobDaoImpl.class); - addDao("UsageDao", UsageDaoImpl.class); - addDao("UsageIpAddressDao", UsageIPAddressDaoImpl.class); - addDao("CommandExecLogDao", CommandExecLogDaoImpl.class); - } - - @Override - protected void populateManagers() { - // override FOSS SSVM manager - super.populateManagers(); - addManager("secondary storage vm manager", PremiumSecondaryStorageManagerImpl.class); - - addManager("HA Manager", HighAvailabilityManagerExtImpl.class); - addManager("ExternalNetworkManager", ExternalNetworkDeviceManagerImpl.class); - addManager("BareMetalVmManager", BareMetalVmManagerImpl.class); - addManager("ExternalDhcpManager", ExternalDhcpManagerImpl.class); - addManager("PxeServerManager", PxeServerManagerImpl.class); - addManager("NetworkUsageManager", NetworkUsageManagerImpl.class); - } - - @Override - protected void populateAdapters() { - super.populateAdapters(); - addAdapter(PxeServerService.class, PxeServerType.PING.getName(), BareMetalPingServiceImpl.class); - addAdapter(TemplateAdapter.class, TemplateAdapterType.BareMetal.getName(), BareMetalTemplateAdapter.class); - } -} diff --git a/server/src/com/cloud/configuration/dao/ResourceCountDaoImpl.java b/server/src/com/cloud/configuration/dao/ResourceCountDaoImpl.java index 2c550eaaeba..26121920569 100644 --- a/server/src/com/cloud/configuration/dao/ResourceCountDaoImpl.java +++ 
b/server/src/com/cloud/configuration/dao/ResourceCountDaoImpl.java @@ -34,7 +34,6 @@ import com.cloud.configuration.ResourceLimit; import com.cloud.domain.dao.DomainDaoImpl; import com.cloud.exception.UnsupportedServiceException; import com.cloud.user.dao.AccountDaoImpl; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; @@ -44,90 +43,90 @@ import com.cloud.utils.db.Transaction; @Component @Local(value={ResourceCountDao.class}) public class ResourceCountDaoImpl extends GenericDaoBase implements ResourceCountDao { - private SearchBuilder TypeSearch; - - private SearchBuilder AccountSearch; - private SearchBuilder DomainSearch; - - //protected final DomainDaoImpl _domainDao = ComponentLocator.inject(DomainDaoImpl.class); - //protected final AccountDaoImpl _accountDao = ComponentLocator.inject(AccountDaoImpl.class); + private final SearchBuilder TypeSearch; - @Inject protected DomainDaoImpl _domainDao; - @Inject protected AccountDaoImpl _accountDao; + private final SearchBuilder AccountSearch; + private final SearchBuilder DomainSearch; - public ResourceCountDaoImpl() { - TypeSearch = createSearchBuilder(); - TypeSearch.and("type", TypeSearch.entity().getType(), SearchCriteria.Op.EQ); - TypeSearch.and("accountId", TypeSearch.entity().getAccountId(), SearchCriteria.Op.EQ); - TypeSearch.and("domainId", TypeSearch.entity().getDomainId(), SearchCriteria.Op.EQ); - TypeSearch.done(); - - AccountSearch = createSearchBuilder(); - AccountSearch.and("accountId", AccountSearch.entity().getAccountId(), SearchCriteria.Op.NNULL); - AccountSearch.done(); - - DomainSearch = createSearchBuilder(); - DomainSearch.and("domainId", DomainSearch.entity().getDomainId(), SearchCriteria.Op.NNULL); - DomainSearch.done(); - } - - @Override - public ResourceCountVO findByOwnerAndType(long ownerId, ResourceOwnerType ownerType, ResourceType type) { - SearchCriteria sc = TypeSearch.create(); - sc.setParameters("type", type); - - if (ownerType == ResourceOwnerType.Account) { - sc.setParameters("accountId", ownerId); - return findOneIncludingRemovedBy(sc); - } else if (ownerType == ResourceOwnerType.Domain) { - sc.setParameters("domainId", ownerId); + //protected final DomainDaoImpl _domainDao = ComponentLocator.inject(DomainDaoImpl.class); + //protected final AccountDaoImpl _accountDao = ComponentLocator.inject(AccountDaoImpl.class); + + @Inject protected DomainDaoImpl _domainDao; + @Inject protected AccountDaoImpl _accountDao; + + public ResourceCountDaoImpl() { + TypeSearch = createSearchBuilder(); + TypeSearch.and("type", TypeSearch.entity().getType(), SearchCriteria.Op.EQ); + TypeSearch.and("accountId", TypeSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + TypeSearch.and("domainId", TypeSearch.entity().getDomainId(), SearchCriteria.Op.EQ); + TypeSearch.done(); + + AccountSearch = createSearchBuilder(); + AccountSearch.and("accountId", AccountSearch.entity().getAccountId(), SearchCriteria.Op.NNULL); + AccountSearch.done(); + + DomainSearch = createSearchBuilder(); + DomainSearch.and("domainId", DomainSearch.entity().getDomainId(), SearchCriteria.Op.NNULL); + DomainSearch.done(); + } + + @Override + public ResourceCountVO findByOwnerAndType(long ownerId, ResourceOwnerType ownerType, ResourceType type) { + SearchCriteria sc = TypeSearch.create(); + sc.setParameters("type", type); + + if (ownerType == ResourceOwnerType.Account) { + sc.setParameters("accountId", ownerId); return 
findOneIncludingRemovedBy(sc); - } else { - return null; - } - } - - @Override - public long getResourceCount(long ownerId, ResourceOwnerType ownerType, ResourceType type) { - ResourceCountVO vo = findByOwnerAndType(ownerId, ownerType, type); - if (vo != null) { - return vo.getCount(); - } else { - return 0; - } - } - - @Override - public void setResourceCount(long ownerId, ResourceOwnerType ownerType, ResourceType type, long count) { - ResourceCountVO resourceCountVO = findByOwnerAndType(ownerId, ownerType, type); + } else if (ownerType == ResourceOwnerType.Domain) { + sc.setParameters("domainId", ownerId); + return findOneIncludingRemovedBy(sc); + } else { + return null; + } + } + + @Override + public long getResourceCount(long ownerId, ResourceOwnerType ownerType, ResourceType type) { + ResourceCountVO vo = findByOwnerAndType(ownerId, ownerType, type); + if (vo != null) { + return vo.getCount(); + } else { + return 0; + } + } + + @Override + public void setResourceCount(long ownerId, ResourceOwnerType ownerType, ResourceType type, long count) { + ResourceCountVO resourceCountVO = findByOwnerAndType(ownerId, ownerType, type); if (count != resourceCountVO.getCount()) { resourceCountVO.setCount(count); update(resourceCountVO.getId(), resourceCountVO); } - } + } - @Override @Deprecated - public void updateDomainCount(long domainId, ResourceType type, boolean increment, long delta) { - delta = increment ? delta : delta * -1; + @Override @Deprecated + public void updateDomainCount(long domainId, ResourceType type, boolean increment, long delta) { + delta = increment ? delta : delta * -1; ResourceCountVO resourceCountVO = findByOwnerAndType(domainId, ResourceOwnerType.Domain, type); - resourceCountVO.setCount(resourceCountVO.getCount() + delta); - update(resourceCountVO.getId(), resourceCountVO); - } - - @Override - public boolean updateById(long id, boolean increment, long delta) { - delta = increment ? delta : delta * -1; - - ResourceCountVO resourceCountVO = findById(id); - resourceCountVO.setCount(resourceCountVO.getCount() + delta); - return update(resourceCountVO.getId(), resourceCountVO); - } - - @Override - public Set listRowsToUpdateForDomain(long domainId, ResourceType type) { - Set rowIds = new HashSet(); - Set domainIdsToUpdate = _domainDao.getDomainParentIds(domainId); + resourceCountVO.setCount(resourceCountVO.getCount() + delta); + update(resourceCountVO.getId(), resourceCountVO); + } + + @Override + public boolean updateById(long id, boolean increment, long delta) { + delta = increment ? 
delta : delta * -1; + + ResourceCountVO resourceCountVO = findById(id); + resourceCountVO.setCount(resourceCountVO.getCount() + delta); + return update(resourceCountVO.getId(), resourceCountVO); + } + + @Override + public Set listRowsToUpdateForDomain(long domainId, ResourceType type) { + Set rowIds = new HashSet(); + Set domainIdsToUpdate = _domainDao.getDomainParentIds(domainId); for (Long domainIdToUpdate : domainIdsToUpdate) { ResourceCountVO domainCountRecord = findByOwnerAndType(domainIdToUpdate, ResourceOwnerType.Domain, type); if (domainCountRecord != null) { @@ -135,34 +134,34 @@ public class ResourceCountDaoImpl extends GenericDaoBase } } return rowIds; - } - - @Override - public Set listAllRowsToUpdate(long ownerId, ResourceOwnerType ownerType, ResourceType type) { - Set rowIds = new HashSet(); - - if (ownerType == ResourceOwnerType.Account) { - //get records for account - ResourceCountVO accountCountRecord = findByOwnerAndType(ownerId, ResourceOwnerType.Account, type); - if (accountCountRecord != null) { - rowIds.add(accountCountRecord.getId()); - } - - //get records for account's domain and all its parent domains - rowIds.addAll(listRowsToUpdateForDomain(_accountDao.findByIdIncludingRemoved(ownerId).getDomainId(),type)); - } else if (ownerType == ResourceOwnerType.Domain) { - return listRowsToUpdateForDomain(ownerId, type); - } - - return rowIds; - } - - @Override @DB + } + + @Override + public Set listAllRowsToUpdate(long ownerId, ResourceOwnerType ownerType, ResourceType type) { + Set rowIds = new HashSet(); + + if (ownerType == ResourceOwnerType.Account) { + //get records for account + ResourceCountVO accountCountRecord = findByOwnerAndType(ownerId, ResourceOwnerType.Account, type); + if (accountCountRecord != null) { + rowIds.add(accountCountRecord.getId()); + } + + //get records for account's domain and all its parent domains + rowIds.addAll(listRowsToUpdateForDomain(_accountDao.findByIdIncludingRemoved(ownerId).getDomainId(),type)); + } else if (ownerType == ResourceOwnerType.Domain) { + return listRowsToUpdateForDomain(ownerId, type); + } + + return rowIds; + } + + @Override @DB public void createResourceCounts(long ownerId, ResourceLimit.ResourceOwnerType ownerType){ - + Transaction txn = Transaction.currentTxn(); txn.start(); - + ResourceType[] resourceTypes = Resource.ResourceType.values(); for (ResourceType resourceType : resourceTypes) { if (!resourceType.supportsOwner(ownerType)) { @@ -171,24 +170,24 @@ public class ResourceCountDaoImpl extends GenericDaoBase ResourceCountVO resourceCountVO = new ResourceCountVO(resourceType, 0, ownerId, ownerType); persist(resourceCountVO); } - + txn.commit(); } - - private List listByDomainId(long domainId) { - SearchCriteria sc = TypeSearch.create(); + + private List listByDomainId(long domainId) { + SearchCriteria sc = TypeSearch.create(); sc.setParameters("domainId", domainId); return listBy(sc); - } - + } + private List listByAccountId(long accountId) { - SearchCriteria sc = TypeSearch.create(); + SearchCriteria sc = TypeSearch.create(); sc.setParameters("accountId", accountId); return listBy(sc); - } - + } + @Override public List listByOwnerId(long ownerId, ResourceOwnerType ownerType) { if (ownerType == ResourceOwnerType.Account) { @@ -199,26 +198,26 @@ public class ResourceCountDaoImpl extends GenericDaoBase return new ArrayList(); } } - - @Override - public List listResourceCountByOwnerType(ResourceOwnerType ownerType) { - if (ownerType == ResourceOwnerType.Account) { - return listBy(AccountSearch.create()); - } else if 
(ownerType == ResourceOwnerType.Domain) { - return listBy(DomainSearch.create()); - } else { - return new ArrayList(); - } - } - - @Override + + @Override + public List listResourceCountByOwnerType(ResourceOwnerType ownerType) { + if (ownerType == ResourceOwnerType.Account) { + return listBy(AccountSearch.create()); + } else if (ownerType == ResourceOwnerType.Domain) { + return listBy(DomainSearch.create()); + } else { + return new ArrayList(); + } + } + + @Override public ResourceCountVO persist(ResourceCountVO resourceCountVO){ - ResourceOwnerType ownerType = resourceCountVO.getResourceOwnerType(); - ResourceType resourceType = resourceCountVO.getType(); - if (!resourceType.supportsOwner(ownerType)) { - throw new UnsupportedServiceException("Resource type " + resourceType + " is not supported for owner of type " + ownerType.getName()); - } - + ResourceOwnerType ownerType = resourceCountVO.getResourceOwnerType(); + ResourceType resourceType = resourceCountVO.getType(); + if (!resourceType.supportsOwner(ownerType)) { + throw new UnsupportedServiceException("Resource type " + resourceType + " is not supported for owner of type " + ownerType.getName()); + } + return super.persist(resourceCountVO); } } \ No newline at end of file diff --git a/server/src/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java b/server/src/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java index a3de9466dab..a2399977ab0 100755 --- a/server/src/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java +++ b/server/src/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java @@ -49,7 +49,6 @@ import com.cloud.host.dao.HostDao; import com.cloud.info.ConsoleProxyInfo; import com.cloud.network.Network; import com.cloud.utils.NumbersUtil; -import com.cloud.utils.component.ComponentLocator; import com.cloud.vm.ConsoleProxyVO; import com.cloud.vm.ReservationContext; import com.cloud.vm.UserVmVO; @@ -87,9 +86,9 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu VirtualMachineManager _itMgr; @Inject protected ConsoleProxyDao _cpDao; - + @Inject ConfigurationDao _configDao; - + public int getVncPort(VMInstanceVO vm) { if (vm.getHostId() == null) { return -1; @@ -112,7 +111,7 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu if (value != null) { _consoleProxyUrlPort = NumbersUtil.parseInt(value, ConsoleProxyManager.DEFAULT_PROXY_URL_PORT); } - + value = configs.get("consoleproxy.port"); if (value != null) { _consoleProxyPort = NumbersUtil.parseInt(value, ConsoleProxyManager.DEFAULT_PROXY_VNC_PORT); @@ -126,10 +125,10 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu _instance = configs.get("instance.name"); _consoleProxyUrlDomain = configs.get("consoleproxy.url.domain"); - + _listener = new ConsoleProxyListener(this); _agentMgr.registerForHostEvents(_listener, true, true, false); - + _itMgr.registerGuru(VirtualMachine.Type.ConsoleProxy, this); if (s_logger.isInfoEnabled()) { @@ -177,20 +176,20 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu } publicIp = host.getPrivateIpAddress(); } - + int urlPort = _consoleProxyUrlPort; if (host.getProxyPort() != null && host.getProxyPort().intValue() > 0) { urlPort = host.getProxyPort().intValue(); } - + return new ConsoleProxyInfo(_sslEnabled, publicIp, _consoleProxyPort, urlPort, _consoleProxyUrlDomain); } else { s_logger.warn("Host that VM is running is no longer available, console access to VM " + userVmId + " will be temporarily 
unavailable."); } return null; } - + @Override public void onLoadReport(ConsoleProxyLoadReportCommand cmd) { } @@ -273,16 +272,16 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu @Override public void setManagementState(ConsoleProxyManagementState state) { } - + @Override public ConsoleProxyManagementState getManagementState() { - return null; + return null; } - + @Override public void resumeLastManagementState() { } - + @Override public void startAgentHttpHandlerInVM(StartupProxyCommand startupCmd) { } @@ -299,7 +298,7 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu } return VirtualMachineName.getConsoleProxyId(vmName); } - + @Override public ConsoleProxyVO findByName(String name) { // TODO Auto-generated method stub @@ -329,7 +328,7 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu // TODO Auto-generated method stub return false; } - + @Override public boolean finalizeCommandsOnStart(Commands cmds, VirtualMachineProfile profile) { // TODO Auto-generated method stub @@ -346,7 +345,7 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu public void finalizeStop(VirtualMachineProfile profile, StopAnswer answer) { // TODO Auto-generated method stub } - + @Override public void finalizeExpunge(ConsoleProxyVO proxy) { } @@ -366,7 +365,7 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu //not supported throw new UnsupportedOperationException("Unplug nic is not supported for vm of type " + vm.getType()); } - + @Override public void prepareStop(VirtualMachineProfile profile) { } diff --git a/server/src/com/cloud/consoleproxy/ConsoleProxyBalanceAllocator.java b/server/src/com/cloud/consoleproxy/ConsoleProxyBalanceAllocator.java index 84f6faca7ab..0a045eb9602 100644 --- a/server/src/com/cloud/consoleproxy/ConsoleProxyBalanceAllocator.java +++ b/server/src/com/cloud/consoleproxy/ConsoleProxyBalanceAllocator.java @@ -27,9 +27,6 @@ import javax.naming.ConfigurationException; import org.springframework.stereotype.Component; -import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.component.ComponentLocator; import com.cloud.vm.ConsoleProxyVO; import edu.emory.mathcs.backport.java.util.Collections; @@ -37,45 +34,45 @@ import edu.emory.mathcs.backport.java.util.Collections; @Component @Local(value={ConsoleProxyAllocator.class}) public class ConsoleProxyBalanceAllocator implements ConsoleProxyAllocator { - + private String _name; private final Random _rand = new Random(System.currentTimeMillis()); - - @Override - public ConsoleProxyVO allocProxy(List candidates, final Map loadInfo, long dataCenterId) { - if(candidates != null) { - - List allocationList = new ArrayList(); - for(ConsoleProxyVO proxy : candidates) { - allocationList.add(proxy); - } - - Collections.sort(candidates, new Comparator () { - @Override - public int compare(ConsoleProxyVO x, ConsoleProxyVO y) { - Integer loadOfX = loadInfo.get(x.getId()); - Integer loadOfY = loadInfo.get(y.getId()); - if(loadOfX != null && loadOfY != null) { - if(loadOfX < loadOfY) - return -1; - else if(loadOfX > loadOfY) - return 1; - return 0; - } else if(loadOfX == null && loadOfY == null) { - return 0; - } else { - if(loadOfX == null) - return -1; - return 1; - } - } - }); - - if(allocationList.size() > 0) - return allocationList.get(0); - } - return null; + @Override + public ConsoleProxyVO allocProxy(List candidates, final Map loadInfo, 
long dataCenterId) { + if(candidates != null) { + + List allocationList = new ArrayList(); + for(ConsoleProxyVO proxy : candidates) { + allocationList.add(proxy); + } + + Collections.sort(candidates, new Comparator () { + @Override + public int compare(ConsoleProxyVO x, ConsoleProxyVO y) { + Integer loadOfX = loadInfo.get(x.getId()); + Integer loadOfY = loadInfo.get(y.getId()); + + if(loadOfX != null && loadOfY != null) { + if(loadOfX < loadOfY) + return -1; + else if(loadOfX > loadOfY) + return 1; + return 0; + } else if(loadOfX == null && loadOfY == null) { + return 0; + } else { + if(loadOfX == null) + return -1; + return 1; + } + } + }); + + if(allocationList.size() > 0) + return allocationList.get(0); + } + return null; } @Override @@ -83,7 +80,7 @@ public class ConsoleProxyBalanceAllocator implements ConsoleProxyAllocator { _name = name; return true; } - + @Override public String getName() { return _name; diff --git a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 6ceeb5f5ebe..502ba3075e4 100755 --- a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -19,7 +19,6 @@ package com.cloud.consoleproxy; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Date; -import java.util.Enumeration; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -32,6 +31,9 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import javax.persistence.Table; +import org.apache.cloudstack.api.ServerApiException; +import com.cloud.offering.DiskOffering; +import com.cloud.storage.dao.DiskOfferingDao; import org.apache.log4j.Logger; import org.springframework.context.annotation.Primary; import org.springframework.stereotype.Component; @@ -55,7 +57,6 @@ import com.cloud.agent.api.proxy.StartConsoleProxyAgentHttpHandlerCommand; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.manager.Commands; -import org.apache.cloudstack.api.ServerApiException; import com.cloud.api.commands.DestroyConsoleProxyCmd; import com.cloud.certificate.dao.CertificateDao; import com.cloud.cluster.ClusterManager; @@ -110,7 +111,6 @@ import com.cloud.resource.UnableDeleteHostException; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.servlet.ConsoleProxyServlet; -import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.StoragePoolVO; @@ -128,8 +128,6 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; -import com.cloud.utils.component.Adapters; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.component.Manager; import com.cloud.utils.db.DB; import com.cloud.utils.db.GlobalLock; @@ -140,7 +138,6 @@ import com.cloud.utils.db.Transaction; import com.cloud.utils.events.SubscriptionMgr; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; -import com.cloud.uuididentity.dao.IdentityDao; import com.cloud.vm.ConsoleProxyVO; import com.cloud.vm.NicProfile; import com.cloud.vm.ReservationContext; @@ -221,6 +218,8 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx @Inject ServiceOfferingDao _offeringDao; @Inject + 
DiskOfferingDao _diskOfferingDao; + @Inject NetworkOfferingDao _networkOfferingDao; @Inject StoragePoolDao _storagePoolDao; @@ -229,14 +228,12 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx @Inject ResourceManager _resourceMgr; @Inject - IdentityDao _identityDao; - @Inject NetworkDao _networkDao; @Inject RulesManager _rulesMgr; @Inject IPAddressDao _ipAddressDao; - + private ConsoleProxyListener _listener; private ServiceOfferingVO _serviceOffering; @@ -269,7 +266,7 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx private Map _zoneHostInfoMap; // map private Map _zoneProxyCountMap; // map private Map _zoneVmCountMap; // map - + private String _hashKey; private String _staticPublicIp; private int _staticPort; @@ -478,9 +475,9 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx assert (ksVo != null); if (_staticPublicIp == null) { - return new ConsoleProxyInfo(proxy.isSslEnabled(), proxy.getPublicIpAddress(), _consoleProxyPort, proxy.getPort(), ksVo.getDomainSuffix()); + return new ConsoleProxyInfo(proxy.isSslEnabled(), proxy.getPublicIpAddress(), _consoleProxyPort, proxy.getPort(), ksVo.getDomainSuffix()); } else { - return new ConsoleProxyInfo(proxy.isSslEnabled(), _staticPublicIp, _consoleProxyPort, _staticPort, ksVo.getDomainSuffix()); + return new ConsoleProxyInfo(proxy.isSslEnabled(), _staticPublicIp, _consoleProxyPort, _staticPort, ksVo.getDomainSuffix()); } } @@ -809,10 +806,10 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx private ConsoleProxyAllocator getCurrentAllocator() { // for now, only one adapter is supported - for(ConsoleProxyAllocator allocator : _consoleProxyAllocators) { - return allocator; - } - + for(ConsoleProxyAllocator allocator : _consoleProxyAllocators) { + return allocator; + } + return null; } @@ -903,26 +900,26 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx } if(!cmd.isReauthenticating()) { - String ticket = ConsoleProxyServlet.genAccessTicket(cmd.getHost(), cmd.getPort(), cmd.getSid(), cmd.getVmId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Console authentication. Ticket in 1 minute boundary for " + cmd.getHost() + ":" + cmd.getPort() + "-" + cmd.getVmId() + " is " + ticket); - } - - if (!ticket.equals(ticketInUrl)) { - Date now = new Date(); - // considering of minute round-up - String minuteEarlyTicket = ConsoleProxyServlet.genAccessTicket(cmd.getHost(), cmd.getPort(), cmd.getSid(), cmd.getVmId(), new Date(now.getTime() - 60 * 1000)); - - if (s_logger.isDebugEnabled()) { - s_logger.debug("Console authentication. Ticket in 2-minute boundary for " + cmd.getHost() + ":" + cmd.getPort() + "-" + cmd.getVmId() + " is " + minuteEarlyTicket); - } - - if (!minuteEarlyTicket.equals(ticketInUrl)) { - s_logger.error("Access ticket expired or has been modified. vmId: " + cmd.getVmId() + "ticket in URL: " + ticketInUrl + ", tickets to check against: " + ticket + "," - + minuteEarlyTicket); - return new ConsoleAccessAuthenticationAnswer(cmd, false); - } - } + String ticket = ConsoleProxyServlet.genAccessTicket(cmd.getHost(), cmd.getPort(), cmd.getSid(), cmd.getVmId()); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Console authentication. 
Ticket in 1 minute boundary for " + cmd.getHost() + ":" + cmd.getPort() + "-" + cmd.getVmId() + " is " + ticket); + } + + if (!ticket.equals(ticketInUrl)) { + Date now = new Date(); + // considering of minute round-up + String minuteEarlyTicket = ConsoleProxyServlet.genAccessTicket(cmd.getHost(), cmd.getPort(), cmd.getSid(), cmd.getVmId(), new Date(now.getTime() - 60 * 1000)); + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Console authentication. Ticket in 2-minute boundary for " + cmd.getHost() + ":" + cmd.getPort() + "-" + cmd.getVmId() + " is " + minuteEarlyTicket); + } + + if (!minuteEarlyTicket.equals(ticketInUrl)) { + s_logger.error("Access ticket expired or has been modified. vmId: " + cmd.getVmId() + "ticket in URL: " + ticketInUrl + ", tickets to check against: " + ticket + "," + + minuteEarlyTicket); + return new ConsoleAccessAuthenticationAnswer(cmd, false); + } + } } if (cmd.getVmId() != null && cmd.getVmId().isEmpty()) { @@ -932,14 +929,12 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx return new ConsoleAccessAuthenticationAnswer(cmd, false); } - vmId = _identityDao.getIdentityId("vm_instance", cmd.getVmId()); - if (vmId == null) { - s_logger.error("Invalid vm id " + cmd.getVmId() + " sent from console access authentication"); - return new ConsoleAccessAuthenticationAnswer(cmd, false); - } - - VMInstanceVO vm = _instanceDao.findById(vmId); + VirtualMachine vm = _instanceDao.findByUuid(cmd.getVmId()); if (vm == null) { + vm = _instanceDao.findById(Long.parseLong(cmd.getVmId())); + } + if (vm == null) { + s_logger.error("Invalid vm id " + cmd.getVmId() + " sent from console access authentication"); return new ConsoleAccessAuthenticationAnswer(cmd, false); } @@ -959,38 +954,38 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx s_logger.warn("sid " + sid + " in url does not match stored sid " + vm.getVncPassword()); return new ConsoleAccessAuthenticationAnswer(cmd, false); } - + if(cmd.isReauthenticating()) { ConsoleAccessAuthenticationAnswer authenticationAnswer = new ConsoleAccessAuthenticationAnswer(cmd, true); authenticationAnswer.setReauthenticating(true); s_logger.info("Re-authentication request, ask host " + vm.getHostId() + " for new console info"); - GetVncPortAnswer answer = (GetVncPortAnswer) _agentMgr.easySend(vm.getHostId(), new - GetVncPortCommand(vm.getId(), vm.getInstanceName())); + GetVncPortAnswer answer = (GetVncPortAnswer) _agentMgr.easySend(vm.getHostId(), new + GetVncPortCommand(vm.getId(), vm.getInstanceName())); if (answer != null && answer.getResult()) { - Ternary parsedHostInfo = ConsoleProxyServlet.parseHostInfo(answer.getAddress()); - - if(parsedHostInfo.second() != null && parsedHostInfo.third() != null) { - + Ternary parsedHostInfo = ConsoleProxyServlet.parseHostInfo(answer.getAddress()); + + if(parsedHostInfo.second() != null && parsedHostInfo.third() != null) { + s_logger.info("Re-authentication result. vm: " + vm.getId() + ", tunnel url: " + parsedHostInfo.second() - + ", tunnel session: " + parsedHostInfo.third()); - - authenticationAnswer.setTunnelUrl(parsedHostInfo.second()); - authenticationAnswer.setTunnelSession(parsedHostInfo.third()); - } else { + + ", tunnel session: " + parsedHostInfo.third()); + + authenticationAnswer.setTunnelUrl(parsedHostInfo.second()); + authenticationAnswer.setTunnelSession(parsedHostInfo.third()); + } else { s_logger.info("Re-authentication result. 
vm: " + vm.getId() + ", host address: " + parsedHostInfo.first() - + ", port: " + answer.getPort()); - - authenticationAnswer.setHost(parsedHostInfo.first()); - authenticationAnswer.setPort(answer.getPort()); - } + + ", port: " + answer.getPort()); + + authenticationAnswer.setHost(parsedHostInfo.first()); + authenticationAnswer.setPort(answer.getPort()); + } } else { s_logger.warn("Re-authentication request failed"); - - authenticationAnswer.setSuccess(false); + + authenticationAnswer.setSuccess(false); } - + return authenticationAnswer; } @@ -1198,11 +1193,11 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx } } else { if (s_logger.isDebugEnabled()) { - if (secondaryStorageHost != null) { - s_logger.debug("Zone host is ready, but console proxy template: " + template.getId() + " is not ready on secondary storage: " + secondaryStorageHost.getId()); - } else { - s_logger.debug("Zone host is ready, but console proxy template: " + template.getId() + " is not ready on secondary storage."); - } + if (secondaryStorageHost != null) { + s_logger.debug("Zone host is ready, but console proxy template: " + template.getId() + " is not ready on secondary storage: " + secondaryStorageHost.getId()); + } else { + s_logger.debug("Zone host is ready, but console proxy template: " + template.getId() + " is not ready on secondary storage."); + } } } } @@ -1411,7 +1406,7 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx result = result && _hostDao.remove(host.getId()); } } - + return result; } catch (ResourceUnavailableException e) { s_logger.warn("Unable to expunge " + proxy, e); @@ -1514,26 +1509,23 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx _itMgr.registerGuru(VirtualMachine.Type.ConsoleProxy, this); boolean useLocalStorage = Boolean.parseBoolean(configs.get(Config.SystemVMUseLocalStorage.key())); - + //check if there is a default service offering configured - String cpvmSrvcOffIdStr = configs.get(Config.ConsoleProxyServiceOffering.key()); + String cpvmSrvcOffIdStr = configs.get(Config.ConsoleProxyServiceOffering.key()); if (cpvmSrvcOffIdStr != null) { - - Long cpvmSrvcOffId = null; - try { - cpvmSrvcOffId = _identityDao.getIdentityId(DiskOfferingVO.class.getAnnotation(Table.class).name(),cpvmSrvcOffIdStr); - } catch (Exception e) { - String msg = "Can't find system service offering specified by global config, uuid=" + cpvmSrvcOffIdStr + " for console proxy vm"; - s_logger.warn(msg); + DiskOffering diskOffering = _diskOfferingDao.findByUuid(cpvmSrvcOffIdStr); + if (diskOffering == null) + diskOffering = _diskOfferingDao.findById(Long.parseLong(cpvmSrvcOffIdStr)); + if (diskOffering != null) { + _serviceOffering = _offeringDao.findById(diskOffering.getId()); + } else { + s_logger.warn("Can't find system service offering specified by global config, uuid=" + cpvmSrvcOffIdStr + " for console proxy vm"); } - if(cpvmSrvcOffId != null){ - _serviceOffering = _offeringDao.findById(cpvmSrvcOffId); - } - } + } if(_serviceOffering == null || !_serviceOffering.getSystemUse()){ - int ramSize = NumbersUtil.parseInt(_configDao.getValue("console.ram.size"), DEFAULT_PROXY_VM_RAMSIZE); - int cpuFreq = NumbersUtil.parseInt(_configDao.getValue("console.cpu.mhz"), DEFAULT_PROXY_VM_CPUMHZ); + int ramSize = NumbersUtil.parseInt(_configDao.getValue("console.ram.size"), DEFAULT_PROXY_VM_RAMSIZE); + int cpuFreq = NumbersUtil.parseInt(_configDao.getValue("console.cpu.mhz"), DEFAULT_PROXY_VM_CPUMHZ); _serviceOffering = new 
ServiceOfferingVO("System Offering For Console Proxy", 1, ramSize, cpuFreq, 0, 0, false, null, useLocalStorage, true, null, true, VirtualMachine.Type.ConsoleProxy, true); _serviceOffering.setUniqueName(ServiceOffering.consoleProxyDefaultOffUniqueName); _serviceOffering = _offeringDao.persistSystemServiceOffering(_serviceOffering); @@ -1552,7 +1544,7 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx _staticPublicIp = _configDao.getValue("consoleproxy.static.publicIp"); if (_staticPublicIp != null) { - _staticPort = NumbersUtil.parseInt(_configDao.getValue("consoleproxy.static.port"), 8443); + _staticPort = NumbersUtil.parseInt(_configDao.getValue("consoleproxy.static.port"), 8443); } if (s_logger.isInfoEnabled()) { @@ -2011,7 +2003,7 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx sc.addAnd(sc.getEntity().getName(), Op.EQ, name); return sc.find(); } - + public String getHashKey() { // although we may have race conditioning here, database transaction serialization should // give us the same key @@ -2036,8 +2028,8 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx //not supported throw new UnsupportedOperationException("Unplug nic is not supported for vm of type " + vm.getType()); } - - @Override - public void prepareStop(VirtualMachineProfile profile) { - } + + @Override + public void prepareStop(VirtualMachineProfile profile) { + } } diff --git a/server/src/com/cloud/consoleproxy/StaticConsoleProxyManager.java b/server/src/com/cloud/consoleproxy/StaticConsoleProxyManager.java index c28a2e498e3..13d3112c827 100755 --- a/server/src/com/cloud/consoleproxy/StaticConsoleProxyManager.java +++ b/server/src/com/cloud/consoleproxy/StaticConsoleProxyManager.java @@ -30,7 +30,6 @@ import com.cloud.host.Host.Type; import com.cloud.host.HostVO; import com.cloud.info.ConsoleProxyInfo; import com.cloud.resource.ResourceManager; -import com.cloud.utils.component.ComponentLocator; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.dao.ConsoleProxyDao; @@ -41,31 +40,31 @@ public class StaticConsoleProxyManager extends AgentBasedConsoleProxyManager imp @Inject ConsoleProxyDao _proxyDao; @Inject ResourceManager _resourceMgr; @Inject ConfigurationDao _configDao; - + @Override protected HostVO findHost(VMInstanceVO vm) { - + List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByType(Type.ConsoleProxy, vm.getDataCenterIdToDeployIn()); - + return hosts.isEmpty() ? 
null : hosts.get(0); } - + @Override public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { return new ConsoleProxyInfo(false, _ip, _consoleProxyPort, _consoleProxyUrlPort, _consoleProxyUrlDomain); } - + @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); - + Map dbParams = _configDao.getConfiguration("ManagementServer", params); - + _ip = dbParams.get("public.ip"); if (_ip == null) { _ip = "127.0.0.1"; } - + return true; } } diff --git a/server/src/com/cloud/dc/dao/ClusterDaoImpl.java b/server/src/com/cloud/dc/dao/ClusterDaoImpl.java index f06b24daae5..86dc65e05bd 100644 --- a/server/src/com/cloud/dc/dao/ClusterDaoImpl.java +++ b/server/src/com/cloud/dc/dao/ClusterDaoImpl.java @@ -33,7 +33,6 @@ import com.cloud.dc.ClusterVO; import com.cloud.dc.HostPodVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.org.Grouping; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.JoinBuilder; @@ -53,73 +52,73 @@ public class ClusterDaoImpl extends GenericDaoBase implements C protected final SearchBuilder AvailHyperSearch; protected final SearchBuilder ZoneSearch; protected final SearchBuilder ZoneHyTypeSearch; - + private static final String GET_POD_CLUSTER_MAP_PREFIX = "SELECT pod_id, id FROM cloud.cluster WHERE cluster.id IN( "; private static final String GET_POD_CLUSTER_MAP_SUFFIX = " )"; @Inject protected HostPodDao _hostPodDao; - + public ClusterDaoImpl() { super(); - + HyTypeWithoutGuidSearch = createSearchBuilder(); HyTypeWithoutGuidSearch.and("hypervisorType", HyTypeWithoutGuidSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ); HyTypeWithoutGuidSearch.and("guid", HyTypeWithoutGuidSearch.entity().getGuid(), SearchCriteria.Op.NULL); HyTypeWithoutGuidSearch.done(); - + ZoneHyTypeSearch = createSearchBuilder(); ZoneHyTypeSearch.and("hypervisorType", ZoneHyTypeSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ); ZoneHyTypeSearch.and("dataCenterId", ZoneHyTypeSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); ZoneHyTypeSearch.done(); - + PodSearch = createSearchBuilder(); PodSearch.and("pod", PodSearch.entity().getPodId(), SearchCriteria.Op.EQ); PodSearch.and("name", PodSearch.entity().getName(), SearchCriteria.Op.EQ); PodSearch.done(); - + ZoneSearch = createSearchBuilder(); ZoneSearch.and("dataCenterId", ZoneSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); ZoneSearch.groupBy(ZoneSearch.entity().getHypervisorType()); ZoneSearch.done(); - + AvailHyperSearch = createSearchBuilder(); AvailHyperSearch.and("zoneId", AvailHyperSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); AvailHyperSearch.select(null, Func.DISTINCT, AvailHyperSearch.entity().getHypervisorType()); AvailHyperSearch.done(); } - + @Override public List listByZoneId(long zoneId) { SearchCriteria sc = ZoneSearch.create(); sc.setParameters("dataCenterId", zoneId); return listBy(sc); } - + @Override public List listByPodId(long podId) { SearchCriteria sc = PodSearch.create(); sc.setParameters("pod", podId); - + return listBy(sc); } - + @Override public ClusterVO findBy(String name, long podId) { SearchCriteria sc = PodSearch.create(); sc.setParameters("pod", podId); sc.setParameters("name", name); - + return findOneBy(sc); } - + @Override public List listByHyTypeWithoutGuid(String hyType) { SearchCriteria sc = HyTypeWithoutGuidSearch.create(); sc.setParameters("hypervisorType", 
hyType); - + return listBy(sc); } - + @Override public List listByDcHyType(long dcId, String hyType) { SearchCriteria sc = ZoneHyTypeSearch.create(); @@ -127,7 +126,7 @@ public class ClusterDaoImpl extends GenericDaoBase implements C sc.setParameters("hypervisorType", hyType); return listBy(sc); } - + @Override public List getAvailableHypervisorInZone(Long zoneId) { SearchCriteria sc = AvailHyperSearch.create(); @@ -139,13 +138,13 @@ public class ClusterDaoImpl extends GenericDaoBase implements C for (ClusterVO cluster : clusters) { hypers.add(cluster.getHypervisorType()); } - + return hypers; } - + @Override public Map> getPodClusterIdMap(List clusterIds){ - Transaction txn = Transaction.currentTxn(); + Transaction txn = Transaction.currentTxn(); PreparedStatement pstmt = null; Map> result = new HashMap>(); @@ -158,20 +157,20 @@ public class ClusterDaoImpl extends GenericDaoBase implements C sql.delete(sql.length()-1, sql.length()); sql.append(GET_POD_CLUSTER_MAP_SUFFIX); } - + pstmt = txn.prepareAutoCloseStatement(sql.toString()); ResultSet rs = pstmt.executeQuery(); while (rs.next()) { - Long podId = rs.getLong(1); - Long clusterIdInPod = rs.getLong(2); + Long podId = rs.getLong(1); + Long clusterIdInPod = rs.getLong(2); if(result.containsKey(podId)){ - List clusterList = result.get(podId); - clusterList.add(clusterIdInPod); - result.put(podId, clusterList); + List clusterList = result.get(podId); + clusterList.add(clusterIdInPod); + result.put(podId, clusterList); }else{ - List clusterList = new ArrayList(); - clusterList.add(clusterIdInPod); - result.put(podId, clusterList); + List clusterList = new ArrayList(); + clusterList.add(clusterIdInPod); + result.put(podId, clusterList); } } return result; @@ -181,49 +180,49 @@ public class ClusterDaoImpl extends GenericDaoBase implements C throw new CloudRuntimeException("Caught: " + GET_POD_CLUSTER_MAP_PREFIX, e); } } - + @Override public List listDisabledClusters(long zoneId, Long podId) { - GenericSearchBuilder clusterIdSearch = createSearchBuilder(Long.class); - clusterIdSearch.selectField(clusterIdSearch.entity().getId()); - clusterIdSearch.and("dataCenterId", clusterIdSearch.entity().getDataCenterId(), Op.EQ); - if(podId != null){ - clusterIdSearch.and("podId", clusterIdSearch.entity().getPodId(), Op.EQ); - } - clusterIdSearch.and("allocationState", clusterIdSearch.entity().getAllocationState(), Op.EQ); - clusterIdSearch.done(); + GenericSearchBuilder clusterIdSearch = createSearchBuilder(Long.class); + clusterIdSearch.selectField(clusterIdSearch.entity().getId()); + clusterIdSearch.and("dataCenterId", clusterIdSearch.entity().getDataCenterId(), Op.EQ); + if(podId != null){ + clusterIdSearch.and("podId", clusterIdSearch.entity().getPodId(), Op.EQ); + } + clusterIdSearch.and("allocationState", clusterIdSearch.entity().getAllocationState(), Op.EQ); + clusterIdSearch.done(); - - SearchCriteria sc = clusterIdSearch.create(); + + SearchCriteria sc = clusterIdSearch.create(); sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId); if (podId != null) { - sc.addAnd("podId", SearchCriteria.Op.EQ, podId); - } + sc.addAnd("podId", SearchCriteria.Op.EQ, podId); + } sc.addAnd("allocationState", SearchCriteria.Op.EQ, Grouping.AllocationState.Disabled); return customSearch(sc, null); } @Override public List listClustersWithDisabledPods(long zoneId) { - - GenericSearchBuilder disabledPodIdSearch = _hostPodDao.createSearchBuilder(Long.class); - disabledPodIdSearch.selectField(disabledPodIdSearch.entity().getId()); - 
disabledPodIdSearch.and("dataCenterId", disabledPodIdSearch.entity().getDataCenterId(), Op.EQ); - disabledPodIdSearch.and("allocationState", disabledPodIdSearch.entity().getAllocationState(), Op.EQ); - GenericSearchBuilder clusterIdSearch = createSearchBuilder(Long.class); - clusterIdSearch.selectField(clusterIdSearch.entity().getId()); - clusterIdSearch.join("disabledPodIdSearch", disabledPodIdSearch, clusterIdSearch.entity().getPodId(), disabledPodIdSearch.entity().getId(), JoinBuilder.JoinType.INNER); - clusterIdSearch.done(); + GenericSearchBuilder disabledPodIdSearch = _hostPodDao.createSearchBuilder(Long.class); + disabledPodIdSearch.selectField(disabledPodIdSearch.entity().getId()); + disabledPodIdSearch.and("dataCenterId", disabledPodIdSearch.entity().getDataCenterId(), Op.EQ); + disabledPodIdSearch.and("allocationState", disabledPodIdSearch.entity().getAllocationState(), Op.EQ); - - SearchCriteria sc = clusterIdSearch.create(); + GenericSearchBuilder clusterIdSearch = createSearchBuilder(Long.class); + clusterIdSearch.selectField(clusterIdSearch.entity().getId()); + clusterIdSearch.join("disabledPodIdSearch", disabledPodIdSearch, clusterIdSearch.entity().getPodId(), disabledPodIdSearch.entity().getId(), JoinBuilder.JoinType.INNER); + clusterIdSearch.done(); + + + SearchCriteria sc = clusterIdSearch.create(); sc.setJoinParameters("disabledPodIdSearch", "dataCenterId", zoneId); sc.setJoinParameters("disabledPodIdSearch", "allocationState", Grouping.AllocationState.Disabled); - + return customSearch(sc, null); } - + @Override public boolean remove(Long id) { Transaction txn = Transaction.currentTxn(); @@ -231,7 +230,7 @@ public class ClusterDaoImpl extends GenericDaoBase implements C ClusterVO cluster = createForUpdate(); cluster.setName(null); cluster.setGuid(null); - + update(id, cluster); boolean result = super.remove(id); diff --git a/server/src/com/cloud/dc/dao/HostPodDaoImpl.java b/server/src/com/cloud/dc/dao/HostPodDaoImpl.java index a06bd3fe10a..4844d028aee 100644 --- a/server/src/com/cloud/dc/dao/HostPodDaoImpl.java +++ b/server/src/com/cloud/dc/dao/HostPodDaoImpl.java @@ -24,6 +24,7 @@ import java.util.HashMap; import java.util.List; import javax.ejb.Local; +import javax.inject.Inject; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -37,7 +38,6 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; -import com.cloud.utils.component.ComponentLocator; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.VMInstanceDaoImpl; @@ -46,32 +46,32 @@ import com.cloud.vm.dao.VMInstanceDaoImpl; @Local(value={HostPodDao.class}) public class HostPodDaoImpl extends GenericDaoBase implements HostPodDao { private static final Logger s_logger = Logger.getLogger(HostPodDaoImpl.class); - - protected SearchBuilder DataCenterAndNameSearch; - protected SearchBuilder DataCenterIdSearch; - - protected HostPodDaoImpl() { - DataCenterAndNameSearch = createSearchBuilder(); - DataCenterAndNameSearch.and("dc", DataCenterAndNameSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); - DataCenterAndNameSearch.and("name", DataCenterAndNameSearch.entity().getName(), SearchCriteria.Op.EQ); - DataCenterAndNameSearch.done(); - - DataCenterIdSearch = createSearchBuilder(); - DataCenterIdSearch.and("dcId", DataCenterIdSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); - DataCenterIdSearch.done(); - } - - @Override + 
@Inject VMInstanceDaoImpl _vmDao; + + protected SearchBuilder DataCenterAndNameSearch; + protected SearchBuilder DataCenterIdSearch; + + protected HostPodDaoImpl() { + DataCenterAndNameSearch = createSearchBuilder(); + DataCenterAndNameSearch.and("dc", DataCenterAndNameSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + DataCenterAndNameSearch.and("name", DataCenterAndNameSearch.entity().getName(), SearchCriteria.Op.EQ); + DataCenterAndNameSearch.done(); + + DataCenterIdSearch = createSearchBuilder(); + DataCenterIdSearch.and("dcId", DataCenterIdSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + DataCenterIdSearch.done(); + } + + @Override public List listByDataCenterId(long id) { - SearchCriteria sc = DataCenterIdSearch.create(); - sc.setParameters("dcId", id); - - return listBy(sc); - } - - @Override + SearchCriteria sc = DataCenterIdSearch.create(); + sc.setParameters("dcId", id); + + return listBy(sc); + } + + @Override public List listByDataCenterIdVMTypeAndStates(long id, VirtualMachine.Type type, VirtualMachine.State... states) { - final VMInstanceDaoImpl _vmDao = ComponentLocator.inject(VMInstanceDaoImpl.class); SearchBuilder vmInstanceSearch = _vmDao.createSearchBuilder(); vmInstanceSearch.and("type", vmInstanceSearch.entity().getType(), SearchCriteria.Op.EQ); vmInstanceSearch.and("states", vmInstanceSearch.entity().getState(), SearchCriteria.Op.IN); @@ -90,51 +90,51 @@ public class HostPodDaoImpl extends GenericDaoBase implements H return listBy(sc); } - @Override + @Override public HostPodVO findByName(String name, long dcId) { - SearchCriteria sc = DataCenterAndNameSearch.create(); - sc.setParameters("dc", dcId); - sc.setParameters("name", name); - - return findOneBy(sc); - } - - @Override - public HashMap> getCurrentPodCidrSubnets(long zoneId, long podIdToSkip) { - HashMap> currentPodCidrSubnets = new HashMap>(); - - String selectSql = "SELECT id, cidr_address, cidr_size FROM host_pod_ref WHERE data_center_id=" + zoneId +" and removed IS NULL"; - Transaction txn = Transaction.currentTxn(); - try { - PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql); - ResultSet rs = stmt.executeQuery(); - while (rs.next()) { - Long podId = rs.getLong("id"); - if (podId.longValue() == podIdToSkip) { + SearchCriteria sc = DataCenterAndNameSearch.create(); + sc.setParameters("dc", dcId); + sc.setParameters("name", name); + + return findOneBy(sc); + } + + @Override + public HashMap> getCurrentPodCidrSubnets(long zoneId, long podIdToSkip) { + HashMap> currentPodCidrSubnets = new HashMap>(); + + String selectSql = "SELECT id, cidr_address, cidr_size FROM host_pod_ref WHERE data_center_id=" + zoneId +" and removed IS NULL"; + Transaction txn = Transaction.currentTxn(); + try { + PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql); + ResultSet rs = stmt.executeQuery(); + while (rs.next()) { + Long podId = rs.getLong("id"); + if (podId.longValue() == podIdToSkip) { continue; } - String cidrAddress = rs.getString("cidr_address"); - long cidrSize = rs.getLong("cidr_size"); - List cidrPair = new ArrayList(); - cidrPair.add(0, cidrAddress); - cidrPair.add(1, new Long(cidrSize)); - currentPodCidrSubnets.put(podId, cidrPair); - } + String cidrAddress = rs.getString("cidr_address"); + long cidrSize = rs.getLong("cidr_size"); + List cidrPair = new ArrayList(); + cidrPair.add(0, cidrAddress); + cidrPair.add(1, new Long(cidrSize)); + currentPodCidrSubnets.put(podId, cidrPair); + } } catch (SQLException ex) { - s_logger.warn("DB exception " + ex.getMessage(), ex); + 
s_logger.warn("DB exception " + ex.getMessage(), ex); return null; } - + return currentPodCidrSubnets; - } - + } + @Override public boolean remove(Long id) { Transaction txn = Transaction.currentTxn(); txn.start(); HostPodVO pod = createForUpdate(); pod.setName(null); - + update(id, pod); boolean result = super.remove(id); @@ -150,11 +150,11 @@ public class HostPodDaoImpl extends GenericDaoBase implements H podIdSearch.and("allocationState", podIdSearch.entity().getAllocationState(), Op.EQ); podIdSearch.done(); - + SearchCriteria sc = podIdSearch.create(); sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId); sc.addAnd("allocationState", SearchCriteria.Op.EQ, Grouping.AllocationState.Disabled); return customSearch(sc, null); } - + } diff --git a/server/src/com/cloud/dc/dao/VlanDaoImpl.java b/server/src/com/cloud/dc/dao/VlanDaoImpl.java index 8fc0580aaef..c5a635fd0c0 100644 --- a/server/src/com/cloud/dc/dao/VlanDaoImpl.java +++ b/server/src/com/cloud/dc/dao/VlanDaoImpl.java @@ -36,7 +36,6 @@ import com.cloud.dc.Vlan.VlanType; import com.cloud.dc.VlanVO; import com.cloud.network.dao.IPAddressDao; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.JoinBuilder; @@ -48,56 +47,56 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value={VlanDao.class}) public class VlanDaoImpl extends GenericDaoBase implements VlanDao { - - private final String FindZoneWideVlans = "SELECT * FROM vlan WHERE data_center_id=? and vlan_type=? and vlan_id!=? and id not in (select vlan_db_id from account_vlan_map)"; - - protected SearchBuilder ZoneVlanIdSearch; - protected SearchBuilder ZoneSearch; - protected SearchBuilder ZoneTypeSearch; - protected SearchBuilder ZoneTypeAllPodsSearch; - protected SearchBuilder ZoneTypePodSearch; - protected SearchBuilder ZoneVlanSearch; - protected SearchBuilder NetworkVlanSearch; - protected SearchBuilder PhysicalNetworkVlanSearch; - @Inject protected PodVlanMapDao _podVlanMapDao; - @Inject protected AccountVlanMapDao _accountVlanMapDao; - @Inject protected IPAddressDao _ipAddressDao; - + private final String FindZoneWideVlans = "SELECT * FROM vlan WHERE data_center_id=? and vlan_type=? and vlan_id!=? 
and id not in (select vlan_db_id from account_vlan_map)"; + + protected SearchBuilder ZoneVlanIdSearch; + protected SearchBuilder ZoneSearch; + protected SearchBuilder ZoneTypeSearch; + protected SearchBuilder ZoneTypeAllPodsSearch; + protected SearchBuilder ZoneTypePodSearch; + protected SearchBuilder ZoneVlanSearch; + protected SearchBuilder NetworkVlanSearch; + protected SearchBuilder PhysicalNetworkVlanSearch; + + @Inject protected PodVlanMapDao _podVlanMapDao; + @Inject protected AccountVlanMapDao _accountVlanMapDao; + @Inject protected IPAddressDao _ipAddressDao; + @Override public VlanVO findByZoneAndVlanId(long zoneId, String vlanId) { - SearchCriteria sc = ZoneVlanIdSearch.create(); - sc.setParameters("zoneId", zoneId); - sc.setParameters("vlanId", vlanId); + SearchCriteria sc = ZoneVlanIdSearch.create(); + sc.setParameters("zoneId", zoneId); + sc.setParameters("vlanId", vlanId); return findOneBy(sc); } - + @Override public List listByZone(long zoneId) { - SearchCriteria sc = ZoneSearch.create(); - sc.setParameters("zoneId", zoneId); - return listBy(sc); + SearchCriteria sc = ZoneSearch.create(); + sc.setParameters("zoneId", zoneId); + return listBy(sc); } - + public VlanDaoImpl() { - ZoneVlanIdSearch = createSearchBuilder(); - ZoneVlanIdSearch.and("zoneId", ZoneVlanIdSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + ZoneVlanIdSearch = createSearchBuilder(); + ZoneVlanIdSearch.and("zoneId", ZoneVlanIdSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); ZoneVlanIdSearch.and("vlanId", ZoneVlanIdSearch.entity().getVlanTag(), SearchCriteria.Op.EQ); ZoneVlanIdSearch.done(); - + ZoneSearch = createSearchBuilder(); ZoneSearch.and("zoneId", ZoneSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); ZoneSearch.done(); - + ZoneTypeSearch = createSearchBuilder(); ZoneTypeSearch.and("zoneId", ZoneTypeSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); ZoneTypeSearch.and("vlanType", ZoneTypeSearch.entity().getVlanType(), SearchCriteria.Op.EQ); ZoneTypeSearch.done(); - + NetworkVlanSearch = createSearchBuilder(); NetworkVlanSearch.and("networkOfferingId", NetworkVlanSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); NetworkVlanSearch.done(); - + PhysicalNetworkVlanSearch = createSearchBuilder(); PhysicalNetworkVlanSearch.and("physicalNetworkId", PhysicalNetworkVlanSearch.entity().getPhysicalNetworkId(), SearchCriteria.Op.EQ); PhysicalNetworkVlanSearch.done(); @@ -105,211 +104,211 @@ public class VlanDaoImpl extends GenericDaoBase implements VlanDao @Override public List listZoneWideVlans(long zoneId, VlanType vlanType, String vlanId){ - SearchCriteria sc = ZoneVlanSearch.create(); - sc.setParameters("zoneId", zoneId); - sc.setParameters("vlanId", vlanId); - sc.setParameters("vlanType", vlanType); - return listBy(sc); - } - - @Override - public List listByZoneAndType(long zoneId, VlanType vlanType) { - SearchCriteria sc = ZoneTypeSearch.create(); - sc.setParameters("zoneId", zoneId); - sc.setParameters("vlanType", vlanType); + SearchCriteria sc = ZoneVlanSearch.create(); + sc.setParameters("zoneId", zoneId); + sc.setParameters("vlanId", vlanId); + sc.setParameters("vlanType", vlanType); return listBy(sc); - } - - - @Override + } + + @Override + public List listByZoneAndType(long zoneId, VlanType vlanType) { + SearchCriteria sc = ZoneTypeSearch.create(); + sc.setParameters("zoneId", zoneId); + sc.setParameters("vlanType", vlanType); + return listBy(sc); + } + + + @Override public List listByType(VlanType vlanType) { SearchCriteria sc = ZoneTypeSearch.create(); 
sc.setParameters("vlanType", vlanType); return listBy(sc); } - @Override - public List listVlansForPod(long podId) { - //FIXME: use a join statement to improve the performance (should be minor since we expect only one or two - List vlanMaps = _podVlanMapDao.listPodVlanMapsByPod(podId); - List result = new ArrayList(); - for (PodVlanMapVO pvmvo: vlanMaps) { - result.add(findById(pvmvo.getVlanDbId())); - } - return result; - } + @Override + public List listVlansForPod(long podId) { + //FIXME: use a join statement to improve the performance (should be minor since we expect only one or two + List vlanMaps = _podVlanMapDao.listPodVlanMapsByPod(podId); + List result = new ArrayList(); + for (PodVlanMapVO pvmvo: vlanMaps) { + result.add(findById(pvmvo.getVlanDbId())); + } + return result; + } - @Override - public List listVlansForPodByType(long podId, VlanType vlanType) { - //FIXME: use a join statement to improve the performance (should be minor since we expect only one or two) - List vlanMaps = _podVlanMapDao.listPodVlanMapsByPod(podId); - List result = new ArrayList(); - for (PodVlanMapVO pvmvo: vlanMaps) { - VlanVO vlan =findById(pvmvo.getVlanDbId()); - if (vlan.getVlanType() == vlanType) { - result.add(vlan); - } - } - return result; - } - - @Override - public List listVlansForAccountByType(Long zoneId, long accountId, VlanType vlanType) { - //FIXME: use a join statement to improve the performance (should be minor since we expect only one or two) - List vlanMaps = _accountVlanMapDao.listAccountVlanMapsByAccount(accountId); - List result = new ArrayList(); - for (AccountVlanMapVO acvmvo: vlanMaps) { - VlanVO vlan =findById(acvmvo.getVlanDbId()); - if (vlan.getVlanType() == vlanType && (zoneId == null || vlan.getDataCenterId() == zoneId)) { - result.add(vlan); - } - } - return result; - } + @Override + public List listVlansForPodByType(long podId, VlanType vlanType) { + //FIXME: use a join statement to improve the performance (should be minor since we expect only one or two) + List vlanMaps = _podVlanMapDao.listPodVlanMapsByPod(podId); + List result = new ArrayList(); + for (PodVlanMapVO pvmvo: vlanMaps) { + VlanVO vlan =findById(pvmvo.getVlanDbId()); + if (vlan.getVlanType() == vlanType) { + result.add(vlan); + } + } + return result; + } - @Override - public void addToPod(long podId, long vlanDbId) { - PodVlanMapVO pvmvo = new PodVlanMapVO(podId, vlanDbId); - _podVlanMapDao.persist(pvmvo); - - } + @Override + public List listVlansForAccountByType(Long zoneId, long accountId, VlanType vlanType) { + //FIXME: use a join statement to improve the performance (should be minor since we expect only one or two) + List vlanMaps = _accountVlanMapDao.listAccountVlanMapsByAccount(accountId); + List result = new ArrayList(); + for (AccountVlanMapVO acvmvo: vlanMaps) { + VlanVO vlan =findById(acvmvo.getVlanDbId()); + if (vlan.getVlanType() == vlanType && (zoneId == null || vlan.getDataCenterId() == zoneId)) { + result.add(vlan); + } + } + return result; + } - @Override - public boolean configure(String name, Map params) - throws ConfigurationException { - boolean result = super.configure(name, params); + @Override + public void addToPod(long podId, long vlanDbId) { + PodVlanMapVO pvmvo = new PodVlanMapVO(podId, vlanDbId); + _podVlanMapDao.persist(pvmvo); + + } + + @Override + public boolean configure(String name, Map params) + throws ConfigurationException { + boolean result = super.configure(name, params); ZoneTypeAllPodsSearch = createSearchBuilder(); ZoneTypeAllPodsSearch.and("zoneId", 
ZoneTypeAllPodsSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); ZoneTypeAllPodsSearch.and("vlanType", ZoneTypeAllPodsSearch.entity().getVlanType(), SearchCriteria.Op.EQ); - + SearchBuilder PodVlanSearch = _podVlanMapDao.createSearchBuilder(); PodVlanSearch.and("podId", PodVlanSearch.entity().getPodId(), SearchCriteria.Op.NNULL); ZoneTypeAllPodsSearch.join("vlan", PodVlanSearch, PodVlanSearch.entity().getVlanDbId(), ZoneTypeAllPodsSearch.entity().getId(), JoinBuilder.JoinType.INNER); - + ZoneTypeAllPodsSearch.done(); PodVlanSearch.done(); - + ZoneTypePodSearch = createSearchBuilder(); ZoneTypePodSearch.and("zoneId", ZoneTypePodSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); ZoneTypePodSearch.and("vlanType", ZoneTypePodSearch.entity().getVlanType(), SearchCriteria.Op.EQ); - + SearchBuilder PodVlanSearch2 = _podVlanMapDao.createSearchBuilder(); PodVlanSearch2.and("podId", PodVlanSearch2.entity().getPodId(), SearchCriteria.Op.EQ); ZoneTypePodSearch.join("vlan", PodVlanSearch2, PodVlanSearch2.entity().getVlanDbId(), ZoneTypePodSearch.entity().getId(), JoinBuilder.JoinType.INNER); PodVlanSearch2.done(); ZoneTypePodSearch.done(); - return result; - } - - private VlanVO findNextVlan(long zoneId, Vlan.VlanType vlanType) { - List allVlans = listByZoneAndType(zoneId, vlanType); - List emptyVlans = new ArrayList(); - List fullVlans = new ArrayList(); - - // Try to find a VLAN that is partially allocated - for (VlanVO vlan : allVlans) { - long vlanDbId = vlan.getId(); - - int countOfAllocatedIps = _ipAddressDao.countIPs(zoneId, vlanDbId, true); - int countOfAllIps = _ipAddressDao.countIPs(zoneId, vlanDbId, false); - - if ((countOfAllocatedIps > 0) && (countOfAllocatedIps < countOfAllIps)) { - return vlan; - } else if (countOfAllocatedIps == 0) { - emptyVlans.add(vlan); - } else if (countOfAllocatedIps == countOfAllIps) { - fullVlans.add(vlan); - } - } - - if (emptyVlans.isEmpty()) { - return null; - } - - // Try to find an empty VLAN with the same tag/subnet as a VLAN that is full - for (VlanVO fullVlan : fullVlans) { - for (VlanVO emptyVlan : emptyVlans) { - if (fullVlan.getVlanTag().equals(emptyVlan.getVlanTag()) && - fullVlan.getVlanGateway().equals(emptyVlan.getVlanGateway()) && - fullVlan.getVlanNetmask().equals(emptyVlan.getVlanNetmask())) { - return emptyVlan; - } - } - } - - // Return a random empty VLAN - return emptyVlans.get(0); - } + return result; + } + + private VlanVO findNextVlan(long zoneId, Vlan.VlanType vlanType) { + List allVlans = listByZoneAndType(zoneId, vlanType); + List emptyVlans = new ArrayList(); + List fullVlans = new ArrayList(); + + // Try to find a VLAN that is partially allocated + for (VlanVO vlan : allVlans) { + long vlanDbId = vlan.getId(); + + int countOfAllocatedIps = _ipAddressDao.countIPs(zoneId, vlanDbId, true); + int countOfAllIps = _ipAddressDao.countIPs(zoneId, vlanDbId, false); + + if ((countOfAllocatedIps > 0) && (countOfAllocatedIps < countOfAllIps)) { + return vlan; + } else if (countOfAllocatedIps == 0) { + emptyVlans.add(vlan); + } else if (countOfAllocatedIps == countOfAllIps) { + fullVlans.add(vlan); + } + } + + if (emptyVlans.isEmpty()) { + return null; + } + + // Try to find an empty VLAN with the same tag/subnet as a VLAN that is full + for (VlanVO fullVlan : fullVlans) { + for (VlanVO emptyVlan : emptyVlans) { + if (fullVlan.getVlanTag().equals(emptyVlan.getVlanTag()) && + fullVlan.getVlanGateway().equals(emptyVlan.getVlanGateway()) && + fullVlan.getVlanNetmask().equals(emptyVlan.getVlanNetmask())) { + return emptyVlan; + } + } 
+ } + + // Return a random empty VLAN + return emptyVlans.get(0); + } + + @Override + public boolean zoneHasDirectAttachUntaggedVlans(long zoneId) { + SearchCriteria sc = ZoneTypeAllPodsSearch.create(); + sc.setParameters("zoneId", zoneId); + sc.setParameters("vlanType", VlanType.DirectAttached); - @Override - public boolean zoneHasDirectAttachUntaggedVlans(long zoneId) { - SearchCriteria sc = ZoneTypeAllPodsSearch.create(); - sc.setParameters("zoneId", zoneId); - sc.setParameters("vlanType", VlanType.DirectAttached); - return listIncludingRemovedBy(sc).size() > 0; - } + } - public Pair assignPodDirectAttachIpAddress(long zoneId, - long podId, long accountId, long domainId) { - SearchCriteria sc = ZoneTypePodSearch.create(); - sc.setParameters("zoneId", zoneId); - sc.setParameters("vlanType", VlanType.DirectAttached); - sc.setJoinParameters("vlan", "podId", podId); - - VlanVO vlan = findOneIncludingRemovedBy(sc); - if (vlan == null) { - return null; - } - - return null; + public Pair assignPodDirectAttachIpAddress(long zoneId, + long podId, long accountId, long domainId) { + SearchCriteria sc = ZoneTypePodSearch.create(); + sc.setParameters("zoneId", zoneId); + sc.setParameters("vlanType", VlanType.DirectAttached); + sc.setJoinParameters("vlan", "podId", podId); + + VlanVO vlan = findOneIncludingRemovedBy(sc); + if (vlan == null) { + return null; + } + + return null; // String ipAddress = _ipAddressDao.assignIpAddress(accountId, domainId, vlan.getId(), false).getAddress(); // if (ipAddress == null) { // return null; // } // return new Pair(ipAddress, vlan); - } - - @Override - @DB - public List searchForZoneWideVlans(long dcId, String vlanType, String vlanId){ - - StringBuilder sql = new StringBuilder(FindZoneWideVlans); + } - Transaction txn = Transaction.currentTxn(); - PreparedStatement pstmt = null; - try { - pstmt = txn.prepareAutoCloseStatement(sql.toString()); - pstmt.setLong(1, dcId); - pstmt.setString(2, vlanType); - pstmt.setString(3, vlanId); - - ResultSet rs = pstmt.executeQuery(); - List zoneWideVlans = new ArrayList(); + @Override + @DB + public List searchForZoneWideVlans(long dcId, String vlanType, String vlanId){ - while (rs.next()) { - zoneWideVlans.add(toEntityBean(rs, false)); - } - - return zoneWideVlans; - } catch (SQLException e) { - throw new CloudRuntimeException("Unable to execute " + pstmt.toString(), e); - } - } - - @Override + StringBuilder sql = new StringBuilder(FindZoneWideVlans); + + Transaction txn = Transaction.currentTxn(); + PreparedStatement pstmt = null; + try { + pstmt = txn.prepareAutoCloseStatement(sql.toString()); + pstmt.setLong(1, dcId); + pstmt.setString(2, vlanType); + pstmt.setString(3, vlanId); + + ResultSet rs = pstmt.executeQuery(); + List zoneWideVlans = new ArrayList(); + + while (rs.next()) { + zoneWideVlans.add(toEntityBean(rs, false)); + } + + return zoneWideVlans; + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to execute " + pstmt.toString(), e); + } + } + + @Override public List listVlansByNetworkId(long networkOfferingId) { - SearchCriteria sc = NetworkVlanSearch.create(); + SearchCriteria sc = NetworkVlanSearch.create(); sc.setParameters("networkOfferingId", networkOfferingId); return listBy(sc); } @Override public List listVlansByPhysicalNetworkId(long physicalNetworkId) { - SearchCriteria sc = PhysicalNetworkVlanSearch.create(); + SearchCriteria sc = PhysicalNetworkVlanSearch.create(); sc.setParameters("physicalNetworkId", physicalNetworkId); return listBy(sc); } diff --git 
a/server/src/com/cloud/deploy/FirstFitPlanner.java b/server/src/com/cloud/deploy/FirstFitPlanner.java index bcc1d264219..3517bf8ed14 100755 --- a/server/src/com/cloud/deploy/FirstFitPlanner.java +++ b/server/src/com/cloud/deploy/FirstFitPlanner.java @@ -18,7 +18,6 @@ package com.cloud.deploy; import java.util.ArrayList; import java.util.Comparator; -import java.util.Enumeration; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -74,7 +73,6 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.user.AccountManager; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; -import com.cloud.utils.component.Adapters; import com.cloud.vm.DiskProfile; import com.cloud.vm.ReservationContext; import com.cloud.vm.VirtualMachine; @@ -106,7 +104,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { //@com.cloud.utils.component.Inject(adapter=StoragePoolAllocator.class) @Inject protected List _storagePoolAllocators; - + //@com.cloud.utils.component.Inject(adapter=HostAllocator.class) @Inject protected List _hostAllocators; protected String _allocationAlgorithm = "random"; @@ -115,7 +113,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { @Override public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid) - throws InsufficientServerCapacityException { + throws InsufficientServerCapacityException { VirtualMachine vm = vmProfile.getVirtualMachine(); DataCenter dc = _dcDao.findById(vm.getDataCenterIdToDeployIn()); @@ -126,7 +124,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } return null; } - + ServiceOffering offering = vmProfile.getServiceOffering(); int cpu_requested = offering.getCpu() * offering.getSpeed(); long ram_requested = offering.getRamSize() * 1024L * 1024L; @@ -143,9 +141,9 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId()!=null ? "Yes": "No")); } - + String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); - + if(plan.getHostId() != null && haVmTag == null){ Long hostIdSpecified = plan.getHostId(); if (s_logger.isDebugEnabled()){ @@ -238,7 +236,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } s_logger.debug("Cannot choose the last host to deploy this VM "); } - + List clusterList = new ArrayList(); if (plan.getClusterId() != null) { @@ -272,7 +270,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } } else { s_logger.debug("Searching all possible resources under this Zone: "+ plan.getDataCenterId()); - + boolean applyAllocationAtPods = Boolean.parseBoolean(_configDao.getValue(Config.ApplyAllocationAlgorithmToPods.key())); if(applyAllocationAtPods){ //start scan at all pods under this zone. 
@@ -284,15 +282,15 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } } - + private DeployDestination scanPodsForDestination(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid){ - + ServiceOffering offering = vmProfile.getServiceOffering(); int requiredCpu = offering.getCpu() * offering.getSpeed(); long requiredRam = offering.getRamSize() * 1024L * 1024L; String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); - + //list pods under this zone by cpu and ram capacity List prioritizedPodIds = new ArrayList(); Pair, Map> podCapacityInfo = listPodsByCapacity(plan.getDataCenterId(), requiredCpu, requiredRam, cpuOverprovisioningFactor); @@ -313,16 +311,16 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } podsWithCapacity.removeAll(disabledPods); } - } + } }else{ if (s_logger.isDebugEnabled()) { s_logger.debug("No pods found having a host with enough capacity, returning."); } return null; } - + if(!podsWithCapacity.isEmpty()){ - + prioritizedPodIds = reorderPods(podCapacityInfo, vmProfile, plan); //loop over pods @@ -345,9 +343,9 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { return null; } } - + private DeployDestination scanClustersForDestinationInZoneOrPod(long id, boolean isZone, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid){ - + VirtualMachine vm = vmProfile.getVirtualMachine(); ServiceOffering offering = vmProfile.getServiceOffering(); DataCenter dc = _dcDao.findById(vm.getDataCenterIdToDeployIn()); @@ -355,7 +353,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { long requiredRam = offering.getRamSize() * 1024L * 1024L; String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); - + //list clusters under this zone by cpu and ram capacity Pair, Map> clusterCapacityInfo = listClustersByCapacity(id, requiredCpu, requiredRam, avoid, isZone, cpuOverprovisioningFactor); List prioritizedClusterIds = clusterCapacityInfo.first(); @@ -366,7 +364,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } prioritizedClusterIds.removeAll(avoid.getClustersToAvoid()); } - + if(!isRootAdmin(plan.getReservationContext())){ List disabledClusters = new ArrayList(); if(isZone){ @@ -397,7 +395,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { return null; } } - + /** * This method should reorder the given list of Cluster Ids by applying any necessary heuristic * for this planner @@ -409,7 +407,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { List reordersClusterIds = clusterCapacityInfo.first(); return reordersClusterIds; } - + /** * This method should reorder the given list of Pod Ids by applying any necessary heuristic * for this planner @@ -421,7 +419,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { List podIdsByCapacity = podCapacityInfo.first(); return podIdsByCapacity; } - + private List listDisabledClusters(long zoneId, Long podId){ List disabledClusters = _clusterDao.listDisabledClusters(zoneId, podId); if(podId == null){ @@ -431,70 +429,70 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } return disabledClusters; } - + private List listDisabledPods(long zoneId){ List 
disabledPods = _podDao.listDisabledPods(zoneId); return disabledPods; } - + private Map getCapacityThresholdMap(){ - // Lets build this real time so that the admin wont have to restart MS if he changes these values - Map disableThresholdMap = new HashMap(); - - String cpuDisableThresholdString = _configDao.getValue(Config.CPUCapacityDisableThreshold.key()); + // Let's build this in real time so that the admin won't have to restart the MS when these values change + Map disableThresholdMap = new HashMap(); + + String cpuDisableThresholdString = _configDao.getValue(Config.CPUCapacityDisableThreshold.key()); float cpuDisableThreshold = NumbersUtil.parseFloat(cpuDisableThresholdString, 0.85F); disableThresholdMap.put(Capacity.CAPACITY_TYPE_CPU, cpuDisableThreshold); - + String memoryDisableThresholdString = _configDao.getValue(Config.MemoryCapacityDisableThreshold.key()); float memoryDisableThreshold = NumbersUtil.parseFloat(memoryDisableThresholdString, 0.85F); disableThresholdMap.put(Capacity.CAPACITY_TYPE_MEMORY, memoryDisableThreshold); - - return disableThresholdMap; + + return disableThresholdMap; } private List getCapacitiesForCheckingThreshold(){ - List capacityList = new ArrayList(); - capacityList.add(Capacity.CAPACITY_TYPE_CPU); - capacityList.add(Capacity.CAPACITY_TYPE_MEMORY); - return capacityList; + List capacityList = new ArrayList(); + capacityList.add(Capacity.CAPACITY_TYPE_CPU); + capacityList.add(Capacity.CAPACITY_TYPE_MEMORY); + return capacityList; } - + private void removeClustersCrossingThreshold(List clusterListForVmAllocation, ExcludeList avoid, VirtualMachineProfile vmProfile, DeploymentPlan plan){ - - Map capacityThresholdMap = getCapacityThresholdMap(); - List capacityList = getCapacitiesForCheckingThreshold(); - List clustersCrossingThreshold = new ArrayList(); - + + Map capacityThresholdMap = getCapacityThresholdMap(); + List capacityList = getCapacitiesForCheckingThreshold(); + List clustersCrossingThreshold = new ArrayList(); + ServiceOffering offering = vmProfile.getServiceOffering(); int cpu_requested = offering.getCpu() * offering.getSpeed(); long ram_requested = offering.getRamSize() * 1024L * 1024L; - + // For each capacity get the cluster list crossing the threshold and remove it from the clusterList that will be used for vm allocation. 
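The loop that follows acts on this comment: thresholds are re-read on every call, so an admin's change takes effect without a management-server restart, and any cluster whose projected CPU or memory allocation would cross its threshold is both added to the avoid set and removed from the candidate list. A standalone sketch of that step, with a hypothetical CapacityLookup standing in for CapacityDao.listClustersCrossingThreshold:

import java.util.List;
import java.util.Map;

class ThresholdFilterSketch {
    static final short CAPACITY_TYPE_MEMORY = 0; // stand-ins for Capacity.CAPACITY_TYPE_*
    static final short CAPACITY_TYPE_CPU = 1;

    // Hypothetical stand-in for CapacityDao.listClustersCrossingThreshold(...).
    interface CapacityLookup {
        List<Long> clustersCrossingThreshold(short capacityType, float threshold, long requested);
    }

    static void filter(List<Long> candidateClusters, List<Long> avoidSet, CapacityLookup dao,
            Map<Short, Float> thresholds, long cpuRequested, long ramRequested) {
        for (short type : new short[] { CAPACITY_TYPE_CPU, CAPACITY_TYPE_MEMORY }) {
            if (candidateClusters.isEmpty()) {
                return; // nothing left to allocate from
            }
            long requested = (type == CAPACITY_TYPE_CPU) ? cpuRequested : ramRequested;
            List<Long> crossing = dao.clustersCrossingThreshold(type, thresholds.get(type), requested);
            if (crossing != null && !crossing.isEmpty()) {
                avoidSet.addAll(crossing);             // remember them for later planning passes
                candidateClusters.removeAll(crossing); // and skip them for this deployment
            }
        }
    }
}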
for(short capacity : capacityList){ - - if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0){ - return; - } - - if (capacity == Capacity.CAPACITY_TYPE_CPU){ - clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(Capacity.CAPACITY_TYPE_CPU, plan.getDataCenterId(), - capacityThresholdMap.get(capacity), cpu_requested, ApiDBUtils.getCpuOverprovisioningFactor()); - }else{ - clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(), - capacityThresholdMap.get(capacity), ram_requested, 1.0f);//Mem overprov not supported yet - } - - if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0){ - // addToAvoid Set - avoid.addClusterList(clustersCrossingThreshold); - // Remove clusters crossing disabled threshold - clusterListForVmAllocation.removeAll(clustersCrossingThreshold); - - s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" + - " crosses the disable capacity threshold: " + capacityThresholdMap.get(capacity) + " for capacity Type : " + capacity + ", skipping these clusters"); - } - + if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0){ + return; + } + + if (capacity == Capacity.CAPACITY_TYPE_CPU){ + clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(Capacity.CAPACITY_TYPE_CPU, plan.getDataCenterId(), + capacityThresholdMap.get(capacity), cpu_requested, ApiDBUtils.getCpuOverprovisioningFactor()); + }else{ + clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(), + capacityThresholdMap.get(capacity), ram_requested, 1.0f);//Mem overprov not supported yet + } + + + if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0){ + // addToAvoid Set + avoid.addClusterList(clustersCrossingThreshold); + // Remove clusters crossing disabled threshold + clusterListForVmAllocation.removeAll(clustersCrossingThreshold); + + s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" + + " crosses the disable capacity threshold: " + capacityThresholdMap.get(capacity) + " for capacity Type : " + capacity + ", skipping these clusters"); + } + } } @@ -506,7 +504,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan); - + for(Long clusterId : clusterList){ Cluster clusterVO = _clusterDao.findById(clusterId); @@ -515,7 +513,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { avoid.addCluster(clusterVO.getId()); continue; } - + s_logger.debug("Checking resources in Cluster: "+clusterId + " under Pod: "+clusterVO.getPodId()); //search for resources(hosts and storage) under this zone, pod, cluster. 
DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext()); @@ -595,11 +593,11 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { if (s_logger.isTraceEnabled()) { s_logger.trace("ClusterId List having enough CPU and RAM capacity & in order of aggregate capacity: " + clusterIdsOrderedByAggregateCapacity); } - + return result; } - + protected Pair, Map> listPodsByCapacity(long zoneId, int requiredCpu, long requiredRam, float cpuOverprovisioningFactor){ //look at the aggregate available cpu and ram per pod //although an aggregate value may be false indicator that a pod can host a vm, it will at the least eliminate those pods which definitely cannot @@ -632,7 +630,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { if (s_logger.isTraceEnabled()) { s_logger.trace("PodId List having enough CPU and RAM capacity & in order of aggregate capacity: " + podIdsOrderedByAggregateCapacity); } - + return result; } @@ -719,7 +717,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { break; } } - + if(suitableHosts.isEmpty()){ s_logger.debug("No suitable hosts found"); } @@ -801,8 +799,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { // when deploying VM based on ISO, we have a service offering and an additional disk offering, use-local storage flag is actually // saved in service offering, overrde the flag from service offering when it is a ROOT disk if(!useLocalStorage && vmProfile.getServiceOffering().getUseLocalStorage()) { - if(toBeCreated.getVolumeType() == Volume.Type.ROOT) - useLocalStorage = true; + if(toBeCreated.getVolumeType() == Volume.Type.ROOT) + useLocalStorage = true; } } diskProfile.setUseLocalStorage(useLocalStorage); @@ -816,7 +814,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { break; } } - + if(!foundPotentialPools){ s_logger.debug("No suitable pools found for volume: "+toBeCreated +" under cluster: "+plan.getClusterId()); //No suitable storage pools found under this cluster for this volume. - remove any suitable pools found for other volumes. @@ -862,7 +860,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } return false; } - + @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); diff --git a/server/src/com/cloud/event/EventUtils.java b/server/src/com/cloud/event/EventUtils.java index 68317bf32d6..53d224e186f 100755 --- a/server/src/com/cloud/event/EventUtils.java +++ b/server/src/com/cloud/event/EventUtils.java @@ -16,6 +16,7 @@ // under the License. 
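The EventUtils hunk that continues below moves field copying out of the constructor into a @PostConstruct callback, because injected fields are only populated after the container finishes constructing the bean. A minimal sketch of the pattern, assuming a JSR-250/JSR-330 container; WiringSketch and its Runnable dependency are illustrative:

import javax.annotation.PostConstruct;
import javax.inject.Inject;

class WiringSketch {
    @Inject
    private Runnable injectedDependency; // illustrative dependency type

    private Runnable active;

    WiringSketch() {
        // too early: injectedDependency is still null in the constructor
    }

    @PostConstruct
    void init() {
        active = injectedDependency; // safe: injection has completed by now
    }
}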
package com.cloud.event; +import javax.annotation.PostConstruct; import javax.inject.Inject; import org.springframework.stereotype.Component; @@ -33,6 +34,10 @@ public class EventUtils { @Inject AccountDao _placeHoderAccountDao; public EventUtils() { + } + + @PostConstruct + void init() { _eventDao = _placeHoderEventDao; _accountDao = _placeHoderAccountDao; } diff --git a/server/src/com/cloud/event/dao/EventJoinDaoImpl.java b/server/src/com/cloud/event/dao/EventJoinDaoImpl.java index 764df99557f..873ee292224 100644 --- a/server/src/com/cloud/event/dao/EventJoinDaoImpl.java +++ b/server/src/com/cloud/event/dao/EventJoinDaoImpl.java @@ -27,6 +27,8 @@ import com.cloud.api.ApiResponseHelper; import com.cloud.api.query.vo.EventJoinVO; import org.apache.cloudstack.api.response.EventResponse; +import org.springframework.stereotype.Component; + import com.cloud.event.Event; import com.cloud.event.Event.State; import com.cloud.utils.db.Filter; @@ -34,7 +36,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; - +@Component @Local(value={EventJoinDao.class}) public class EventJoinDaoImpl extends GenericDaoBase implements EventJoinDao { public static final Logger s_logger = Logger.getLogger(EventJoinDaoImpl.class); diff --git a/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java b/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java index 6df08229830..813728f78ca 100755 --- a/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java +++ b/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java @@ -37,7 +37,6 @@ import com.cloud.agent.AgentManager; import com.cloud.alert.AlertManager; import com.cloud.cluster.ClusterManagerListener; import com.cloud.cluster.ManagementServerHostVO; -import com.cloud.cluster.StackMaid; import com.cloud.configuration.Config; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.ClusterDetailsDao; @@ -142,7 +141,7 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu ManagementServer _msServer; @Inject ConfigurationDao _configDao; - + String _instance; ScheduledExecutorService _executor; int _stopRetryInterval; @@ -189,12 +188,12 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu if (host.getType() != Host.Type.Routing) { return; } - + if(host.getHypervisorType() == HypervisorType.VMware) { s_logger.info("Don't restart for VMs on host " + host.getId() + " as the host is VMware host"); - return; + return; } - + s_logger.warn("Scheduling restart for VMs on host " + host.getId()); final List vms = _instanceDao.listByHostId(host.getId()); @@ -268,29 +267,29 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu @Override public void scheduleRestart(VMInstanceVO vm, boolean investigate) { - Long hostId = vm.getHostId(); - if (hostId == null) { - try { - s_logger.debug("Found a vm that is scheduled to be restarted but has no host id: " + vm); + Long hostId = vm.getHostId(); + if (hostId == null) { + try { + s_logger.debug("Found a vm that is scheduled to be restarted but has no host id: " + vm); _itMgr.advanceStop(vm, true, _accountMgr.getSystemUser(), _accountMgr.getSystemAccount()); } catch (ResourceUnavailableException e) { assert false : "How do we hit this when force is true?"; - throw new CloudRuntimeException("Caught exception even though it should be handled.", e); + throw new CloudRuntimeException("Caught exception even though it should be handled.", e); } catch 
(OperationTimedoutException e) { assert false : "How do we hit this when force is true?"; - throw new CloudRuntimeException("Caught exception even though it should be handled.", e); + throw new CloudRuntimeException("Caught exception even though it should be handled.", e); } catch (ConcurrentOperationException e) { assert false : "How do we hit this when force is true?"; - throw new CloudRuntimeException("Caught exception even though it should be handled.", e); + throw new CloudRuntimeException("Caught exception even though it should be handled.", e); } - return; - } + return; + } + + if(vm.getHypervisorType() == HypervisorType.VMware) { + s_logger.info("Skip HA for VMware VM " + vm.getInstanceName()); + return; + } - if(vm.getHypervisorType() == HypervisorType.VMware) { - s_logger.info("Skip HA for VMware VM " + vm.getInstanceName()); - return; - } - if (!investigate) { if (s_logger.isDebugEnabled()) { s_logger.debug("VM does not require investigation so I'm marking it as Stopped: " + vm.toString()); @@ -319,13 +318,13 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu _itMgr.advanceStop(vm, true, _accountMgr.getSystemUser(), _accountMgr.getSystemAccount()); } catch (ResourceUnavailableException e) { assert false : "How do we hit this when force is true?"; - throw new CloudRuntimeException("Caught exception even though it should be handled.", e); + throw new CloudRuntimeException("Caught exception even though it should be handled.", e); } catch (OperationTimedoutException e) { assert false : "How do we hit this when force is true?"; - throw new CloudRuntimeException("Caught exception even though it should be handled.", e); + throw new CloudRuntimeException("Caught exception even though it should be handled.", e); } catch (ConcurrentOperationException e) { assert false : "How do we hit this when force is true?"; - throw new CloudRuntimeException("Caught exception even though it should be handled.", e); + throw new CloudRuntimeException("Caught exception even though it should be handled.", e); } } @@ -360,7 +359,7 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu s_logger.info(str.toString()); return null; } - + items = _haDao.listRunningHaWorkForVm(work.getInstanceId()); if (items.size() > 0) { StringBuilder str = new StringBuilder("Waiting because there's HA work being executed on an item currently. Work Ids =["); @@ -371,7 +370,7 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu s_logger.info(str.toString()); return (System.currentTimeMillis() >> 10) + _investigateRetryInterval; } - + long vmId = work.getInstanceId(); VMInstanceVO vm = _itMgr.findByIdAndType(work.getType(), work.getInstanceId()); @@ -420,14 +419,14 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu Investigator investigator = null; for(Investigator it : _investigators) { - investigator = it; + investigator = it; alive = investigator.isVmAlive(vm, host); s_logger.info(investigator.getName() + " found " + vm + "to be alive? 
" + alive); if (alive != null) { break; } } - + boolean fenced = false; if (alive == null) { s_logger.debug("Fencing off VM that we don't know the state of"); @@ -439,7 +438,7 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu break; } } - + } else if (!alive) { fenced = true; } else { @@ -464,13 +463,13 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu _itMgr.advanceStop(vm, true, _accountMgr.getSystemUser(), _accountMgr.getSystemAccount()); } catch (ResourceUnavailableException e) { assert false : "How do we hit this when force is true?"; - throw new CloudRuntimeException("Caught exception even though it should be handled.", e); + throw new CloudRuntimeException("Caught exception even though it should be handled.", e); } catch (OperationTimedoutException e) { assert false : "How do we hit this when force is true?"; - throw new CloudRuntimeException("Caught exception even though it should be handled.", e); + throw new CloudRuntimeException("Caught exception even though it should be handled.", e); } catch (ConcurrentOperationException e) { assert false : "How do we hit this when force is true?"; - throw new CloudRuntimeException("Caught exception even though it should be handled.", e); + throw new CloudRuntimeException("Caught exception even though it should be handled.", e); } work.setStep(Step.Scheduled); @@ -481,13 +480,13 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu _itMgr.advanceStop(vm, true, _accountMgr.getSystemUser(), _accountMgr.getSystemAccount()); } catch (ResourceUnavailableException e) { assert false : "How do we hit this when force is true?"; - throw new CloudRuntimeException("Caught exception even though it should be handled.", e); + throw new CloudRuntimeException("Caught exception even though it should be handled.", e); } catch (OperationTimedoutException e) { assert false : "How do we hit this when force is true?"; - throw new CloudRuntimeException("Caught exception even though it should be handled.", e); + throw new CloudRuntimeException("Caught exception even though it should be handled.", e); } catch (ConcurrentOperationException e) { assert false : "How do we hit this when force is true?"; - throw new CloudRuntimeException("Caught exception even though it should be handled.", e); + throw new CloudRuntimeException("Caught exception even though it should be handled.", e); } } } @@ -519,7 +518,7 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu params.put(VirtualMachineProfile.Param.HaTag, _haTag); } VMInstanceVO started = _itMgr.advanceStart(vm, params, _accountMgr.getSystemUser(), _accountMgr.getSystemAccount()); - + if (started != null) { s_logger.info("VM is now restarted: " + vmId + " on " + started.getHostId()); return null; @@ -735,7 +734,7 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu if (_instance == null) { _instance = "VMOPS"; } - + _haTag = params.get("ha.tag"); _haDao.releaseWorkItems(_serverId); @@ -785,8 +784,6 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu _haDao.cleanup(System.currentTimeMillis() - _timeBetweenFailures); } catch (Exception e) { s_logger.warn("Error while cleaning up", e); - } finally { - StackMaid.current().exitCleanup(); } } } @@ -832,7 +829,7 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu nextTime = destroyVM(work); } else { assert false : "How did we get here with " + wt.toString(); - 
continue; + continue; } if (nextTime == null) { @@ -852,7 +849,6 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu } catch (final Throwable th) { s_logger.error("Caught this throwable, ", th); } finally { - StackMaid.current().exitCleanup(); if (work != null) { NDC.pop(); } @@ -885,5 +881,5 @@ public class HighAvailabilityManagerImpl implements HighAvailabilityManager, Clu public String getHaTag() { return _haTag; } - + } diff --git a/server/src/com/cloud/host/dao/HostDaoImpl.java b/server/src/com/cloud/host/dao/HostDaoImpl.java index 0767befcb70..2a7139cba54 100755 --- a/server/src/com/cloud/host/dao/HostDaoImpl.java +++ b/server/src/com/cloud/host/dao/HostDaoImpl.java @@ -47,7 +47,6 @@ import com.cloud.info.RunningHostCountInfo; import com.cloud.org.Managed; import com.cloud.resource.ResourceState; import com.cloud.utils.DateUtil; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.Attribute; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; @@ -103,7 +102,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected SearchBuilder ManagedDirectConnectSearch; protected SearchBuilder ManagedRoutingServersSearch; protected SearchBuilder SecondaryStorageVMSearch; - + protected GenericSearchBuilder HostsInStatusSearch; protected GenericSearchBuilder CountRoutingByDc; @@ -123,7 +122,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao public HostDaoImpl() { } - + @PostConstruct public void init() { @@ -152,7 +151,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao TypeDcSearch.and("type", TypeDcSearch.entity().getType(), SearchCriteria.Op.EQ); TypeDcSearch.and("dc", TypeDcSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); TypeDcSearch.done(); - + SecondaryStorageVMSearch = createSearchBuilder(); SecondaryStorageVMSearch.and("type", SecondaryStorageVMSearch.entity().getType(), SearchCriteria.Op.EQ); SecondaryStorageVMSearch.and("dc", SecondaryStorageVMSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); @@ -165,14 +164,14 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao TypeDcStatusSearch.and("status", TypeDcStatusSearch.entity().getStatus(), SearchCriteria.Op.EQ); TypeDcStatusSearch.and("resourceState", TypeDcStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ); TypeDcStatusSearch.done(); - + TypeClusterStatusSearch = createSearchBuilder(); TypeClusterStatusSearch.and("type", TypeClusterStatusSearch.entity().getType(), SearchCriteria.Op.EQ); TypeClusterStatusSearch.and("cluster", TypeClusterStatusSearch.entity().getClusterId(), SearchCriteria.Op.EQ); TypeClusterStatusSearch.and("status", TypeClusterStatusSearch.entity().getStatus(), SearchCriteria.Op.EQ); TypeClusterStatusSearch.and("resourceState", TypeClusterStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ); TypeClusterStatusSearch.done(); - + IdStatusSearch = createSearchBuilder(); IdStatusSearch.and("id", IdStatusSearch.entity().getId(), SearchCriteria.Op.EQ); IdStatusSearch.and("states", IdStatusSearch.entity().getStatus(), SearchCriteria.Op.IN); @@ -218,7 +217,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao StatusSearch = createSearchBuilder(); StatusSearch.and("status", StatusSearch.entity().getStatus(), SearchCriteria.Op.IN); StatusSearch.done(); - + ResourceStateSearch = createSearchBuilder(); ResourceStateSearch.and("resourceState", ResourceStateSearch.entity().getResourceState(), SearchCriteria.Op.IN); 
ResourceStateSearch.done(); @@ -261,7 +260,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao ClusterManagedSearch.and("managed", ClusterManagedSearch.entity().getManagedState(), SearchCriteria.Op.EQ); UnmanagedDirectConnectSearch.join("ClusterManagedSearch", ClusterManagedSearch, ClusterManagedSearch.entity().getId(), UnmanagedDirectConnectSearch.entity().getClusterId(), JoinType.INNER); UnmanagedDirectConnectSearch.done(); - + DirectConnectSearch = createSearchBuilder(); DirectConnectSearch.and("resource", DirectConnectSearch.entity().getResource(), SearchCriteria.Op.NNULL); @@ -311,7 +310,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao ManagedRoutingServersSearch.and("server", ManagedRoutingServersSearch.entity().getManagementServerId(), SearchCriteria.Op.NNULL); ManagedRoutingServersSearch.and("type", ManagedRoutingServersSearch.entity().getType(), SearchCriteria.Op.EQ); ManagedRoutingServersSearch.done(); - + RoutingSearch = createSearchBuilder(); RoutingSearch.and("type", RoutingSearch.entity().getType(), SearchCriteria.Op.EQ); RoutingSearch.done(); @@ -334,52 +333,52 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao List hosts = listBy(sc); return hosts.size(); } - + @Override public HostVO findByGuid(String guid) { SearchCriteria sc = GuidSearch.create("guid", guid); return findOneBy(sc); } - + @Override @DB public List findAndUpdateDirectAgentToLoad(long lastPingSecondsAfter, Long limit, long managementServerId) { Transaction txn = Transaction.currentTxn(); txn.start(); - SearchCriteria sc = UnmanagedDirectConnectSearch.create(); - sc.setParameters("lastPinged", lastPingSecondsAfter); + SearchCriteria sc = UnmanagedDirectConnectSearch.create(); + sc.setParameters("lastPinged", lastPingSecondsAfter); //sc.setParameters("resourceStates", ResourceState.ErrorInMaintenance, ResourceState.Maintenance, ResourceState.PrepareForMaintenance, ResourceState.Disabled); sc.setJoinParameters("ClusterManagedSearch", "managed", Managed.ManagedState.Managed); List hosts = lockRows(sc, new Filter(HostVO.class, "clusterId", true, 0L, limit), true); - + for (HostVO host : hosts) { host.setManagementServerId(managementServerId); update(host.getId(), host); } - + txn.commit(); - + return hosts; } - + @Override @DB public List findAndUpdateApplianceToLoad(long lastPingSecondsAfter, long managementServerId) { - Transaction txn = Transaction.currentTxn(); - - txn.start(); - SearchCriteria sc = UnmanagedApplianceSearch.create(); - sc.setParameters("lastPinged", lastPingSecondsAfter); + Transaction txn = Transaction.currentTxn(); + + txn.start(); + SearchCriteria sc = UnmanagedApplianceSearch.create(); + sc.setParameters("lastPinged", lastPingSecondsAfter); sc.setParameters("types", Type.ExternalDhcp, Type.ExternalFirewall, Type.ExternalLoadBalancer, Type.PxeServer, Type.TrafficMonitor, Type.L2Networking); - List hosts = lockRows(sc, null, true); - - for (HostVO host : hosts) { - host.setManagementServerId(managementServerId); - update(host.getId(), host); - } - - txn.commit(); - - return hosts; + List hosts = lockRows(sc, null, true); + + for (HostVO host : hosts) { + host.setManagementServerId(managementServerId); + update(host.getId(), host); + } + + txn.commit(); + + return hosts; } @Override @@ -405,7 +404,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao ub = getUpdateBuilder(host); update(ub, sc, null); } - + @Override public List listByHostTag(Host.Type type, Long clusterId, Long podId, long dcId, String hostTag) { 
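A pattern worth calling out across the HostDaoImpl hunks: SearchBuilder templates are assembled once (in the @PostConstruct init() shown earlier), and each query method then creates fresh SearchCriteria from a template and fills in per-call parameters. A rough sketch of that two-phase flow, using stand-in classes since GenericDaoBase's builder API is framework-specific:

import java.util.HashMap;
import java.util.Map;

// Stand-ins for the framework's SearchBuilder/SearchCriteria pair: the
// template is built once and shared, the criteria carry per-call values.
class SearchTemplate {
    private final String whereClause;
    SearchTemplate(String whereClause) { this.whereClause = whereClause; }
    Criteria create() { return new Criteria(whereClause); }

    static class Criteria {
        final String whereClause;
        final Map<String, Object> params = new HashMap<>();
        Criteria(String whereClause) { this.whereClause = whereClause; }
        void setParameters(String name, Object value) { params.put(name, value); }
    }
}

class HostDaoSketch {
    private SearchTemplate statusSearch;

    // Built once, like the SearchBuilder setup in HostDaoImpl's init().
    void init() {
        statusSearch = new SearchTemplate("status IN (:status)");
    }

    SearchTemplate.Criteria listByStatus(Object statuses) {
        SearchTemplate.Criteria sc = statusSearch.create(); // fresh criteria per call
        sc.setParameters("status", statuses);               // the shared template is never mutated
        return sc;
    }
}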
@@ -438,8 +437,8 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } - - + + @Override public List listAllUpAndEnabledNonHAHosts(Type type, Long clusterId, Long podId, long dcId, String haTag) { SearchBuilder hostTagSearch = null; @@ -449,42 +448,42 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao hostTagSearch.or("tagNull", hostTagSearch.entity().getTag(), SearchCriteria.Op.NULL); hostTagSearch.cp(); } - + SearchBuilder hostSearch = createSearchBuilder(); - + hostSearch.and("type", hostSearch.entity().getType(), SearchCriteria.Op.EQ); hostSearch.and("clusterId", hostSearch.entity().getClusterId(), SearchCriteria.Op.EQ); hostSearch.and("podId", hostSearch.entity().getPodId(), SearchCriteria.Op.EQ); hostSearch.and("zoneId", hostSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); hostSearch.and("status", hostSearch.entity().getStatus(), SearchCriteria.Op.EQ); hostSearch.and("resourceState", hostSearch.entity().getResourceState(), SearchCriteria.Op.EQ); - + if (haTag != null && !haTag.isEmpty()) { hostSearch.join("hostTagSearch", hostTagSearch, hostSearch.entity().getId(), hostTagSearch.entity().getHostId(), JoinBuilder.JoinType.LEFTOUTER); } SearchCriteria sc = hostSearch.create(); - + if (haTag != null && !haTag.isEmpty()) { sc.setJoinParameters("hostTagSearch", "tag", haTag); } - + if (type != null) { sc.setParameters("type", type); } - + if (clusterId != null) { sc.setParameters("clusterId", clusterId); } - + if (podId != null) { sc.setParameters("podId", podId); } - + sc.setParameters("zoneId", dcId); sc.setParameters("status", Status.Up); sc.setParameters("resourceState", ResourceState.Enabled); - + return listBy(sc); } @@ -531,7 +530,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao } return result; } - + @Override public void saveDetails(HostVO host) { Map details = host.getDetails(); @@ -653,81 +652,81 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao } - @Override - public boolean updateState(Status oldStatus, Event event, Status newStatus, Host vo, Object data) { - HostVO host = findById(vo.getId()); - long oldPingTime = host.getLastPinged(); + @Override + public boolean updateState(Status oldStatus, Event event, Status newStatus, Host vo, Object data) { + HostVO host = findById(vo.getId()); + long oldPingTime = host.getLastPinged(); - SearchBuilder sb = createSearchBuilder(); - sb.and("status", sb.entity().getStatus(), SearchCriteria.Op.EQ); - sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); - sb.and("update", sb.entity().getUpdated(), SearchCriteria.Op.EQ); - if (newStatus.checkManagementServer()) { - sb.and("ping", sb.entity().getLastPinged(), SearchCriteria.Op.EQ); - sb.and().op("nullmsid", sb.entity().getManagementServerId(), SearchCriteria.Op.NULL); - sb.or("msid", sb.entity().getManagementServerId(), SearchCriteria.Op.EQ); - sb.closeParen(); - } - sb.done(); + SearchBuilder sb = createSearchBuilder(); + sb.and("status", sb.entity().getStatus(), SearchCriteria.Op.EQ); + sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); + sb.and("update", sb.entity().getUpdated(), SearchCriteria.Op.EQ); + if (newStatus.checkManagementServer()) { + sb.and("ping", sb.entity().getLastPinged(), SearchCriteria.Op.EQ); + sb.and().op("nullmsid", sb.entity().getManagementServerId(), SearchCriteria.Op.NULL); + sb.or("msid", sb.entity().getManagementServerId(), SearchCriteria.Op.EQ); + sb.closeParen(); + } + sb.done(); - SearchCriteria sc = sb.create(); + SearchCriteria sc = 
sb.create(); - sc.setParameters("status", oldStatus); - sc.setParameters("id", host.getId()); - sc.setParameters("update", host.getUpdated()); - long oldUpdateCount = host.getUpdated(); - if (newStatus.checkManagementServer()) { - sc.setParameters("ping", oldPingTime); - sc.setParameters("msid", host.getManagementServerId()); - } + sc.setParameters("status", oldStatus); + sc.setParameters("id", host.getId()); + sc.setParameters("update", host.getUpdated()); + long oldUpdateCount = host.getUpdated(); + if (newStatus.checkManagementServer()) { + sc.setParameters("ping", oldPingTime); + sc.setParameters("msid", host.getManagementServerId()); + } - long newUpdateCount = host.incrUpdated(); - UpdateBuilder ub = getUpdateBuilder(host); - ub.set(host, _statusAttr, newStatus); - if (newStatus.updateManagementServer()) { - if (newStatus.lostConnection()) { - ub.set(host, _msIdAttr, null); - } else { - ub.set(host, _msIdAttr, host.getManagementServerId()); - } - if (event.equals(Event.Ping) || event.equals(Event.AgentConnected)) { - ub.set(host, _pingTimeAttr, System.currentTimeMillis() >> 10); - } - } - if (event.equals(Event.ManagementServerDown)) { - ub.set(host, _pingTimeAttr, ((System.currentTimeMillis() >> 10) - (10 * 60))); - } - int result = update(ub, sc, null); - assert result <= 1 : "How can this update " + result + " rows? "; + long newUpdateCount = host.incrUpdated(); + UpdateBuilder ub = getUpdateBuilder(host); + ub.set(host, _statusAttr, newStatus); + if (newStatus.updateManagementServer()) { + if (newStatus.lostConnection()) { + ub.set(host, _msIdAttr, null); + } else { + ub.set(host, _msIdAttr, host.getManagementServerId()); + } + if (event.equals(Event.Ping) || event.equals(Event.AgentConnected)) { + ub.set(host, _pingTimeAttr, System.currentTimeMillis() >> 10); + } + } + if (event.equals(Event.ManagementServerDown)) { + ub.set(host, _pingTimeAttr, ((System.currentTimeMillis() >> 10) - (10 * 60))); + } + int result = update(ub, sc, null); + assert result <= 1 : "How can this update " + result + " rows? "; - if (status_logger.isDebugEnabled() && result == 0) { - HostVO ho = findById(host.getId()); - assert ho != null : "How how how? : " + host.getId(); + if (status_logger.isDebugEnabled() && result == 0) { + HostVO ho = findById(host.getId()); + assert ho != null : "How how how? : " + host.getId(); + + StringBuilder str = new StringBuilder("Unable to update host for event:").append(event.toString()); + str.append(". Name=").append(host.getName()); + str.append("; New=[status=").append(newStatus.toString()).append(":msid=") + .append(newStatus.lostConnection() ? 
"null" : host.getManagementServerId()).append(":lastpinged=").append(host.getLastPinged()).append("]"); + str.append("; Old=[status=").append(oldStatus.toString()).append(":msid=").append(host.getManagementServerId()).append(":lastpinged=") + .append(oldPingTime).append("]"); + str.append("; DB=[status=").append(vo.getStatus().toString()).append(":msid=").append(vo.getManagementServerId()).append(":lastpinged=") + .append(vo.getLastPinged()).append(":old update count=").append(oldUpdateCount).append("]"); + status_logger.debug(str.toString()); + } else { + StringBuilder msg = new StringBuilder("Agent status update: ["); + msg.append("id = " + host.getId()); + msg.append("; name = " + host.getName()); + msg.append("; old status = " + oldStatus); + msg.append("; event = " + event); + msg.append("; new status = " + newStatus); + msg.append("; old update count = " + oldUpdateCount); + msg.append("; new update count = " + newUpdateCount + "]"); + status_logger.debug(msg.toString()); + } + + return result > 0; + } - StringBuilder str = new StringBuilder("Unable to update host for event:").append(event.toString()); - str.append(". Name=").append(host.getName()); - str.append("; New=[status=").append(newStatus.toString()).append(":msid=") - .append(newStatus.lostConnection() ? "null" : host.getManagementServerId()).append(":lastpinged=").append(host.getLastPinged()).append("]"); - str.append("; Old=[status=").append(oldStatus.toString()).append(":msid=").append(host.getManagementServerId()).append(":lastpinged=") - .append(oldPingTime).append("]"); - str.append("; DB=[status=").append(vo.getStatus().toString()).append(":msid=").append(vo.getManagementServerId()).append(":lastpinged=") - .append(vo.getLastPinged()).append(":old update count=").append(oldUpdateCount).append("]"); - status_logger.debug(str.toString()); - } else { - StringBuilder msg = new StringBuilder("Agent status update: ["); - msg.append("id = " + host.getId()); - msg.append("; name = " + host.getName()); - msg.append("; old status = " + oldStatus); - msg.append("; event = " + event); - msg.append("; new status = " + newStatus); - msg.append("; old update count = " + oldUpdateCount); - msg.append("; new update count = " + newUpdateCount + "]"); - status_logger.debug(msg.toString()); - } - - return result > 0; - } - @Override public boolean updateResourceState(ResourceState oldState, ResourceState.Event event, ResourceState newState, Host vo) { HostVO host = (HostVO)vo; @@ -735,41 +734,41 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao sb.and("resource_state", sb.entity().getResourceState(), SearchCriteria.Op.EQ); sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); sb.done(); - + SearchCriteria sc = sb.create(); sc.setParameters("resource_state", oldState); sc.setParameters("id", host.getId()); - + UpdateBuilder ub = getUpdateBuilder(host); ub.set(host, _resourceStateAttr, newState); int result = update(ub, sc, null); assert result <= 1 : "How can this update " + result + " rows? "; - + if (state_logger.isDebugEnabled() && result == 0) { HostVO ho = findById(host.getId()); assert ho != null : "How how how? 
: " + host.getId(); StringBuilder str = new StringBuilder("Unable to update resource state: ["); - str.append("m = " + host.getId()); - str.append("; name = " + host.getName()); - str.append("; old state = " + oldState); - str.append("; event = " + event); - str.append("; new state = " + newState + "]"); - state_logger.debug(str.toString()); + str.append("m = " + host.getId()); + str.append("; name = " + host.getName()); + str.append("; old state = " + oldState); + str.append("; event = " + event); + str.append("; new state = " + newState + "]"); + state_logger.debug(str.toString()); } else { - StringBuilder msg = new StringBuilder("Resource state update: ["); - msg.append("id = " + host.getId()); - msg.append("; name = " + host.getName()); - msg.append("; old state = " + oldState); - msg.append("; event = " + event); - msg.append("; new state = " + newState + "]"); - state_logger.debug(msg.toString()); + StringBuilder msg = new StringBuilder("Resource state update: ["); + msg.append("id = " + host.getId()); + msg.append("; name = " + host.getName()); + msg.append("; old state = " + oldState); + msg.append("; event = " + event); + msg.append("; new state = " + newState + "]"); + state_logger.debug(msg.toString()); } - + return result > 0; } - + @Override public HostVO findByTypeNameAndZoneId(long zoneId, String name, Host.Type type) { SearchCriteria sc = TypeNameZoneSearch.create(); @@ -779,15 +778,15 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return findOneBy(sc); } - @Override - public List findHypervisorHostInCluster(long clusterId) { - SearchCriteria sc = TypeClusterStatusSearch.create(); - sc.setParameters("type", Host.Type.Routing); - sc.setParameters("cluster", clusterId); - sc.setParameters("status", Status.Up); - sc.setParameters("resourceState", ResourceState.Enabled); - - return listBy(sc); - } + @Override + public List findHypervisorHostInCluster(long clusterId) { + SearchCriteria sc = TypeClusterStatusSearch.create(); + sc.setParameters("type", Host.Type.Routing); + sc.setParameters("cluster", clusterId); + sc.setParameters("status", Status.Up); + sc.setParameters("resourceState", ResourceState.Enabled); + + return listBy(sc); + } } diff --git a/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java b/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java index 5a32de744a0..b0c25c74166 100644 --- a/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java +++ b/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java @@ -33,8 +33,6 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.utils.component.Adapters; -import com.cloud.utils.component.ComponentLocator; @Component @Local(value = { HypervisorGuruManager.class } ) @@ -42,54 +40,56 @@ public class HypervisorGuruManagerImpl implements HypervisorGuruManager { public static final Logger s_logger = Logger.getLogger(HypervisorGuruManagerImpl.class.getName()); @Inject HostDao _hostDao; - - String _name; - - @Inject List _hvGuruList; + + String _name; + + @Inject List _hvGuruList; Map _hvGurus = new HashMap(); - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - _name = name; - return true; - } - - @PostConstruct - public void init() { - for(HypervisorGuru guru : _hvGuruList) { - _hvGurus.put(guru.getHypervisorType(), guru); - } - } - @Override - public boolean start() { - return true; - } + @Override + public 
boolean configure(String name, Map params) throws ConfigurationException { + _name = name; + return true; + } - @Override - public boolean stop() { - return true; - } + @PostConstruct + public void init() { + for(HypervisorGuru guru : _hvGuruList) { + _hvGurus.put(guru.getHypervisorType(), guru); + } + } - @Override - public String getName() { - return _name; - } + @Override + public boolean start() { + return true; + } - public HypervisorGuru getGuru(HypervisorType hypervisorType) { - return _hvGurus.get(hypervisorType); - } - + @Override + public boolean stop() { + return true; + } + + @Override + public String getName() { + return _name; + } + + @Override + public HypervisorGuru getGuru(HypervisorType hypervisorType) { + return _hvGurus.get(hypervisorType); + } + + @Override public long getGuruProcessedCommandTargetHost(long hostId, Command cmd) { - HostVO hostVo = _hostDao.findById(hostId); - HypervisorGuru hvGuru = null; - if(hostVo.getType() == Host.Type.Routing) { - hvGuru = _hvGurus.get(hostVo.getHypervisorType()); - } - - if(hvGuru != null) - return hvGuru.getCommandHostDelegation(hostId, cmd); - - return hostId; + HostVO hostVo = _hostDao.findById(hostId); + HypervisorGuru hvGuru = null; + if(hostVo.getType() == Host.Type.Routing) { + hvGuru = _hvGurus.get(hostVo.getHypervisorType()); + } + + if(hvGuru != null) + return hvGuru.getCommandHostDelegation(hostId, cmd); + + return hostId; } } diff --git a/server/src/com/cloud/maint/UpgradeManagerImpl.java b/server/src/com/cloud/maint/UpgradeManagerImpl.java index 2f50dffad71..03c7d2f0cea 100644 --- a/server/src/com/cloud/maint/UpgradeManagerImpl.java +++ b/server/src/com/cloud/maint/UpgradeManagerImpl.java @@ -36,15 +36,14 @@ import javax.naming.ConfigurationException; import org.apache.commons.httpclient.HttpClient; import org.apache.commons.httpclient.HttpException; -import org.apache.commons.httpclient.methods.GetMethod; import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager; +import org.apache.commons.httpclient.methods.GetMethod; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.maint.dao.AgentUpgradeDao; import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.component.ComponentLocator; /** * @@ -57,7 +56,7 @@ import com.cloud.utils.component.ComponentLocator; @Component @Local(UpgradeManager.class) public class UpgradeManagerImpl implements UpgradeManager { - private final static Logger s_logger = Logger.getLogger(UpgradeManagerImpl.class); + private final static Logger s_logger = Logger.getLogger(UpgradeManagerImpl.class); private static final MultiThreadedHttpConnectionManager s_httpClientManager = new MultiThreadedHttpConnectionManager(); String _name; @@ -66,10 +65,10 @@ public class UpgradeManagerImpl implements UpgradeManager { // String _upgradeUrl; String _agentPath; long _checkInterval; - + @Inject AgentUpgradeDao _upgradeDao; @Inject ConfigurationDao _configDao; - + @Override public State registerForUpgrade(long hostId, String version) { State state = State.UpToDate; @@ -90,11 +89,11 @@ public class UpgradeManagerImpl implements UpgradeManager { _upgradeDao.persist(vo); } } - */ - + */ + return state; } - + public String deployNewAgent(String url) { s_logger.info("Updating agent with binary from " + url); @@ -132,18 +131,18 @@ public class UpgradeManagerImpl implements UpgradeManager { s_logger.debug("New Agent zip file is now retrieved"); } catch (final HttpException e) { - return "Unable to 
retrieve the file from " + url; + return "Unable to retrieve the file from " + url; } catch (final IOException e) { - return "Unable to retrieve the file from " + url; + return "Unable to retrieve the file from " + url; } finally { - method.releaseConnection(); + method.releaseConnection(); } - + file.delete(); - + return "File will be deployed."; } - + // @Override // public String getAgentUrl() { // return _upgradeUrl; @@ -174,7 +173,7 @@ public class UpgradeManagerImpl implements UpgradeManager { } //_upgradeUrl = configs.get("upgrade.url"); - + // if (_upgradeUrl == null) { // s_logger.debug("There is no upgrade url found in configuration table"); // // _upgradeUrl = "http://updates.vmops.com/releases/rss.xml"; diff --git a/server/src/com/cloud/migration/Db21to22MigrationUtil.java b/server/src/com/cloud/migration/Db21to22MigrationUtil.java index b6ff9069ed3..66a7d59f53a 100755 --- a/server/src/com/cloud/migration/Db21to22MigrationUtil.java +++ b/server/src/com/cloud/migration/Db21to22MigrationUtil.java @@ -19,9 +19,7 @@ package com.cloud.migration; import java.io.File; import java.sql.PreparedStatement; import java.sql.ResultSet; -import java.util.LinkedList; import java.util.List; -import java.util.Queue; import javax.inject.Inject; @@ -32,21 +30,17 @@ import com.cloud.configuration.Resource.ResourceType; import com.cloud.configuration.ResourceCountVO; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.configuration.dao.ResourceCountDao; -import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenter.NetworkType; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; -import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.resource.ResourceManager; import com.cloud.user.Account; import com.cloud.user.dao.AccountDao; import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; @@ -56,37 +50,37 @@ import com.cloud.vm.dao.InstanceGroupDao; import com.cloud.vm.dao.InstanceGroupVMMapDao; public class Db21to22MigrationUtil { - - @Inject private ClusterDao _clusterDao; - @Inject private HostDao _hostDao; - @Inject private AccountDao _accountDao; - @Inject private DomainDao _domainDao; - @Inject private ResourceCountDao _resourceCountDao; - @Inject private InstanceGroupDao _vmGroupDao; - @Inject private InstanceGroupVMMapDao _groupVMMapDao; - @Inject private ConfigurationDao _configurationDao; - @Inject private DataCenterDao _zoneDao; - @Inject private ResourceManager _resourceMgr; - + + @Inject private ClusterDao _clusterDao; + @Inject private HostDao _hostDao; + @Inject private AccountDao _accountDao; + @Inject private DomainDao _domainDao; + @Inject private ResourceCountDao _resourceCountDao; + @Inject private InstanceGroupDao _vmGroupDao; + @Inject private InstanceGroupVMMapDao _groupVMMapDao; + @Inject private ConfigurationDao _configurationDao; + @Inject private DataCenterDao _zoneDao; + @Inject private ResourceManager _resourceMgr; + private void doMigration() { setupComponents(); migrateResourceCounts(); - + setupInstanceGroups(); migrateZones(); - + setupClusterGuid(); - + System.out.println("Migration done"); } - + /* add guid in cluster table */ private void setupClusterGuid() { - - //FIXME moving out 
XenServer code out of server. This upgrade step need to be taken care of - /* + + //FIXME: moving XenServer code out of the server. This upgrade step needs to be taken care of + /* XenServerConnectionPool _connPool = XenServerConnectionPool.getInstance(); List clusters = _clusterDao.listByHyTypeWithoutGuid(HypervisorType.XenServer.toString()); for (ClusterVO cluster : clusters) { @@ -120,34 +114,34 @@ public class Db21to22MigrationUtil { break; } } - */ + */ } - + /** * This method migrates the zones based on bug: 7204 * based on the param direct.attach.untagged.vlan.enabled, we update zone to basic or advanced for 2.2 */ private void migrateZones(){ - try { - System.out.println("Migrating zones"); - String val = _configurationDao.getValue("direct.attach.untagged.vlan.enabled"); - NetworkType networkType; - if(val == null || val.equalsIgnoreCase("true")){ - networkType = NetworkType.Basic; - }else{ - networkType = NetworkType.Advanced; - } - List existingZones = _zoneDao.listAll(); - for(DataCenterVO zone : existingZones){ - zone.setNetworkType(networkType); - _zoneDao.update(zone.getId(), zone); - } - } catch (Exception e) { - System.out.println("Unhandled exception in migrateZones()" + e); - } + try { + System.out.println("Migrating zones"); + String val = _configurationDao.getValue("direct.attach.untagged.vlan.enabled"); + NetworkType networkType; + if(val == null || val.equalsIgnoreCase("true")){ + networkType = NetworkType.Basic; + }else{ + networkType = NetworkType.Advanced; + } + List existingZones = _zoneDao.listAll(); + for(DataCenterVO zone : existingZones){ + zone.setNetworkType(networkType); + _zoneDao.update(zone.getId(), zone); + } + } catch (Exception e) { + System.out.println("Unhandled exception in migrateZones()" + e); + } } - + private void migrateResourceCounts() { System.out.println("migrating resource counts"); SearchBuilder sb = _resourceCountDao.createSearchBuilder(); @@ -175,46 +169,46 @@ public class Db21to22MigrationUtil { private void setupComponents() { } - + private void setupInstanceGroups() { - System.out.println("setting up vm instance groups"); - - //Search for all the vms that have not null groups - Long vmId = 0L; - Long accountId = 0L; - String groupName; - Transaction txn = Transaction.open(Transaction.CLOUD_DB); - txn.start(); - try { - String request = "SELECT vm.id, uservm.account_id, vm.group from vm_instance vm, user_vm uservm where vm.group is not null and vm.removed is null and vm.id=uservm.id order by id"; - System.out.println(request); - PreparedStatement statement = txn.prepareStatement(request); - ResultSet result = statement.executeQuery(); - while (result.next()) { - vmId = result.getLong(1); - accountId = result.getLong(2); - groupName = result.getString(3); - InstanceGroupVO group = _vmGroupDao.findByAccountAndName(accountId, groupName); - //Create vm group if the group doesn't exist for this account - if (group == null) { - group = new InstanceGroupVO(groupName, accountId); - group = _vmGroupDao.persist(group); - System.out.println("Created new isntance group with name " + groupName + " for account id=" + accountId); - } - - if (group != null) { - InstanceGroupVMMapVO groupVmMapVO = new InstanceGroupVMMapVO(group.getId(), vmId); - _groupVMMapDao.persist(groupVmMapVO); - System.out.println("Assigned vm id=" + vmId + " to group with name " + groupName + " for account id=" + accountId); - } - } - txn.commit(); - statement.close(); - } catch (Exception e) { - System.out.println("Unhandled exception: " + e); - } finally { - txn.close(); - } + 
System.out.println("setting up vm instance groups"); + + //Search for all the vms that have not null groups + Long vmId = 0L; + Long accountId = 0L; + String groupName; + Transaction txn = Transaction.open(Transaction.CLOUD_DB); + txn.start(); + try { + String request = "SELECT vm.id, uservm.account_id, vm.group from vm_instance vm, user_vm uservm where vm.group is not null and vm.removed is null and vm.id=uservm.id order by id"; + System.out.println(request); + PreparedStatement statement = txn.prepareStatement(request); + ResultSet result = statement.executeQuery(); + while (result.next()) { + vmId = result.getLong(1); + accountId = result.getLong(2); + groupName = result.getString(3); + InstanceGroupVO group = _vmGroupDao.findByAccountAndName(accountId, groupName); + //Create vm group if the group doesn't exist for this account + if (group == null) { + group = new InstanceGroupVO(groupName, accountId); + group = _vmGroupDao.persist(group); + System.out.println("Created new isntance group with name " + groupName + " for account id=" + accountId); + } + + if (group != null) { + InstanceGroupVMMapVO groupVmMapVO = new InstanceGroupVMMapVO(group.getId(), vmId); + _groupVMMapDao.persist(groupVmMapVO); + System.out.println("Assigned vm id=" + vmId + " to group with name " + groupName + " for account id=" + accountId); + } + } + txn.commit(); + statement.close(); + } catch (Exception e) { + System.out.println("Unhandled exception: " + e); + } finally { + txn.close(); + } } diff --git a/server/src/com/cloud/network/ExteralIpAddressAllocator.java b/server/src/com/cloud/network/ExteralIpAddressAllocator.java index f8b5c152519..eca5ff6eda9 100644 --- a/server/src/com/cloud/network/ExteralIpAddressAllocator.java +++ b/server/src/com/cloud/network/ExteralIpAddressAllocator.java @@ -19,7 +19,6 @@ package com.cloud.network; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; -import java.net.HttpURLConnection; import java.net.MalformedURLException; import java.net.URL; import java.net.URLConnection; @@ -33,136 +32,135 @@ import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.configuration.dao.ConfigurationDao; - import com.cloud.dc.dao.VlanDao; import com.cloud.network.dao.IPAddressDao; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value=IpAddrAllocator.class) public class ExteralIpAddressAllocator implements IpAddrAllocator{ - private static final Logger s_logger = Logger.getLogger(ExteralIpAddressAllocator.class); - String _name; + private static final Logger s_logger = Logger.getLogger(ExteralIpAddressAllocator.class); + String _name; @Inject ConfigurationDao _configDao = null; @Inject IPAddressDao _ipAddressDao = null; @Inject VlanDao _vlanDao; - private boolean _isExternalIpAllocatorEnabled = false; - private String _externalIpAllocatorUrl = null; + private boolean _isExternalIpAllocatorEnabled = false; + private String _externalIpAllocatorUrl = null; - - @Override - public IpAddr getPrivateIpAddress(String macAddr, long dcId, long podId) { - if (_externalIpAllocatorUrl == null || this._externalIpAllocatorUrl.equalsIgnoreCase("")) { - return new IpAddr(); - } - String urlString = this._externalIpAllocatorUrl + "?command=getIpAddr&mac=" + macAddr + "&dc=" + dcId + "&pod=" + podId; - s_logger.debug("getIP:" + urlString); - - BufferedReader in = null; - try { - URL url = new URL(urlString); - URLConnection conn = url.openConnection(); - 
conn.setReadTimeout(30000); - - in = new BufferedReader(new InputStreamReader(conn.getInputStream())); - String inputLine; - while ((inputLine = in.readLine()) != null) { - s_logger.debug(inputLine); - String[] tokens = inputLine.split(","); - if (tokens.length != 3) { - s_logger.debug("the return value should be: mac,netmask,gateway"); - return new IpAddr(); - } - return new IpAddr(tokens[0], tokens[1], tokens[2]); - } - - return new IpAddr(); - } catch (MalformedURLException e) { - throw new CloudRuntimeException("URL is malformed " + urlString, e); - } catch (IOException e) { - return new IpAddr(); - } finally { - if (in != null) { - try { - in.close(); - } catch (IOException e) { - } - } - } - - } - - @Override - public IpAddr getPublicIpAddress(String macAddr, long dcId, long podId) { - /*TODO: call API to get ip address from external DHCP server*/ - return getPrivateIpAddress(macAddr, dcId, podId); - } - - @Override - public boolean releasePrivateIpAddress(String ip, long dcId, long podId) { - /*TODO: call API to release the ip address from external DHCP server*/ - if (_externalIpAllocatorUrl == null || this._externalIpAllocatorUrl.equalsIgnoreCase("")) { - return false; - } - - String urlString = this._externalIpAllocatorUrl + "?command=releaseIpAddr&ip=" + ip + "&dc=" + dcId + "&pod=" + podId; - - s_logger.debug("releaseIP:" + urlString); - BufferedReader in = null; - try { - URL url = new URL(urlString); - URLConnection conn = url.openConnection(); - conn.setReadTimeout(30000); - - in = new BufferedReader(new InputStreamReader(conn.getInputStream())); - - return true; - } catch (MalformedURLException e) { - throw new CloudRuntimeException("URL is malformed " + urlString, e); - } catch (IOException e) { - return false; - } finally { - if (in != null) { - try { - in.close(); - } catch (IOException e) { - } - } - } - } - - @Override - public boolean releasePublicIpAddress(String ip, long dcId, long podId) { - /*TODO: call API to release the ip address from external DHCP server*/ - return releasePrivateIpAddress(ip, dcId, podId); - } - - public boolean exteralIpAddressAllocatorEnabled() { - return _isExternalIpAllocatorEnabled; - } - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - _isExternalIpAllocatorEnabled = Boolean.parseBoolean(_configDao.getValue("direct.attach.network.externalIpAllocator.enabled")); - _externalIpAllocatorUrl = _configDao.getValue("direct.attach.network.externalIpAllocator.url"); - _name = name; - - return true; - } - @Override - public String getName() { - return _name; - } + @Override + public IpAddr getPrivateIpAddress(String macAddr, long dcId, long podId) { + if (_externalIpAllocatorUrl == null || this._externalIpAllocatorUrl.equalsIgnoreCase("")) { + return new IpAddr(); + } + String urlString = this._externalIpAllocatorUrl + "?command=getIpAddr&mac=" + macAddr + "&dc=" + dcId + "&pod=" + podId; + s_logger.debug("getIP:" + urlString); - @Override - public boolean start() { - return true; - } + BufferedReader in = null; + try { + URL url = new URL(urlString); + URLConnection conn = url.openConnection(); + conn.setReadTimeout(30000); - @Override - public boolean stop() { - return true; - } + in = new BufferedReader(new InputStreamReader(conn.getInputStream())); + String inputLine; + while ((inputLine = in.readLine()) != null) { + s_logger.debug(inputLine); + String[] tokens = inputLine.split(","); + if (tokens.length != 3) { + s_logger.debug("the return value should be: mac,netmask,gateway"); + return new 
IpAddr(); + } + return new IpAddr(tokens[0], tokens[1], tokens[2]); + } + + return new IpAddr(); + } catch (MalformedURLException e) { + throw new CloudRuntimeException("URL is malformed " + urlString, e); + } catch (IOException e) { + return new IpAddr(); + } finally { + if (in != null) { + try { + in.close(); + } catch (IOException e) { + } + } + } + + } + + @Override + public IpAddr getPublicIpAddress(String macAddr, long dcId, long podId) { + /*TODO: call API to get ip address from external DHCP server*/ + return getPrivateIpAddress(macAddr, dcId, podId); + } + + @Override + public boolean releasePrivateIpAddress(String ip, long dcId, long podId) { + /*TODO: call API to release the ip address from external DHCP server*/ + if (_externalIpAllocatorUrl == null || this._externalIpAllocatorUrl.equalsIgnoreCase("")) { + return false; + } + + String urlString = this._externalIpAllocatorUrl + "?command=releaseIpAddr&ip=" + ip + "&dc=" + dcId + "&pod=" + podId; + + s_logger.debug("releaseIP:" + urlString); + BufferedReader in = null; + try { + URL url = new URL(urlString); + URLConnection conn = url.openConnection(); + conn.setReadTimeout(30000); + + in = new BufferedReader(new InputStreamReader(conn.getInputStream())); + + return true; + } catch (MalformedURLException e) { + throw new CloudRuntimeException("URL is malformed " + urlString, e); + } catch (IOException e) { + return false; + } finally { + if (in != null) { + try { + in.close(); + } catch (IOException e) { + } + } + } + } + + @Override + public boolean releasePublicIpAddress(String ip, long dcId, long podId) { + /*TODO: call API to release the ip address from external DHCP server*/ + return releasePrivateIpAddress(ip, dcId, podId); + } + + @Override + public boolean exteralIpAddressAllocatorEnabled() { + return _isExternalIpAllocatorEnabled; + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + _isExternalIpAllocatorEnabled = Boolean.parseBoolean(_configDao.getValue("direct.attach.network.externalIpAllocator.enabled")); + _externalIpAllocatorUrl = _configDao.getValue("direct.attach.network.externalIpAllocator.url"); + _name = name; + + return true; + } + + @Override + public String getName() { + return _name; + } + + @Override + public boolean start() { + return true; + } + + @Override + public boolean stop() { + return true; + } } diff --git a/server/src/com/cloud/network/ExternalNetworkDeviceManagerImpl.java b/server/src/com/cloud/network/ExternalNetworkDeviceManagerImpl.java index 93e0ef6d01d..e95dd175fc8 100755 --- a/server/src/com/cloud/network/ExternalNetworkDeviceManagerImpl.java +++ b/server/src/com/cloud/network/ExternalNetworkDeviceManagerImpl.java @@ -27,22 +27,21 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.network.AddNetworkDeviceCmd; +import org.apache.cloudstack.api.command.admin.network.DeleteNetworkDeviceCmd; import org.apache.cloudstack.api.command.admin.network.ListNetworkDeviceCmd; +import org.apache.cloudstack.api.response.NetworkDeviceResponse; import org.apache.cloudstack.network.ExternalNetworkDeviceManager; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; import com.cloud.api.ApiDBUtils; - -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.IdentityService; -import 
org.apache.cloudstack.api.command.admin.network.DeleteNetworkDeviceCmd; import com.cloud.baremetal.ExternalDhcpManager; import com.cloud.baremetal.PxeServerManager; -import com.cloud.baremetal.PxeServerProfile; import com.cloud.baremetal.PxeServerManager.PxeServerType; +import com.cloud.baremetal.PxeServerProfile; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.DataCenter; import com.cloud.dc.Pod; @@ -64,14 +63,11 @@ import com.cloud.network.dao.PhysicalNetworkServiceProviderDao; import com.cloud.network.dao.VpnUserDao; import com.cloud.network.rules.dao.PortForwardingRulesDao; import com.cloud.offerings.dao.NetworkOfferingDao; -import com.cloud.server.ManagementServer; -import org.apache.cloudstack.api.response.NetworkDeviceResponse; import com.cloud.server.api.response.NwDeviceDhcpResponse; import com.cloud.server.api.response.PxePingResponse; import com.cloud.user.AccountManager; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserStatisticsDao; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.NicDao; @@ -112,16 +108,16 @@ public class ExternalNetworkDeviceManagerImpl implements ExternalNetworkDeviceMa // obsolete // private final static IdentityService _identityService = (IdentityService)ComponentLocator.getLocator(ManagementServer.Name).getManager(IdentityService.class); - + private static final org.apache.log4j.Logger s_logger = Logger.getLogger(ExternalNetworkDeviceManagerImpl.class); protected String _name; - + @Override public boolean configure(String name, Map params) throws ConfigurationException { _name = name; return true; } - + @Override public boolean start() { return true; @@ -136,21 +132,21 @@ public class ExternalNetworkDeviceManagerImpl implements ExternalNetworkDeviceMa public String getName() { return _name; } - + @Override public Host addNetworkDevice(AddNetworkDeviceCmd cmd) { Map paramList = cmd.getParamList(); if (paramList == null) { throw new CloudRuntimeException("Parameter list is null"); } - + Collection paramsCollection = paramList.values(); HashMap params = (HashMap) (paramsCollection.toArray())[0]; if (cmd.getDeviceType().equalsIgnoreCase(NetworkDevice.ExternalDhcp.getName())) { //Long zoneId = _identityService.getIdentityId("data_center", (String) params.get(ApiConstants.ZONE_ID)); //Long podId = _identityService.getIdentityId("host_pod_ref", (String)params.get(ApiConstants.POD_ID)); - Long zoneId = Long.valueOf((String) params.get(ApiConstants.ZONE_ID)); - Long podId = Long.valueOf((String)params.get(ApiConstants.POD_ID)); + Long zoneId = Long.valueOf((String) params.get(ApiConstants.ZONE_ID)); + Long podId = Long.valueOf((String)params.get(ApiConstants.POD_ID)); String type = (String) params.get(ApiConstants.DHCP_SERVER_TYPE); String url = (String) params.get(ApiConstants.URL); String username = (String) params.get(ApiConstants.USERNAME); @@ -217,7 +213,7 @@ public class ExternalNetworkDeviceManagerImpl implements ExternalNetworkDeviceMa } else { throw new CloudRuntimeException("Unsupported network device type:" + host.getType()); } - + response.setId(device.getUuid()); return response; } @@ -234,19 +230,19 @@ public class ExternalNetworkDeviceManagerImpl implements ExternalNetworkDeviceMa // } else { // List devs = _hostDao.listBy(type, zoneId); // res.addAll(devs); - // } - - // return res; + // } + + // return res; return null; } - + @Override public List 
listNetworkDevice(ListNetworkDeviceCmd cmd) { Map paramList = cmd.getParamList(); if (paramList == null) { throw new CloudRuntimeException("Parameter list is null"); } - + List res; Collection paramsCollection = paramList.values(); HashMap params = (HashMap) (paramsCollection.toArray())[0]; @@ -275,13 +271,13 @@ public class ExternalNetworkDeviceManagerImpl implements ExternalNetworkDeviceMa } else { throw new CloudRuntimeException("Unknown network device type:" + cmd.getDeviceType()); } - + return res; } @Override public boolean deleteNetworkDevice(DeleteNetworkDeviceCmd cmd) { - HostVO device = _hostDao.findById(cmd.getId()); - return true; + HostVO device = _hostDao.findById(cmd.getId()); + return true; } } diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java index 84fcd329218..2a4b27e5de0 100755 --- a/server/src/com/cloud/network/NetworkManagerImpl.java +++ b/server/src/com/cloud/network/NetworkManagerImpl.java @@ -132,7 +132,8 @@ import com.cloud.user.dao.UserStatisticsDao; import com.cloud.utils.AnnotationHelper; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; -import com.cloud.utils.component.Adapters; +import com.cloud.utils.component.AdapterBase; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.Manager; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.JoinBuilder.JoinType; @@ -212,11 +213,9 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag @Inject PodVlanMapDao _podVlanMapDao; - //@com.cloud.utils.component.Inject(adapter = NetworkGuru.class) @Inject List _networkGurus; - // @com.cloud.utils.component.Inject(adapter = NetworkElement.class) @Inject List _networkElements; @@ -295,7 +294,7 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag @Override public NetworkElement getElementImplementingProvider(String providerName) { String elementName = s_providerToNetworkElementMap.get(providerName); - NetworkElement element = Adapters.getAdapterByName(_networkElements, elementName); + NetworkElement element = AdapterBase.getAdapterByName(_networkElements, elementName); return element; } @@ -1540,6 +1539,8 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag "multiple NetworkElements found for Provider: " + implementedProvider.getName()); return false; } + s_logger.info("add element/provider mapping. 
provider: " + implementedProvider.getName() + " -> " + element.getName() + + ", class: " + ComponentContext.getTargetClass(element).getName()); s_providerToNetworkElementMap.put(implementedProvider.getName(), element.getName()); } if (capabilities != null && implementedProvider != null) { @@ -1802,7 +1803,7 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag NetworkVO ntwkVO = _networksDao.findById(network.getId()); s_logger.debug("Allocating nic for vm " + vm.getVirtualMachine() + " in network " + network + " with requested profile " + requested); - NetworkGuru guru = Adapters.getAdapterByName(_networkGurus, ntwkVO.getGuruName()); + NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, ntwkVO.getGuruName()); if (requested != null && requested.getMode() == null) { requested.setMode(network.getMode()); @@ -1947,7 +1948,7 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag } try { - NetworkGuru guru = Adapters.getAdapterByName(_networkGurus, network.getGuruName()); + NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, network.getGuruName()); Network.State state = network.getState(); if (state == Network.State.Implemented || state == Network.State.Implementing) { s_logger.debug("Network id=" + networkId + " is already implemented"); @@ -2154,7 +2155,7 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { Integer networkRate = getNetworkRate(network.getId(), vmProfile.getId()); - NetworkGuru guru = Adapters.getAdapterByName(_networkGurus, network.getGuruName()); + NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, network.getGuruName()); NicVO nic = _nicDao.findById(nicId); NicProfile profile = null; @@ -2216,7 +2217,7 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag NetworkVO network = _networksDao.findById(nic.getNetworkId()); Integer networkRate = getNetworkRate(network.getId(), vm.getId()); - NetworkGuru guru = Adapters.getAdapterByName(_networkGurus, network.getGuruName()); + NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, network.getGuruName()); NicProfile profile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), networkRate, isSecurityGroupSupportedInNetwork(network), getNetworkTag(vm.getHypervisorType(), network)); guru.updateNicProfile(profile, network); @@ -2260,7 +2261,7 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag if (originalState == Nic.State.Reserved || originalState == Nic.State.Reserving) { if (nic.getReservationStrategy() == Nic.ReservationStrategy.Start) { - NetworkGuru guru = Adapters.getAdapterByName(_networkGurus, network.getGuruName()); + NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, network.getGuruName()); nic.setState(Nic.State.Releasing); _nicDao.update(nic.getId(), nic); NicProfile profile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), null, @@ -2310,7 +2311,7 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag NetworkVO network = _networksDao.findById(nic.getNetworkId()); Integer networkRate = getNetworkRate(network.getId(), vm.getId()); - NetworkGuru guru = Adapters.getAdapterByName(_networkGurus, network.getGuruName()); + NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, network.getGuruName()); NicProfile profile = new NicProfile(nic, network, 
nic.getBroadcastUri(), nic.getIsolationUri(), networkRate,
                 isSecurityGroupSupportedInNetwork(network), getNetworkTag(vm.getHypervisorType(), network));
         guru.updateNicProfile(profile, network);
@@ -2331,7 +2332,7 @@
         NetworkVO network = _networksDao.findById(networkId);
         Integer networkRate = getNetworkRate(network.getId(), vm.getId());
 
-        NetworkGuru guru = Adapters.getAdapterByName(_networkGurus, network.getGuruName());
+        NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, network.getGuruName());
         NicProfile profile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), networkRate,
                 isSecurityGroupSupportedInNetwork(network), getNetworkTag(vm.getHypervisorType(), network));
         guru.updateNicProfile(profile, network);
@@ -2437,6 +2438,12 @@
         return _networksDao.findById(id);
     }
 
+    @Override
+    @DB
+    public Network getNetwork(String uuid) {
+        return _networksDao.findByUuid(uuid);
+    }
+
     @Override
     public List getRemoteAccessVpnElements() {
         List elements = new ArrayList();
@@ -2486,7 +2493,7 @@
         NetworkVO network = _networksDao.findById(nic.getNetworkId());
         NicProfile profile = new NicProfile(nic, network, null, null, null,
                 isSecurityGroupSupportedInNetwork(network), getNetworkTag(vm.getHypervisorType(), network));
-        NetworkGuru guru = Adapters.getAdapterByName(_networkGurus, network.getGuruName());
+        NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, network.getGuruName());
         guru.deallocate(network, profile, vm);
         _nicDao.remove(nic.getId());
         s_logger.debug("Removed nic id=" + nic.getId());
@@ -3549,7 +3556,7 @@
         if (s_logger.isDebugEnabled()) {
             s_logger.debug("Network id=" + networkId + " is shutdown successfully, cleaning up corresponding resources now.");
         }
-        NetworkGuru guru = Adapters.getAdapterByName(_networkGurus, network.getGuruName());
+        NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, network.getGuruName());
         NetworkProfile profile = convertNetworkToNetworkProfile(network.getId());
         guru.shutdown(profile, _networkOfferingDao.findById(network.getNetworkOfferingId()));
@@ -3715,7 +3722,7 @@
         if (s_logger.isDebugEnabled()) {
             s_logger.debug("Network id=" + networkId + " is destroyed successfully, cleaning up corresponding resources now.");
         }
-        NetworkGuru guru = Adapters.getAdapterByName(_networkGurus, network.getGuruName());
+        NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, network.getGuruName());
         Account owner = _accountMgr.getAccount(network.getAccountId());
 
         Transaction txn = Transaction.currentTxn();
@@ -4418,7 +4425,7 @@
     @Override
     public NetworkProfile convertNetworkToNetworkProfile(long networkId) {
         NetworkVO network = _networksDao.findById(networkId);
-        NetworkGuru guru = Adapters.getAdapterByName(_networkGurus, network.getGuruName());
+        NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, network.getGuruName());
         NetworkProfile profile = new NetworkProfile(network);
         guru.updateNetworkProfile(profile);
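Every Adapters.getAdapterByName call above now resolves against a plain @Inject-ed List (_networkGurus, _networkElements) via AdapterBase. The lookup is, in essence, a linear scan matching on getName(); a rough, simplified sketch of that behavior (stand-in types; the real utility lives in com.cloud.utils.component.AdapterBase):

import java.util.List;

// Simplified stand-ins for com.cloud.utils.component.Adapter/AdapterBase.
interface Adapter {
    String getName();
}

final class AdapterLookupSketch {
    // Return the first adapter whose configured name matches, or null.
    static <T extends Adapter> T getAdapterByName(List<T> adapters, String name) {
        for (T adapter : adapters) {
            if (adapter.getName().equals(name)) {
                return adapter;
            }
        }
        return null;
    }
}

With Spring wiring, the guru and element beans land in those injected lists directly, so name-based resolution no longer needs a locator registry.

diff --git a/server/src/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/com/cloud/network/as/AutoScaleManagerImpl.java
index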
d49f4aa168e..7c8a6f5a800 100644 --- a/server/src/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/com/cloud/network/as/AutoScaleManagerImpl.java @@ -23,27 +23,31 @@ import java.util.List; import java.util.Map; import javax.ejb.Local; +import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.acl.ControlledEntity; -import org.apache.cloudstack.api.command.admin.autoscale.CreateCounterCmd; -import org.apache.cloudstack.api.command.user.autoscale.*; -import org.apache.log4j.Logger; - import org.apache.cloudstack.api.ApiConstants; -import com.cloud.api.ApiDBUtils; -import com.cloud.api.ApiDispatcher; import org.apache.cloudstack.api.BaseListAccountResourcesCmd; +import org.apache.cloudstack.api.command.admin.autoscale.CreateCounterCmd; import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScalePolicyCmd; import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScaleVmGroupCmd; +import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScaleVmProfileCmd; import org.apache.cloudstack.api.command.user.autoscale.CreateConditionCmd; -import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; +import org.apache.cloudstack.api.command.user.autoscale.ListAutoScalePoliciesCmd; import org.apache.cloudstack.api.command.user.autoscale.ListAutoScaleVmGroupsCmd; +import org.apache.cloudstack.api.command.user.autoscale.ListAutoScaleVmProfilesCmd; import org.apache.cloudstack.api.command.user.autoscale.ListConditionsCmd; +import org.apache.cloudstack.api.command.user.autoscale.ListCountersCmd; import org.apache.cloudstack.api.command.user.autoscale.UpdateAutoScalePolicyCmd; +import org.apache.cloudstack.api.command.user.autoscale.UpdateAutoScaleVmGroupCmd; import org.apache.cloudstack.api.command.user.autoscale.UpdateAutoScaleVmProfileCmd; +import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.api.ApiDBUtils; +import com.cloud.api.ApiDispatcher; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.dao.ConfigurationDao; @@ -81,7 +85,6 @@ import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; -import com.cloud.utils.component.Inject; import com.cloud.utils.component.Manager; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; diff --git a/server/src/com/cloud/network/dao/FirewallRulesDaoImpl.java b/server/src/com/cloud/network/dao/FirewallRulesDaoImpl.java index e5ee604a2dd..0c79676b8e1 100644 --- a/server/src/com/cloud/network/dao/FirewallRulesDaoImpl.java +++ b/server/src/com/cloud/network/dao/FirewallRulesDaoImpl.java @@ -32,7 +32,6 @@ import com.cloud.network.rules.FirewallRule.TrafficType; import com.cloud.network.rules.FirewallRuleVO; import com.cloud.server.ResourceTag.TaggedResourceType; import com.cloud.tags.dao.ResourceTagsDaoImpl; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; @@ -286,11 +285,12 @@ public class FirewallRulesDaoImpl extends GenericDaoBase i if (purpose != null) { sc.setParameters("purpose", purpose); } - + sc.setParameters("trafficType", trafficType); return listBy(sc); } + @Override @DB public boolean remove(Long id) { Transaction txn = Transaction.currentTxn(); @@ -316,7 +316,7 @@ public class 
FirewallRulesDaoImpl extends GenericDaoBase i public List listByIpAndPurposeWithState(Long ipId, Purpose purpose, State state) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("ipId", ipId); - + if (state != null) { sc.setParameters("state", state); } diff --git a/server/src/com/cloud/network/dao/IPAddressDaoImpl.java b/server/src/com/cloud/network/dao/IPAddressDaoImpl.java index 833c8314ca8..4d6bd080e2a 100755 --- a/server/src/com/cloud/network/dao/IPAddressDaoImpl.java +++ b/server/src/com/cloud/network/dao/IPAddressDaoImpl.java @@ -35,7 +35,6 @@ import com.cloud.network.IPAddressVO; import com.cloud.network.IpAddress.State; import com.cloud.server.ResourceTag.TaggedResourceType; import com.cloud.tags.dao.ResourceTagsDaoImpl; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; @@ -62,11 +61,11 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen @Inject protected VlanDaoImpl _vlanDao; protected GenericSearchBuilder CountFreePublicIps; @Inject ResourceTagsDaoImpl _tagsDao; - + // make it public for JUnit test public IPAddressDaoImpl() { } - + @PostConstruct public void init() { AllFieldsSearch = createSearchBuilder(); @@ -101,7 +100,7 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen AllocatedIpCount.and("vlan", AllocatedIpCount.entity().getVlanId(), Op.EQ); AllocatedIpCount.and("allocated", AllocatedIpCount.entity().getAllocatedTime(), Op.NNULL); AllocatedIpCount.done(); - + AllIpCountForDashboard = createSearchBuilder(Integer.class); AllIpCountForDashboard.select(null, Func.COUNT, AllIpCountForDashboard.entity().getAddress()); AllIpCountForDashboard.and("dc", AllIpCountForDashboard.entity().getDataCenterId(), Op.EQ); @@ -111,7 +110,7 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen virtaulNetworkVlan.and("vlanType", virtaulNetworkVlan.entity().getVlanType(), SearchCriteria.Op.EQ); AllIpCountForDashboard.join("vlan", virtaulNetworkVlan, virtaulNetworkVlan.entity().getId(), - AllIpCountForDashboard.entity().getVlanId(), JoinBuilder.JoinType.INNER); + AllIpCountForDashboard.entity().getVlanId(), JoinBuilder.JoinType.INNER); virtaulNetworkVlan.done(); AllIpCountForDashboard.done(); @@ -121,7 +120,7 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen AllocatedIpCountForAccount.and("allocated", AllocatedIpCountForAccount.entity().getAllocatedTime(), Op.NNULL); AllocatedIpCountForAccount.and("network", AllocatedIpCountForAccount.entity().getAssociatedWithNetworkId(), Op.NNULL); AllocatedIpCountForAccount.done(); - + CountFreePublicIps = createSearchBuilder(Long.class); CountFreePublicIps.select(null, Func.COUNT, null); CountFreePublicIps.and("state", CountFreePublicIps.entity().getState(), SearchCriteria.Op.EQ); @@ -167,14 +166,14 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen sc.setParameters("accountId", accountId); return listBy(sc); } - + @Override public List listByVlanId(long vlanId) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("vlan", vlanId); return listBy(sc); } - + @Override public IPAddressVO findByIpAndSourceNetworkId(long networkId, String ipAddress) { SearchCriteria sc = AllFieldsSearch.create(); @@ -197,7 +196,7 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen sc.setParameters("dataCenterId", dcId); return listBy(sc); } - + @Override public List listByDcIdIpAddress(long dcId, String ipAddress) { SearchCriteria sc = 
AllFieldsSearch.create(); @@ -205,19 +204,19 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen sc.setParameters("ipAddress", ipAddress); return listBy(sc); } - + @Override public List listByAssociatedNetwork(long networkId, Boolean isSourceNat) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("network", networkId); - + if (isSourceNat != null) { sc.setParameters("sourceNat", isSourceNat); } - + return listBy(sc); } - + @Override public List listStaticNatPublicIps(long networkId) { SearchCriteria sc = AllFieldsSearch.create(); @@ -225,12 +224,12 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen sc.setParameters("oneToOneNat", true); return listBy(sc); } - + @Override public IPAddressVO findByAssociatedVmId(long vmId) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("associatedWithVmId", vmId); - + return findOneBy(sc); } @@ -248,13 +247,13 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen SearchCriteria sc = AllIpCountForDashboard.create(); sc.setParameters("dc", dcId); if (onlyCountAllocated){ - sc.setParameters("state", State.Free); + sc.setParameters("state", State.Free); } sc.setJoinParameters("vlan", "vlanType", vlanType.toString()); return customSearch(sc, null).get(0); } - + @Override @DB public int countIPs(long dcId, Long accountId, String vlanId, String vlanGateway, String vlanNetmask) { @@ -285,35 +284,35 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen public IPAddressVO markAsUnavailable(long ipAddressId) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("id", ipAddressId); - + IPAddressVO ip = createForUpdate(); ip.setState(State.Releasing); if (update(ip, sc) != 1) { return null; } - + return findOneBy(sc); } @Override public long countAllocatedIPsForAccount(long accountId) { - SearchCriteria sc = AllocatedIpCountForAccount.create(); + SearchCriteria sc = AllocatedIpCountForAccount.create(); sc.setParameters("account", accountId); return customSearch(sc, null).get(0); } - + @Override public List listByPhysicalNetworkId(long physicalNetworkId) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("physicalNetworkId", physicalNetworkId); return listBy(sc); } - + @Override public long countFreePublicIPs() { - SearchCriteria sc = CountFreePublicIps.create(); - sc.setParameters("state", State.Free); - sc.setJoinParameters("vlans", "vlanType", VlanType.VirtualNetwork); + SearchCriteria sc = CountFreePublicIps.create(); + sc.setParameters("state", State.Free); + sc.setJoinParameters("vlans", "vlanType", VlanType.VirtualNetwork); return customSearch(sc, null).get(0); } @@ -321,21 +320,22 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen public List listByAssociatedVpc(long vpcId, Boolean isSourceNat) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("vpcId", vpcId); - + if (isSourceNat != null) { sc.setParameters("sourceNat", isSourceNat); } - + return listBy(sc); } - + + @Override public long countFreeIPsInNetwork(long networkId) { SearchCriteria sc = CountFreePublicIps.create(); sc.setParameters("state", State.Free); sc.setParameters("networkId", networkId); return customSearch(sc, null).get(0); } - + @Override @DB public boolean remove(Long id) { diff --git a/server/src/com/cloud/network/dao/LoadBalancerDaoImpl.java b/server/src/com/cloud/network/dao/LoadBalancerDaoImpl.java index 31180db56e3..ec9dbc57a71 100644 --- a/server/src/com/cloud/network/dao/LoadBalancerDaoImpl.java +++ 
b/server/src/com/cloud/network/dao/LoadBalancerDaoImpl.java @@ -29,7 +29,6 @@ import org.springframework.stereotype.Component; import com.cloud.network.LoadBalancerVO; import com.cloud.network.rules.FirewallRule.State; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -135,5 +134,5 @@ public class LoadBalancerDaoImpl extends GenericDaoBase im sc.setParameters("state", State.Add.toString(), State.Revoke.toString()); return listBy(sc); } - + } diff --git a/server/src/com/cloud/network/dao/NetworkDaoImpl.java b/server/src/com/cloud/network/dao/NetworkDaoImpl.java index c6a65dd604f..206373ec8fb 100644 --- a/server/src/com/cloud/network/dao/NetworkDaoImpl.java +++ b/server/src/com/cloud/network/dao/NetworkDaoImpl.java @@ -45,7 +45,6 @@ import com.cloud.offerings.NetworkOfferingVO; import com.cloud.offerings.dao.NetworkOfferingDaoImpl; import com.cloud.server.ResourceTag.TaggedResourceType; import com.cloud.tags.dao.ResourceTagsDaoImpl; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; diff --git a/server/src/com/cloud/network/dao/PhysicalNetworkDaoImpl.java b/server/src/com/cloud/network/dao/PhysicalNetworkDaoImpl.java index e1603dbfa01..f4648fbcde7 100644 --- a/server/src/com/cloud/network/dao/PhysicalNetworkDaoImpl.java +++ b/server/src/com/cloud/network/dao/PhysicalNetworkDaoImpl.java @@ -25,23 +25,20 @@ import org.springframework.stereotype.Component; import com.cloud.network.Networks.TrafficType; import com.cloud.network.PhysicalNetworkVO; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; @Component @Local(value=PhysicalNetworkDao.class) @DB(txn=false) public class PhysicalNetworkDaoImpl extends GenericDaoBase implements PhysicalNetworkDao { final SearchBuilder ZoneSearch; - + @Inject protected PhysicalNetworkTrafficTypeDaoImpl _trafficTypeDao; - + protected PhysicalNetworkDaoImpl() { super(); ZoneSearch = createSearchBuilder(); @@ -65,7 +62,7 @@ public class PhysicalNetworkDaoImpl extends GenericDaoBase listByZoneAndTrafficType(long dataCenterId, TrafficType trafficType) { - + SearchBuilder trafficTypeSearch = _trafficTypeDao.createSearchBuilder(); PhysicalNetworkTrafficTypeVO trafficTypeEntity = trafficTypeSearch.entity(); trafficTypeSearch.and("trafficType", trafficTypeSearch.entity().getTrafficType(), SearchCriteria.Op.EQ); diff --git a/server/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java b/server/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java index 2dbae756d0d..8ad61767cae 100644 --- a/server/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java +++ b/server/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java @@ -25,10 +25,8 @@ import javax.inject.Inject; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import com.cloud.network.IPAddressVO; import com.cloud.network.Site2SiteVpnConnectionVO; import com.cloud.network.Site2SiteVpnGatewayVO; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.GenericDaoBase; 
import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchBuilder; @@ -41,35 +39,35 @@ public class Site2SiteVpnConnectionDaoImpl extends GenericDaoBase AllFieldsSearch; private SearchBuilder VpcSearch; private SearchBuilder VpnGatewaySearch; public Site2SiteVpnConnectionDaoImpl() { } - + @PostConstruct protected void init() { AllFieldsSearch = createSearchBuilder(); AllFieldsSearch.and("customerGatewayId", AllFieldsSearch.entity().getCustomerGatewayId(), SearchCriteria.Op.EQ); AllFieldsSearch.and("vpnGatewayId", AllFieldsSearch.entity().getVpnGatewayId(), SearchCriteria.Op.EQ); AllFieldsSearch.done(); - + VpcSearch = createSearchBuilder(); VpnGatewaySearch = _vpnGatewayDao.createSearchBuilder(); VpnGatewaySearch.and("vpcId", VpnGatewaySearch.entity().getVpcId(), SearchCriteria.Op.EQ); VpcSearch.join("vpnGatewaySearch", VpnGatewaySearch, VpnGatewaySearch.entity().getId(), VpcSearch.entity().getVpnGatewayId(), JoinType.INNER); VpcSearch.done(); } - + @Override public List listByCustomerGatewayId(long id) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("customerGatewayId", id); return listBy(sc); } - + @Override public List listByVpnGatewayId(long id) { SearchCriteria sc = AllFieldsSearch.create(); diff --git a/server/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java b/server/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java index b5e0ad5e582..491b172c082 100644 --- a/server/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java +++ b/server/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java @@ -23,7 +23,6 @@ import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.network.Site2SiteVpnGatewayVO; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -32,9 +31,9 @@ import com.cloud.utils.db.SearchCriteria; @Local(value={Site2SiteVpnGatewayDao.class}) public class Site2SiteVpnGatewayDaoImpl extends GenericDaoBase implements Site2SiteVpnGatewayDao { @Inject protected IPAddressDaoImpl _addrDao; - + private static final Logger s_logger = Logger.getLogger(Site2SiteVpnGatewayDaoImpl.class); - + private final SearchBuilder AllFieldsSearch; protected Site2SiteVpnGatewayDaoImpl() { @@ -42,7 +41,7 @@ public class Site2SiteVpnGatewayDaoImpl extends GenericDaoBase sc = AllFieldsSearch.create(); diff --git a/server/src/com/cloud/network/element/BareMetalElement.java b/server/src/com/cloud/network/element/BareMetalElement.java index d13cf141c0d..553fe1d63b2 100644 --- a/server/src/com/cloud/network/element/BareMetalElement.java +++ b/server/src/com/cloud/network/element/BareMetalElement.java @@ -16,7 +16,6 @@ // under the License. 
package com.cloud.network.element; -import java.util.List; import java.util.Map; import java.util.Set; @@ -24,7 +23,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.baremetal.ExternalDhcpManager; import com.cloud.deploy.DeployDestination; @@ -49,7 +47,6 @@ import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.NicDao; -@Component @Local(value=NetworkElement.class) public class BareMetalElement extends AdapterBase implements NetworkElement { private static final Logger s_logger = Logger.getLogger(BareMetalElement.class); diff --git a/server/src/com/cloud/network/element/CloudZonesNetworkElement.java b/server/src/com/cloud/network/element/CloudZonesNetworkElement.java index 0cf632cb666..cc3546084de 100644 --- a/server/src/com/cloud/network/element/CloudZonesNetworkElement.java +++ b/server/src/com/cloud/network/element/CloudZonesNetworkElement.java @@ -17,7 +17,6 @@ package com.cloud.network.element; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Set; @@ -25,7 +24,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; import com.cloud.agent.AgentManager.OnError; @@ -64,7 +62,6 @@ import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.UserVmDao; -@Component @Local(value = NetworkElement.class) public class CloudZonesNetworkElement extends AdapterBase implements NetworkElement, UserDataServiceProvider { private static final Logger s_logger = Logger.getLogger(CloudZonesNetworkElement.class); diff --git a/server/src/com/cloud/network/element/ExternalDhcpElement.java b/server/src/com/cloud/network/element/ExternalDhcpElement.java index 0f99abdc698..f7c465ddd35 100755 --- a/server/src/com/cloud/network/element/ExternalDhcpElement.java +++ b/server/src/com/cloud/network/element/ExternalDhcpElement.java @@ -17,7 +17,6 @@ package com.cloud.network.element; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Set; @@ -25,7 +24,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.baremetal.ExternalDhcpManager; import com.cloud.dc.DataCenter; @@ -51,7 +49,6 @@ import com.cloud.vm.ReservationContext; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; -@Component @Local(value = NetworkElement.class) public class ExternalDhcpElement extends AdapterBase implements NetworkElement, DhcpServiceProvider { private static final Logger s_logger = Logger.getLogger(ExternalDhcpElement.class); diff --git a/server/src/com/cloud/network/element/SecurityGroupElement.java b/server/src/com/cloud/network/element/SecurityGroupElement.java index bcc8ecc9269..0659db781e3 100644 --- a/server/src/com/cloud/network/element/SecurityGroupElement.java +++ b/server/src/com/cloud/network/element/SecurityGroupElement.java @@ -17,14 +17,11 @@ package com.cloud.network.element; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Set; import javax.ejb.Local; -import org.springframework.stereotype.Component; - import com.cloud.deploy.DeployDestination; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; @@ -42,7 
+39,6 @@ import com.cloud.vm.ReservationContext;
 import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VirtualMachineProfile;
 
-@Component
 @Local(value=NetworkElement.class)
 public class SecurityGroupElement extends AdapterBase implements NetworkElement {
     private static final Map<Service, Map<Capability, String>> capabilities = setCapabilities();
diff --git a/server/src/com/cloud/network/element/VirtualRouterElement.java b/server/src/com/cloud/network/element/VirtualRouterElement.java
index 481844364a6..8a69e56bd2c 100755
--- a/server/src/com/cloud/network/element/VirtualRouterElement.java
+++ b/server/src/com/cloud/network/element/VirtualRouterElement.java
@@ -25,10 +25,10 @@ import java.util.Set;
 import javax.ejb.Local;
 import javax.inject.Inject;
 
+import com.cloud.utils.PropertiesUtil;
 import org.apache.cloudstack.api.command.admin.router.ConfigureVirtualRouterElementCmd;
 import org.apache.cloudstack.api.command.admin.router.ListVirtualRouterElementsCmd;
 import org.apache.log4j.Logger;
-import org.springframework.stereotype.Component;
 
 import com.cloud.configuration.ConfigurationManager;
 import com.cloud.configuration.dao.ConfigurationDao;
@@ -89,7 +89,6 @@ import com.cloud.vm.dao.DomainRouterDao;
 import com.cloud.vm.dao.UserVmDao;
 import com.google.gson.Gson;
 
-@Component
 @Local(value = NetworkElement.class)
 public class VirtualRouterElement extends AdapterBase implements VirtualRouterElementService, DhcpServiceProvider,
     UserDataServiceProvider, SourceNatServiceProvider, StaticNatServiceProvider, FirewallServiceProvider,
@@ -682,8 +681,9 @@
     }
 
     @Override
-    public String[] getPropertiesFiles() {
-        return new String[] { "virtualrouter_commands.properties" };
+    public Map getProperties() {
+        return PropertiesUtil.processConfigFile(new String[]
+                { "virtualrouter_commands.properties" });
     }
 
     @Override
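The getPropertiesFiles() to getProperties() change moves command-property parsing into the element itself: instead of handing back file names for a component loader to read, the element now returns the parsed key/value map. Only the call site is visible in this patch, so the following is a sketch of a plausible shape for the PropertiesUtil.processConfigFile helper, assuming it merges each named classpath resource into one map:

import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

final class ConfigFileSketch {
    // Assumed contract (not confirmed by the patch): load each properties
    // file from the classpath and merge all entries into a single map.
    static Map<String, Object> processConfigFile(String[] names) {
        Map<String, Object> merged = new HashMap<String, Object>();
        for (String name : names) {
            Properties props = new Properties();
            try (InputStream is = ConfigFileSketch.class.getClassLoader().getResourceAsStream(name)) {
                if (is != null) {
                    props.load(is);
                }
            } catch (IOException e) {
                // sketch: the real helper decides how to surface load failures
            }
            for (String key : props.stringPropertyNames()) {
                merged.put(key, props.getProperty(key));
            }
        }
        return merged;
    }
}

diff --git a/server/src/com/cloud/network/element/VpcVirtualRouterElement.java b/server/src/com/cloud/network/element/VpcVirtualRouterElement.java
index 49ca0b4878d..60924a2a6ab 100644
--- a/server/src/com/cloud/network/element/VpcVirtualRouterElement.java
+++ b/server/src/com/cloud/network/element/VpcVirtualRouterElement.java
@@ -26,7 +26,6 @@ import javax.ejb.Local;
 import javax.inject.Inject;
 
 import org.apache.log4j.Logger;
-import org.springframework.stereotype.Component;
 
 import com.cloud.dc.DataCenter;
 import com.cloud.deploy.DeployDestination;
@@ -63,7 +62,6 @@ import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VirtualMachine.Type;
 import com.cloud.vm.VirtualMachineProfile;
 
-@Component
 @Local(value = NetworkElement.class)
 public class VpcVirtualRouterElement extends VirtualRouterElement implements VpcProvider, Site2SiteVpnServiceProvider, NetworkACLServiceProvider{
     private static final Logger s_logger = Logger.getLogger(VpcVirtualRouterElement.class);
diff --git a/server/src/com/cloud/network/guru/ControlNetworkGuru.java b/server/src/com/cloud/network/guru/ControlNetworkGuru.java
index 1195f682819..469a08b31d6 100755
--- a/server/src/com/cloud/network/guru/ControlNetworkGuru.java
+++ b/server/src/com/cloud/network/guru/ControlNetworkGuru.java
@@ -46,7 +46,7 @@ import com.cloud.network.Networks.Mode;
 import com.cloud.network.Networks.TrafficType;
 import com.cloud.offering.NetworkOffering;
 import com.cloud.user.Account;
-import com.cloud.utils.component.ComponentLocator;
+
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.net.NetUtils;
 import com.cloud.vm.Nic;
diff --git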
a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 85e86f6dfd2..255a1af40ae 100755 --- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -206,7 +206,7 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.PasswordGenerator; import com.cloud.utils.StringUtils; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; @@ -818,26 +818,29 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian String privateIP = router.getPrivateIpAddress(); if (privateIP != null) { + boolean forVpc = router.getVpcId() != null; List routerNics = _nicDao.listByVmId(router.getId()); for (Nic routerNic : routerNics) { Network network = _networkMgr.getNetwork(routerNic.getNetworkId()); - if (network.getTrafficType() == TrafficType.Public) { - boolean forVpc = router.getVpcId() != null; + //Send network usage command for public nic in VPC VR + //Send network usage command for isolated guest nic of non VPC VR + if ((forVpc && network.getTrafficType() == TrafficType.Public) || (!forVpc && network.getTrafficType() == TrafficType.Guest && network.getGuestType() == Network.GuestType.Isolated)) { final NetworkUsageCommand usageCmd = new NetworkUsageCommand(privateIP, router.getHostName(), forVpc, routerNic.getIp4Address()); - UserStatisticsVO previousStats = _statsDao.findBy(router.getAccountId(), - router.getDataCenterIdToDeployIn(), network.getId(), null, router.getId(), router.getType().toString()); + String routerType = router.getType().toString(); + UserStatisticsVO previousStats = _statsDao.findBy(router.getAccountId(), + router.getDataCenterIdToDeployIn(), network.getId(), (forVpc ? routerNic.getIp4Address() : null), router.getId(), routerType); NetworkUsageAnswer answer = null; try { answer = (NetworkUsageAnswer) _agentMgr.easySend(router.getHostId(), usageCmd); } catch (Exception e) { - s_logger.warn("Error while collecting network stats from router: "+router.getInstanceName()+" from host: "+router.getHostId(), e); + s_logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId(), e); continue; } if (answer != null) { if (!answer.getResult()) { - s_logger.warn("Error while collecting network stats from router: "+router.getInstanceName()+" from host: "+router.getHostId() + "; details: " + answer.getDetails()); + s_logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId() + "; details: " + answer.getDetails()); continue; } Transaction txn = Transaction.open(Transaction.CLOUD_DB); @@ -847,27 +850,27 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian continue; } txn.start(); - UserStatisticsVO stats = _statsDao.lock(router.getAccountId(), - router.getDataCenterIdToDeployIn(), network.getId(), routerNic.getIp4Address(), router.getId(), router.getType().toString()); + UserStatisticsVO stats = _statsDao.lock(router.getAccountId(), + router.getDataCenterIdToDeployIn(), network.getId(), (forVpc ? 
routerNic.getIp4Address() : null), router.getId(), routerType); if (stats == null) { s_logger.warn("unable to find stats for account: " + router.getAccountId()); continue; } - if(previousStats != null - && ((previousStats.getCurrentBytesReceived() != stats.getCurrentBytesReceived()) - || (previousStats.getCurrentBytesSent() != stats.getCurrentBytesSent()))){ + if (previousStats != null + && ((previousStats.getCurrentBytesReceived() != stats.getCurrentBytesReceived()) + || (previousStats.getCurrentBytesSent() != stats.getCurrentBytesSent()))) { s_logger.debug("Router stats changed from the time NetworkUsageCommand was sent. " + - "Ignoring current answer. Router: "+answer.getRouterName()+" Rcvd: " + - answer.getBytesReceived()+ "Sent: " +answer.getBytesSent()); + "Ignoring current answer. Router: " + answer.getRouterName() + " Rcvd: " + + answer.getBytesReceived() + "Sent: " + answer.getBytesSent()); continue; } if (stats.getCurrentBytesReceived() > answer.getBytesReceived()) { if (s_logger.isDebugEnabled()) { s_logger.debug("Received # of bytes that's less than the last one. " + - "Assuming something went wrong and persisting it. Router: " + - answer.getRouterName()+" Reported: " + answer.getBytesReceived() + "Assuming something went wrong and persisting it. Router: " + + answer.getRouterName() + " Reported: " + answer.getBytesReceived() + " Stored: " + stats.getCurrentBytesReceived()); } stats.setNetBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived()); @@ -876,8 +879,8 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian if (stats.getCurrentBytesSent() > answer.getBytesSent()) { if (s_logger.isDebugEnabled()) { s_logger.debug("Received # of bytes that's less than the last one. " + - "Assuming something went wrong and persisting it. Router: " + - answer.getRouterName()+" Reported: " + answer.getBytesSent() + "Assuming something went wrong and persisting it. 
Router: " + + answer.getRouterName() + " Reported: " + answer.getBytesSent() + " Stored: " + stats.getCurrentBytesSent()); } stats.setNetBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent()); diff --git a/server/src/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java b/server/src/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java index 119ab3d6317..5406ab624e0 100644 --- a/server/src/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java +++ b/server/src/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java @@ -27,7 +27,7 @@ import com.cloud.network.dao.FirewallRulesCidrsDaoImpl; import com.cloud.network.rules.FirewallRule.Purpose; import com.cloud.network.rules.FirewallRule.State; import com.cloud.network.rules.PortForwardingRuleVO; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; diff --git a/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java b/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java index 0a0844fc3bc..c7848b8f2fa 100755 --- a/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java +++ b/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java @@ -87,7 +87,7 @@ import com.cloud.uservm.UserVm; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.component.Manager; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; diff --git a/server/src/com/cloud/network/security/dao/SecurityGroupDaoImpl.java b/server/src/com/cloud/network/security/dao/SecurityGroupDaoImpl.java index bd44328117f..68112c0a7c1 100644 --- a/server/src/com/cloud/network/security/dao/SecurityGroupDaoImpl.java +++ b/server/src/com/cloud/network/security/dao/SecurityGroupDaoImpl.java @@ -26,7 +26,7 @@ import org.springframework.stereotype.Component; import com.cloud.network.security.SecurityGroupVO; import com.cloud.server.ResourceTag.TaggedResourceType; import com.cloud.tags.dao.ResourceTagsDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; diff --git a/server/src/com/cloud/network/vpc/VpcManagerImpl.java b/server/src/com/cloud/network/vpc/VpcManagerImpl.java index 58c497e7327..00205a10515 100644 --- a/server/src/com/cloud/network/vpc/VpcManagerImpl.java +++ b/server/src/com/cloud/network/vpc/VpcManagerImpl.java @@ -99,7 +99,7 @@ import com.cloud.user.UserContext; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.component.Manager; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; diff --git a/server/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java b/server/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java index c5270f840ce..0ebccabfa8e 100644 --- a/server/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java +++ b/server/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java @@ -27,7 +27,7 @@ import com.cloud.network.vpc.StaticRoute; import com.cloud.network.vpc.StaticRouteVO; import com.cloud.server.ResourceTag.TaggedResourceType; import com.cloud.tags.dao.ResourceTagsDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import 
com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; diff --git a/server/src/com/cloud/network/vpc/dao/VpcDaoImpl.java b/server/src/com/cloud/network/vpc/dao/VpcDaoImpl.java index b7e4d30ba87..a9b5e182b60 100644 --- a/server/src/com/cloud/network/vpc/dao/VpcDaoImpl.java +++ b/server/src/com/cloud/network/vpc/dao/VpcDaoImpl.java @@ -27,7 +27,7 @@ import com.cloud.network.vpc.Vpc; import com.cloud.network.vpc.VpcVO; import com.cloud.server.ResourceTag.TaggedResourceType; import com.cloud.tags.dao.ResourceTagsDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; diff --git a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java index ad75502db42..5b7d9d422cc 100755 --- a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java +++ b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java @@ -70,7 +70,7 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.PasswordGenerator; import com.cloud.utils.Ternary; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.component.Manager; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; diff --git a/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java b/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java index d8371c44b17..e34f7d47c85 100644 --- a/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java +++ b/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java @@ -70,7 +70,7 @@ import com.cloud.user.dao.AccountDao; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.component.Manager; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; diff --git a/server/src/com/cloud/projects/ProjectManagerImpl.java b/server/src/com/cloud/projects/ProjectManagerImpl.java index 00b7716f19c..ebe6d0c464f 100755 --- a/server/src/com/cloud/projects/ProjectManagerImpl.java +++ b/server/src/com/cloud/projects/ProjectManagerImpl.java @@ -131,7 +131,9 @@ public class ProjectManagerImpl implements ProjectManager, Manager{ Map configs = _configDao.getConfiguration(params); _invitationRequired = Boolean.valueOf(configs.get(Config.ProjectInviteRequired.key())); - _invitationTimeOut = Long.valueOf(configs.get(Config.ProjectInvitationExpirationTime.key()))*1000; + + String value = configs.get(Config.ProjectInvitationExpirationTime.key()); + _invitationTimeOut = Long.valueOf(value != null ? 
value : "86400")*1000;
 
         _allowUserToCreateProject = Boolean.valueOf(configs.get(Config.AllowUserToCreateProject.key()));
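The ProjectManagerImpl change above guards a real startup hazard: Long.valueOf(null) throws NumberFormatException, so a missing expiration-time row in the configuration table used to abort configure(). The pattern, reduced to a stand-alone helper (the 86400-second default comes straight from the patch; the helper name and shape are illustrative only):

import java.util.Map;

final class ConfigDefaultsSketch {
    // Parse a config value given in seconds, fall back to a default when
    // the row is absent, and return milliseconds as the caller expects.
    static long timeoutMillis(Map<String, String> configs, String key, String defaultSeconds) {
        String value = configs.get(key);
        return Long.valueOf(value != null ? value : defaultSeconds) * 1000;
    }
}

ProjectManagerImpl would call it as timeoutMillis(configs, Config.ProjectInvitationExpirationTime.key(), "86400").

diff --git a/server/src/com/cloud/projects/dao/ProjectDaoImpl.java b/server/src/com/cloud/projects/dao/ProjectDaoImpl.java
index 52ab141f264..e07aecc5ec6 100644
--- a/server/src/com/cloud/projects/dao/ProjectDaoImpl.java
+++ b/server/src/com/cloud/projects/dao/ProjectDaoImpl.java
@@ -28,7 +28,7 @@ import com.cloud.projects.Project;
 import com.cloud.projects.ProjectVO;
 import com.cloud.server.ResourceTag.TaggedResourceType;
 import com.cloud.tags.dao.ResourceTagsDaoImpl;
-import com.cloud.utils.component.ComponentLocator;
+
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.GenericDaoBase;
 import com.cloud.utils.db.GenericSearchBuilder;
diff --git a/server/src/com/cloud/resource/DiscovererBase.java b/server/src/com/cloud/resource/DiscovererBase.java
index 6f6d1bace00..8bf599bdf06 100644
--- a/server/src/com/cloud/resource/DiscovererBase.java
+++ b/server/src/com/cloud/resource/DiscovererBase.java
@@ -34,7 +34,7 @@ import com.cloud.dc.dao.ClusterDao;
 import com.cloud.host.HostVO;
 import com.cloud.host.dao.HostDao;
 import com.cloud.network.NetworkManager;
-import com.cloud.utils.component.ComponentLocator;
+
 import com.cloud.utils.net.UrlUtil;
 
 public abstract class DiscovererBase implements Discoverer {
diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java
index cbcdccb5f64..592a7ad861d 100755
--- a/server/src/com/cloud/resource/ResourceManagerImpl.java
+++ b/server/src/com/cloud/resource/ResourceManagerImpl.java
@@ -20,7 +20,6 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URLDecoder;
 import java.util.ArrayList;
-import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -32,16 +31,20 @@ import javax.ejb.Local;
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
+import org.apache.cloudstack.api.ApiConstants;
 import org.apache.cloudstack.api.command.admin.cluster.AddClusterCmd;
+import org.apache.cloudstack.api.command.admin.cluster.DeleteClusterCmd;
+import org.apache.cloudstack.api.command.admin.host.AddHostCmd;
+import org.apache.cloudstack.api.command.admin.host.AddSecondaryStorageCmd;
+import org.apache.cloudstack.api.command.admin.host.CancelMaintenanceCmd;
+import org.apache.cloudstack.api.command.admin.host.PrepareForMaintenanceCmd;
+import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd;
+import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd;
+import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd;
+import org.apache.cloudstack.api.command.admin.storage.AddS3Cmd;
 import org.apache.cloudstack.api.command.admin.storage.ListS3sCmd;
 import org.apache.cloudstack.api.command.admin.swift.AddSwiftCmd;
-import org.apache.cloudstack.api.command.admin.cluster.DeleteClusterCmd;
-import org.apache.cloudstack.api.command.admin.host.*;
 import org.apache.cloudstack.api.command.admin.swift.ListSwiftsCmd;
-import org.apache.cloudstack.api.command.admin.storage.AddS3Cmd;
-import com.cloud.storage.S3;
-import com.cloud.storage.S3VO;
-import com.cloud.storage.s3.S3Manager;
 import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
@@ -59,12 +62,7 @@ import com.cloud.agent.api.UpdateHostPasswordCommand;
 import com.cloud.agent.manager.AgentAttache;
 import com.cloud.agent.manager.allocator.PodAllocator;
 import com.cloud.agent.transport.Request;
-import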
org.apache.cloudstack.api.ApiConstants; import com.cloud.api.ApiDBUtils; -import org.apache.cloudstack.api.command.admin.host.AddHostCmd; -import org.apache.cloudstack.api.command.admin.host.CancelMaintenanceCmd; -import org.apache.cloudstack.api.command.admin.host.PrepareForMaintenanceCmd; -import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd; import com.cloud.capacity.Capacity; import com.cloud.capacity.CapacityVO; import com.cloud.capacity.dao.CapacityDao; @@ -112,6 +110,8 @@ import com.cloud.org.Grouping.AllocationState; import com.cloud.org.Managed; import com.cloud.service.ServiceOfferingVO; import com.cloud.storage.GuestOSCategoryVO; +import com.cloud.storage.S3; +import com.cloud.storage.S3VO; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; @@ -125,6 +125,7 @@ import com.cloud.storage.dao.GuestOSCategoryDao; import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.s3.S3Manager; import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.storage.swift.SwiftManager; import com.cloud.template.VirtualMachineTemplate; @@ -135,7 +136,6 @@ import com.cloud.user.UserContext; import com.cloud.utils.Pair; import com.cloud.utils.StringUtils; import com.cloud.utils.UriUtils; -import com.cloud.utils.component.Adapters; import com.cloud.utils.component.Manager; import com.cloud.utils.db.DB; import com.cloud.utils.db.SearchCriteria; @@ -219,7 +219,7 @@ public class ResourceManagerImpl implements ResourceManager, ResourceService, Ma // @com.cloud.utils.component.Inject(adapter = PodAllocator.class) @Inject protected List _podAllocators = null; - + @Inject protected VMTemplateDao _templateDao; @Inject @@ -236,9 +236,9 @@ public class ResourceManagerImpl implements ResourceManager, ResourceService, Ma @PostConstruct public void init() { - // TODO initialize pod allocators here instead + // TODO initialize pod allocators here instead } - + private void insertListener(Integer event, ResourceListener listener) { List lst = _lifeCycleListeners.get(event); if (lst == null) { @@ -510,10 +510,10 @@ public class ResourceManagerImpl implements ResourceManager, ResourceService, Ma @Override public Discoverer getMatchingDiscover(Hypervisor.HypervisorType hypervisorType) { - for(Discoverer discoverer : _discoverers) { + for(Discoverer discoverer : _discoverers) { if (discoverer.getHypervisorType() == hypervisorType) return discoverer; - } + } return null; } @@ -1629,7 +1629,7 @@ public class ResourceManagerImpl implements ResourceManager, ResourceService, Ma } } } - + if (s_logger.isDebugEnabled()) { new Request(-1l, -1l, cmds, true, false).logD("Startup request from directly connected host: ", true); } @@ -1670,7 +1670,7 @@ public class ResourceManagerImpl implements ResourceManager, ResourceService, Ma } } } - + if (tempHost != null) { /* Change agent status to Alert */ _agentMgr.agentStatusTransitTo(tempHost, Status.Event.AgentDisconnected, _nodeId); @@ -2179,7 +2179,7 @@ public class ResourceManagerImpl implements ResourceManager, ResourceService, Ma @Override public Pair findPod(VirtualMachineTemplate template, ServiceOfferingVO offering, DataCenterVO dc, long accountId, Set avoids) { - for(PodAllocator allocator : _podAllocators) { + for(PodAllocator allocator : _podAllocators) { final Pair pod = allocator.allocateTo(template, offering, dc, accountId, avoids); if (pod != null) { return pod; diff 
--git a/server/src/com/cloud/server/CloudStackComponentComposer.java b/server/src/com/cloud/server/CloudStackComponentComposer.java new file mode 100644 index 00000000000..ae063259a53 --- /dev/null +++ b/server/src/com/cloud/server/CloudStackComponentComposer.java @@ -0,0 +1,184 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.server; + +import java.util.ArrayList; +import java.util.List; + +import javax.annotation.PostConstruct; +import javax.inject.Inject; + +import org.springframework.stereotype.Component; + +import com.cloud.agent.AgentManager; +import com.cloud.alert.AlertManagerImpl; +import com.cloud.api.query.QueryManagerImpl; +import com.cloud.async.AsyncJobManager; +import com.cloud.async.SyncQueueManager; +import com.cloud.capacity.CapacityManagerImpl; +import com.cloud.cluster.ClusterFenceManagerImpl; +import com.cloud.cluster.ClusterManagerImpl; +import com.cloud.configuration.ConfigurationManager; +import com.cloud.consoleproxy.ConsoleProxyManager; +import com.cloud.dao.EntityManagerImpl; +import com.cloud.ha.HighAvailabilityManager; +import com.cloud.hypervisor.HypervisorGuruManagerImpl; +import com.cloud.keystore.KeystoreManager; +import com.cloud.maint.UpgradeManagerImpl; +import com.cloud.network.ExternalLoadBalancerUsageManager; +import com.cloud.network.NetworkManagerImpl; +import com.cloud.network.StorageNetworkManager; +import com.cloud.network.as.AutoScaleManagerImpl; +import com.cloud.network.firewall.FirewallManagerImpl; +import com.cloud.network.lb.LoadBalancingRulesManagerImpl; +import com.cloud.network.router.VpcVirtualNetworkApplianceManager; +import com.cloud.network.rules.RulesManagerImpl; +import com.cloud.network.security.SecurityGroupManagerImpl2; +import com.cloud.network.vpc.NetworkACLManagerImpl; +import com.cloud.network.vpc.VpcManagerImpl; +import com.cloud.network.vpn.RemoteAccessVpnManagerImpl; +import com.cloud.network.vpn.Site2SiteVpnManagerImpl; +import com.cloud.projects.ProjectManagerImpl; +import com.cloud.resource.ResourceManagerImpl; +import com.cloud.resourcelimit.ResourceLimitManagerImpl; +import com.cloud.storage.OCFS2Manager; +import com.cloud.storage.StorageManagerImpl; +import com.cloud.storage.download.DownloadMonitor; +import com.cloud.storage.s3.S3Manager; +import com.cloud.storage.secondary.SecondaryStorageManagerImpl; +import com.cloud.storage.snapshot.SnapshotManagerImpl; +import com.cloud.storage.snapshot.SnapshotSchedulerImpl; +import com.cloud.storage.swift.SwiftManager; +import com.cloud.storage.upload.UploadMonitor; +import com.cloud.tags.TaggedResourceManagerImpl; +import com.cloud.template.TemplateManagerImpl; +import com.cloud.user.AccountManagerImpl; +import com.cloud.user.DomainManagerImpl; +import com.cloud.utils.component.Manager; +import 
com.cloud.vm.UserVmManagerImpl; +import com.cloud.vm.VirtualMachineManager; + +@Component +public class CloudStackComponentComposer { + // @Inject CheckPointManagerImpl _checkPointMgr; + @Inject ClusterManagerImpl _clusterMgr; + @Inject ClusterFenceManagerImpl _clusterFenceMgr; + @Inject AgentManager _agentMgr; + @Inject SyncQueueManager _syncQueueMgr; + @Inject AsyncJobManager _jobMgr; + @Inject ConfigurationManager _confMgr; + @Inject AccountManagerImpl _accountMgr; + @Inject DomainManagerImpl _domainMgr; + @Inject ResourceLimitManagerImpl _resLimitMgr; + @Inject NetworkManagerImpl _networkMgr; + @Inject DownloadMonitor _downloadMonitor; + @Inject UploadMonitor _uploadMonitor; + @Inject KeystoreManager _ksMgr; + @Inject SecondaryStorageManagerImpl _ssMgr; + @Inject UserVmManagerImpl _userVmMgr; + @Inject UpgradeManagerImpl _upgradeMgr; + @Inject StorageManagerImpl _storageMgr; + @Inject AlertManagerImpl _alertMgr; + @Inject TemplateManagerImpl _tmplMgr; + @Inject SnapshotManagerImpl _snapshotMgr; + @Inject SnapshotSchedulerImpl _snapshotScheduleMgr; + @Inject SecurityGroupManagerImpl2 _sgMgr; + @Inject EntityManagerImpl _entityMgr; + @Inject LoadBalancingRulesManagerImpl _lbRuleMgr; + @Inject AutoScaleManagerImpl _asMgr; + @Inject RulesManagerImpl _rulesMgr; + @Inject RemoteAccessVpnManagerImpl _acVpnMgr; + @Inject CapacityManagerImpl _capacityMgr; + @Inject VirtualMachineManager _vmMgr; + @Inject HypervisorGuruManagerImpl _hvGuruMgr; + @Inject ResourceManagerImpl _resMgr; + @Inject OCFS2Manager _ocfsMgr; + @Inject FirewallManagerImpl _fwMgr; + @Inject ConsoleProxyManager _cpMgr; + @Inject ProjectManagerImpl _prjMgr; + @Inject SwiftManager _swiftMgr; + @Inject S3Manager _s3Mgr; + @Inject StorageNetworkManager _storageNetworkMgr; + @Inject ExternalLoadBalancerUsageManager _extlbUsageMgr; + @Inject HighAvailabilityManager _haMgr; + @Inject VpcManagerImpl _vpcMgr; + @Inject VpcVirtualNetworkApplianceManager _vpcNetApplianceMgr; + @Inject NetworkACLManagerImpl _networkAclMgr; + @Inject TaggedResourceManagerImpl _taggedResMgr; + @Inject Site2SiteVpnManagerImpl _s2sVpnMgr; + @Inject QueryManagerImpl _queryMgr; + + List _managers = new ArrayList(); + + public CloudStackComponentComposer() { + } + + @PostConstruct + void init() { + // _managers.add(_checkPointMgr); + _managers.add(_clusterMgr); + _managers.add(_clusterFenceMgr); + _managers.add(_agentMgr); + _managers.add(_syncQueueMgr); + _managers.add(_jobMgr); + _managers.add(_confMgr); + _managers.add(_accountMgr); + _managers.add(_domainMgr); + _managers.add(_resLimitMgr); + _managers.add(_networkMgr); + _managers.add(_downloadMonitor); + _managers.add(_uploadMonitor); + _managers.add(_ksMgr); + _managers.add(_ssMgr); + _managers.add(_userVmMgr); + _managers.add(_upgradeMgr); + _managers.add(_storageMgr); + _managers.add(_alertMgr); + _managers.add(_tmplMgr); + _managers.add(_snapshotMgr); + _managers.add(_snapshotScheduleMgr); + _managers.add(_sgMgr); + _managers.add(_entityMgr); + _managers.add(_lbRuleMgr); + _managers.add(_asMgr); + _managers.add(_rulesMgr); + _managers.add(_acVpnMgr); + _managers.add(_capacityMgr); + _managers.add(_vmMgr); + _managers.add(_hvGuruMgr); + _managers.add(_resMgr); + _managers.add(_ocfsMgr); + _managers.add(_fwMgr); + _managers.add(_cpMgr); + _managers.add(_prjMgr); + _managers.add(_swiftMgr); + _managers.add(_s3Mgr); + _managers.add(_storageNetworkMgr); + _managers.add(_extlbUsageMgr); + _managers.add(_haMgr); + _managers.add(_vpcMgr); + _managers.add(_vpcNetApplianceMgr); + _managers.add(_networkAclMgr);
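+ // NOTE: registration order matters here; ManagementServerImpl.startManagers() walks this list in sequence, so the order of these add() calls fixes the order in which the managers are configured and started. + 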
_managers.add(_taggedResMgr); + _managers.add(_s2sVpnMgr); + _managers.add(_queryMgr); + } + + public List getManagers() { + return _managers; + } +} diff --git a/server/src/com/cloud/server/ConfigurationServerImpl.java b/server/src/com/cloud/server/ConfigurationServerImpl.java index 4b4d8df3ef6..77de9bb2beb 100755 --- a/server/src/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/com/cloud/server/ConfigurationServerImpl.java @@ -287,7 +287,7 @@ public class ConfigurationServerImpl implements ConfigurationServer { _identityDao.initializeDefaultUuid("user_ip_address"); _identityDao.initializeDefaultUuid("counter"); } - */ + */ private String getMountParent() { return getEnvironmentProperty("mount.parent"); diff --git a/server/src/com/cloud/server/ManagementServerExtImpl.java b/server/src/com/cloud/server/ManagementServerExtImpl.java index 03e50ebbe69..ed05395d45c 100644 --- a/server/src/com/cloud/server/ManagementServerExtImpl.java +++ b/server/src/com/cloud/server/ManagementServerExtImpl.java @@ -32,8 +32,8 @@ import com.cloud.domain.dao.DomainDao; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; import com.cloud.projects.Project; +import com.cloud.utils.PropertiesUtil; import org.apache.cloudstack.api.response.UsageTypeResponse; -import org.springframework.stereotype.Component; import com.cloud.usage.UsageJobVO; import com.cloud.usage.UsageTypes; @@ -48,7 +48,6 @@ import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; -@Component public class ManagementServerExtImpl extends ManagementServerImpl implements ManagementServerExt { @Inject private AccountDao _accountDao; @Inject private DomainDao _domainDao; @@ -209,8 +208,9 @@ public class ManagementServerExtImpl extends ManagementServerImpl implements Man } @Override - public String[] getPropertiesFiles() { - return new String[] { "commands.properties", "commands-ext.properties" }; + public Map getProperties() { + return PropertiesUtil.processConfigFile(new String[] + { "commands.properties", "commands-ext.properties" }); } private Date computeAdjustedTime(Date initialDate, TimeZone targetTZ, boolean adjustToDayStart) { diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index 60bab003d9e..65501e3b416 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -58,7 +58,6 @@ import org.apache.cloudstack.api.command.admin.pod.ListPodsByCmd; import org.apache.cloudstack.api.command.admin.resource.ListAlertsCmd; import org.apache.cloudstack.api.command.admin.resource.ListCapacityCmd; import org.apache.cloudstack.api.command.admin.resource.UploadCustomCertificateCmd; -import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd; import org.apache.cloudstack.api.command.admin.systemvm.DestroySystemVmCmd; import org.apache.cloudstack.api.command.admin.systemvm.ListSystemVMsCmd; import org.apache.cloudstack.api.command.admin.systemvm.RebootSystemVmCmd; @@ -86,7 +85,6 @@ import org.apache.cloudstack.api.command.user.zone.ListZonesByCmd; import org.apache.cloudstack.api.response.ExtractResponse; import org.apache.commons.codec.binary.Base64; import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; import com.cloud.agent.api.GetVncPortAnswer; @@ -186,7 +184,6 @@ import com.cloud.storage.GuestOsCategory; 
import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageManager; -import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolVO; import com.cloud.storage.Upload; import com.cloud.storage.Upload.Mode; @@ -223,6 +220,7 @@ import com.cloud.utils.EnumUtils; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.PasswordGenerator; +import com.cloud.utils.PropertiesUtil; import com.cloud.utils.Ternary; import com.cloud.utils.component.Adapter; import com.cloud.utils.component.ComponentContext; @@ -381,8 +379,14 @@ public class ManagementServerImpl implements ManagementServer { S3Manager _s3Mgr; @Inject - ComponentContext _placeholder; // create a dependency to ComponentContext so that it can be loaded beforehead - + ComponentContext _forceContextRef; // create a dependency to ComponentContext so that it can be loaded beforehand + + @Inject + EventUtils _forceEventUtilsRef; + + @Inject + CloudStackComponentComposer _componentRegistry; + private final ScheduledExecutorService _eventExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("EventChecker")); private KeystoreManager _ksMgr; @@ -393,7 +397,7 @@ public class ManagementServerImpl implements ManagementServer { @Inject List _userAuthenticators; private String _hashKey = null; - + public ManagementServerImpl() { } @@ -448,12 +452,11 @@ public class ManagementServerImpl implements ManagementServer { Map daos = ComponentContext.getApplicationContext().getBeansOfType( GenericDaoBase.class); + Map params = new HashMap(); for (GenericDaoBase dao : daos.values()) { try { s_logger.info("Starting dao " + ComponentContext.getTargetClass(dao).getName()); - - // TODO - // dao.configure(dao.getClass().getSimpleName(), params); + dao.configure(dao.getClass().getSimpleName(), params); } catch (Exception e) { s_logger.error("Problems with running checker:" + ComponentContext.getTargetClass(dao).getName(), e); System.exit(1); @@ -462,12 +465,8 @@ public class ManagementServerImpl implements ManagementServer { } private void startManagers() { - @SuppressWarnings("rawtypes") - Map managers = ComponentContext.getApplicationContext().getBeansOfType( - Manager.class); - - Map params = new HashMap(); - for(Manager manager : managers.values()) { + Map params = new HashMap(); + for(Manager manager : _componentRegistry.getManagers()) { s_logger.info("Start manager: " + ComponentContext.getTargetClass(manager).getName() + "..."); try { if(!manager.configure(manager.getClass().getSimpleName(), params)) { @@ -2392,8 +2391,9 @@ public class ManagementServerImpl implements ManagementServer { } @Override - public String[] getPropertiesFiles() { - return new String[] { "commands.properties" }; + public Map getProperties() { + return PropertiesUtil.processConfigFile(new String[] + { "commands.properties" }); } protected class EventPurgeTask implements Runnable { diff --git a/server/src/com/cloud/servlet/CloudStartupServlet.java b/server/src/com/cloud/servlet/CloudStartupServlet.java index de133abb16e..bbcf3535300 100755 --- a/server/src/com/cloud/servlet/CloudStartupServlet.java +++ b/server/src/com/cloud/servlet/CloudStartupServlet.java @@ -27,7 +27,6 @@ import org.apache.log4j.Logger; import org.apache.log4j.PropertyConfigurator; import org.apache.log4j.xml.DOMConfigurator; -import com.cloud.api.ApiServer; import com.cloud.exception.InvalidParameterValueException; import com.cloud.server.ConfigurationServer; import com.cloud.server.ManagementServer; @@ 
-42,17 +41,14 @@ public class CloudStartupServlet extends HttpServlet implements ServletContextLi @Override public void init() throws ServletException { - initLog4j(); - - // Save Configuration Values - ConfigurationServer c = ComponentContext.getCompanent(ConfigurationServer.class); + initLog4j(); + ConfigurationServer c = (ConfigurationServer)ComponentContext.getComponent(ConfigurationServer.Name); try { c.persistDefaultValues(); - - ManagementServer ms = ComponentContext.getCompanent(ManagementServer.class); + ManagementServer ms = (ManagementServer)ComponentContext.getComponent(ManagementServer.Name); ms.startup(); ms.enableAdminUser("password"); - ApiServer.initApiServer(ms.getPropertiesFiles()); + //ApiServer.initApiServer(); } catch (InvalidParameterValueException ipve) { s_logger.error("Exception starting management server ", ipve); throw new ServletException (ipve.getMessage()); @@ -75,18 +71,18 @@ public class CloudStartupServlet extends HttpServlet implements ServletContextLi @Override public void contextDestroyed(ServletContextEvent sce) { } - + private void initLog4j() { - File file = PropertiesUtil.findConfigFile("log4j-cloud.xml"); - if (file != null) { - s_logger.info("log4j configuration found at " + file.getAbsolutePath()); - DOMConfigurator.configureAndWatch(file.getAbsolutePath()); - } else { - file = PropertiesUtil.findConfigFile("log4j-cloud.properties"); - if (file != null) { - s_logger.info("log4j configuration found at " + file.getAbsolutePath()); - PropertyConfigurator.configureAndWatch(file.getAbsolutePath()); - } - } - } + File file = PropertiesUtil.findConfigFile("log4j-cloud.xml"); + if (file != null) { + s_logger.info("log4j configuration found at " + file.getAbsolutePath()); + DOMConfigurator.configureAndWatch(file.getAbsolutePath()); + } else { + file = PropertiesUtil.findConfigFile("log4j-cloud.properties"); + if (file != null) { + s_logger.info("log4j configuration found at " + file.getAbsolutePath()); + PropertyConfigurator.configureAndWatch(file.getAbsolutePath()); + } + } + } } diff --git a/server/src/com/cloud/servlet/ConsoleProxyServlet.java b/server/src/com/cloud/servlet/ConsoleProxyServlet.java index afa5c407316..6a47ba28642 100644 --- a/server/src/com/cloud/servlet/ConsoleProxyServlet.java +++ b/server/src/com/cloud/servlet/ConsoleProxyServlet.java @@ -25,17 +25,20 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.annotation.PostConstruct; import javax.crypto.Mac; import javax.crypto.spec.SecretKeySpec; +import javax.inject.Inject; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; +import org.apache.cloudstack.api.IdentityService; import org.apache.commons.codec.binary.Base64; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; -import org.apache.cloudstack.api.IdentityService; import com.cloud.exception.PermissionDeniedException; import com.cloud.host.HostVO; import com.cloud.server.ManagementServer; @@ -46,7 +49,6 @@ import com.cloud.user.User; import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.Transaction; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; @@ -57,17 +59,27 @@ import com.cloud.vm.VirtualMachineManager; * Console access : /conosole?cmd=access&vm=xxx * Authentication : /console?cmd=auth&vm=xxx&sid=xxx */ 
+@Component("consoleServlet") public class ConsoleProxyServlet extends HttpServlet { private static final long serialVersionUID = -5515382620323808168L; public static final Logger s_logger = Logger.getLogger(ConsoleProxyServlet.class.getName()); private static final int DEFAULT_THUMBNAIL_WIDTH = 144; private static final int DEFAULT_THUMBNAIL_HEIGHT = 110; - private final static AccountManager _accountMgr = ComponentLocator.getLocator(ManagementServer.Name).getManager(AccountManager.class); - private final static VirtualMachineManager _vmMgr = ComponentLocator.getLocator(ManagementServer.Name).getManager(VirtualMachineManager.class); - private final static ManagementServer _ms = (ManagementServer)ComponentLocator.getComponent(ManagementServer.Name); - private final static IdentityService _identityService = ComponentLocator.getLocator(ManagementServer.Name).getManager(IdentityService.class); + @Inject AccountManager _accountMgr; + @Inject VirtualMachineManager _vmMgr; + @Inject ManagementServer _ms; + @Inject IdentityService _identityService; + static ManagementServer s_ms; + public ConsoleProxyServlet() { + } + + @PostConstruct + void initComponent() { + s_ms = _ms; + } + @Override protected void doPost(HttpServletRequest req, HttpServletResponse resp) { doGet(req, resp); @@ -398,7 +410,7 @@ public class ConsoleProxyServlet extends HttpServlet { long ts = normalizedHashTime.getTime(); ts = ts/60000; // round up to 1 minute - String secretKey = _ms.getHashKey(); + String secretKey = s_ms.getHashKey(); SecretKeySpec keySpec = new SecretKeySpec(secretKey.getBytes(), "HmacSHA1"); mac.init(keySpec); diff --git a/server/src/com/cloud/servlet/RegisterCompleteServlet.java b/server/src/com/cloud/servlet/RegisterCompleteServlet.java index 7755851a2d8..59224553b35 100644 --- a/server/src/com/cloud/servlet/RegisterCompleteServlet.java +++ b/server/src/com/cloud/servlet/RegisterCompleteServlet.java @@ -19,125 +19,111 @@ package com.cloud.servlet; import java.net.URLEncoder; import java.util.List; +import javax.inject.Inject; import javax.servlet.ServletContextEvent; import javax.servlet.ServletContextListener; -import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.configuration.Configuration; import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.server.ManagementServer; import com.cloud.user.Account; import com.cloud.user.AccountService; import com.cloud.user.User; import com.cloud.user.UserVO; import com.cloud.user.dao.UserDao; import com.cloud.utils.SerialVersionUID; -import com.cloud.utils.component.ComponentLocator; +@Component("registerCompleteServlet") public class RegisterCompleteServlet extends HttpServlet implements ServletContextListener { - public static final Logger s_logger = Logger.getLogger(RegisterCompleteServlet.class.getName()); - + public static final Logger s_logger = Logger.getLogger(RegisterCompleteServlet.class.getName()); + static final long serialVersionUID = SerialVersionUID.CloudStartupServlet; - - protected static AccountService _accountSvc = null; - protected static ConfigurationDao _configDao = null; - protected static UserDao _userDao = null; - - @Override - public void init() throws ServletException { - ComponentLocator locator = ComponentLocator.getLocator(ManagementServer.Name); - _accountSvc = locator.getManager(AccountService.class); - 
_configDao = locator.getDao(ConfigurationDao.class); - _userDao = locator.getDao(UserDao.class); - } - - @Override - public void contextInitialized(ServletContextEvent sce) { - try { - init(); - } catch (ServletException e) { - s_logger.error("Exception starting management server ", e); - throw new RuntimeException(e); - } - } - - @Override - public void contextDestroyed(ServletContextEvent sce) { - } - - @Override + + @Inject AccountService _accountSvc = null; + @Inject ConfigurationDao _configDao = null; + @Inject UserDao _userDao = null; + + @Override + public void contextInitialized(ServletContextEvent sce) { + } + + @Override + public void contextDestroyed(ServletContextEvent sce) { + } + + @Override protected void doPost(HttpServletRequest req, HttpServletResponse resp) { - doGet(req, resp); - } - - @Override + doGet(req, resp); + } + + @Override protected void doGet(HttpServletRequest req, HttpServletResponse resp) { - String registrationToken = req.getParameter("token"); - String expires = req.getParameter("expires"); - int statusCode = HttpServletResponse.SC_OK; - String responseMessage = null; - - if (registrationToken == null || registrationToken.trim().length() == 0) { - statusCode = 503; - responseMessage = "{ \"registration_info\" : { \"errorcode\" : \"503\", \"errortext\" : \"Missing token\" } }"; - } else { - s_logger.info("Attempting to register user account with token = "+registrationToken); - User resourceAdminUser = _accountSvc.getActiveUserByRegistrationToken(registrationToken); - if (resourceAdminUser != null) { - if(resourceAdminUser.isRegistered()) { - statusCode = 503; - responseMessage = "{ \"registration_info\" : { \"errorcode\" : \"503\", \"errortext\" : \"Expired token = " + registrationToken + "\" } }"; - } else { - if(expires != null && expires.toLowerCase().equals("true")){ - _accountSvc.markUserRegistered(resourceAdminUser.getId()); - } - - Account resourceAdminAccount = _accountSvc.getActiveAccountById(resourceAdminUser.getAccountId()); - Account rsUserAccount = _accountSvc.getActiveAccountByName(resourceAdminAccount.getAccountName()+"-user", resourceAdminAccount.getDomainId()); - - List users = _userDao.listByAccount(rsUserAccount.getId()); - User rsUser = users.get(0); - - Configuration config = _configDao.findByName("endpointe.url"); - - StringBuffer sb = new StringBuffer(); - sb.append("{ \"registration_info\" : { \"endpoint_url\" : \""+encodeParam(config.getValue())+"\", "); - sb.append("\"domain_id\" : \""+resourceAdminAccount.getDomainId()+"\", "); - sb.append("\"admin_account\" : \""+encodeParam(resourceAdminUser.getUsername())+"\", "); - sb.append("\"admin_account_api_key\" : \""+resourceAdminUser.getApiKey()+"\", "); - sb.append("\"admin_account_secret_key\" : \""+resourceAdminUser.getSecretKey()+"\", "); - sb.append("\"user_account\" : \""+encodeParam(rsUser.getUsername())+"\", "); - sb.append("\"user_account_api_key\" : \""+rsUser.getApiKey()+"\", "); - sb.append("\"user_account_secret_key\" : \""+rsUser.getSecretKey()+"\" "); - sb.append("} }"); - responseMessage = sb.toString(); - } - } else { - statusCode = 503; - responseMessage = "{ \"registration_info\" : { \"errorcode\" : \"503\", \"errortext\" : \"Invalid token = " + registrationToken + "\" } }"; - } - } - - try { - resp.setContentType("text/javascript; charset=UTF-8"); - resp.setStatus(statusCode); - resp.getWriter().print(responseMessage); - } catch (Exception ex) { - s_logger.error("unknown exception writing register complete response", ex); + String registrationToken = 
req.getParameter("token"); + String expires = req.getParameter("expires"); + int statusCode = HttpServletResponse.SC_OK; + String responseMessage = null; + + if (registrationToken == null || registrationToken.trim().length() == 0) { + statusCode = 503; + responseMessage = "{ \"registration_info\" : { \"errorcode\" : \"503\", \"errortext\" : \"Missing token\" } }"; + } else { + s_logger.info("Attempting to register user account with token = "+registrationToken); + User resourceAdminUser = _accountSvc.getActiveUserByRegistrationToken(registrationToken); + if (resourceAdminUser != null) { + if(resourceAdminUser.isRegistered()) { + statusCode = 503; + responseMessage = "{ \"registration_info\" : { \"errorcode\" : \"503\", \"errortext\" : \"Expired token = " + registrationToken + "\" } }"; + } else { + if(expires != null && expires.toLowerCase().equals("true")){ + _accountSvc.markUserRegistered(resourceAdminUser.getId()); + } + + Account resourceAdminAccount = _accountSvc.getActiveAccountById(resourceAdminUser.getAccountId()); + Account rsUserAccount = _accountSvc.getActiveAccountByName(resourceAdminAccount.getAccountName()+"-user", resourceAdminAccount.getDomainId()); + + List users = _userDao.listByAccount(rsUserAccount.getId()); + User rsUser = users.get(0); + + Configuration config = _configDao.findByName("endpointe.url"); + + StringBuffer sb = new StringBuffer(); + sb.append("{ \"registration_info\" : { \"endpoint_url\" : \""+encodeParam(config.getValue())+"\", "); + sb.append("\"domain_id\" : \""+resourceAdminAccount.getDomainId()+"\", "); + sb.append("\"admin_account\" : \""+encodeParam(resourceAdminUser.getUsername())+"\", "); + sb.append("\"admin_account_api_key\" : \""+resourceAdminUser.getApiKey()+"\", "); + sb.append("\"admin_account_secret_key\" : \""+resourceAdminUser.getSecretKey()+"\", "); + sb.append("\"user_account\" : \""+encodeParam(rsUser.getUsername())+"\", "); + sb.append("\"user_account_api_key\" : \""+rsUser.getApiKey()+"\", "); + sb.append("\"user_account_secret_key\" : \""+rsUser.getSecretKey()+"\" "); + sb.append("} }"); + responseMessage = sb.toString(); + } + } else { + statusCode = 503; + responseMessage = "{ \"registration_info\" : { \"errorcode\" : \"503\", \"errortext\" : \"Invalid token = " + registrationToken + "\" } }"; + } } - } - - private String encodeParam(String value) { - try { - return URLEncoder.encode(value, "UTF-8").replaceAll("\\+", "%20"); - } catch (Exception e) { - s_logger.warn("Unable to encode: " + value); - } - return value; - } + + try { + resp.setContentType("text/javascript; charset=UTF-8"); + resp.setStatus(statusCode); + resp.getWriter().print(responseMessage); + } catch (Exception ex) { + s_logger.error("unknown exception writing register complete response", ex); + } + } + + private String encodeParam(String value) { + try { + return URLEncoder.encode(value, "UTF-8").replaceAll("\\+", "%20"); + } catch (Exception e) { + s_logger.warn("Unable to encode: " + value); + } + return value; + } } diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index ef36a829ccb..bff76b98da1 100755 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -28,7 +28,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Date; -import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -45,7 +44,10 @@ import 
javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.cloudstack.api.command.admin.storage.*; +import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; +import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; +import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd; +import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; import org.apache.log4j.Logger; @@ -76,14 +78,12 @@ import com.cloud.agent.api.to.VolumeTO; import com.cloud.agent.manager.Commands; import com.cloud.alert.AlertManager; import com.cloud.api.ApiDBUtils; -import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; import com.cloud.async.AsyncJobManager; import com.cloud.capacity.Capacity; import com.cloud.capacity.CapacityManager; import com.cloud.capacity.CapacityState; import com.cloud.capacity.CapacityVO; import com.cloud.capacity.dao.CapacityDao; -import com.cloud.cluster.CheckPointManager; import com.cloud.cluster.ClusterManagerListener; import com.cloud.cluster.ManagementServerHostVO; import com.cloud.configuration.Config; @@ -172,8 +172,7 @@ import com.cloud.utils.EnumUtils; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.UriUtils; -import com.cloud.utils.component.Adapters; -import com.cloud.utils.component.ComponentLocator; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.Manager; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; @@ -322,8 +321,6 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag @Inject protected ResourceManager _resourceMgr; @Inject - protected CheckPointManager _checkPointMgr; - @Inject protected DownloadMonitor _downloadMonitor; @Inject protected ResourceTagDao _resourceTagDao; @@ -355,14 +352,14 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag protected BigDecimal _overProvisioningFactor = new BigDecimal(1); private long _maxVolumeSizeInGb; private long _serverId; - private StateMachine2 _volStateMachine; + private final StateMachine2 _volStateMachine; private int _customDiskOfferingMinSize = 1; private int _customDiskOfferingMaxSize = 1024; private double _storageUsedThreshold = 1.0d; private double _storageAllocatedThreshold = 1.0d; protected BigDecimal _storageOverprovisioningFactor = new BigDecimal(1); - private boolean _recreateSystemVmEnabled; + private boolean _recreateSystemVmEnabled; public boolean share(VMInstanceVO vm, List vols, HostVO host, boolean cancelPreviousShare) throws StorageUnavailableException { @@ -653,9 +650,9 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag Pair volumeDetails = createVolumeFromSnapshot(volume, snapshot); if (volumeDetails != null) { createdVolume = volumeDetails.first(); - UsageEventVO usageEvent = new UsageEventVO(EventTypes.EVENT_VOLUME_CREATE, createdVolume.getAccountId(), createdVolume.getDataCenterId(), createdVolume.getId(), createdVolume.getName(), - createdVolume.getDiskOfferingId(), null, createdVolume.getSize()); - _usageEventDao.persist(usageEvent); + UsageEventVO usageEvent = new UsageEventVO(EventTypes.EVENT_VOLUME_CREATE, createdVolume.getAccountId(), createdVolume.getDataCenterId(), createdVolume.getId(), createdVolume.getName(), + 
createdVolume.getDiskOfferingId(), null, createdVolume.getSize()); + _usageEventDao.persist(usageEvent); } return createdVolume; } @@ -739,32 +736,32 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag @DB public VolumeVO copyVolumeFromSecToPrimary(VolumeVO volume, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, Long clusterId, ServiceOfferingVO offering, DiskOfferingVO diskOffering, List avoids, long size, HypervisorType hyperType) throws NoTransitionException { - - final HashSet avoidPools = new HashSet(avoids); - DiskProfile dskCh = createDiskCharacteristics(volume, template, dc, diskOffering); - dskCh.setHyperType(vm.getHypervisorType()); - // Find a suitable storage to create volume on - StoragePoolVO destPool = findStoragePool(dskCh, dc, pod, clusterId, null, vm, avoidPools); - - // Copy the volume from secondary storage to the destination storage pool - stateTransitTo(volume, Event.CopyRequested); - VolumeHostVO volumeHostVO = _volumeHostDao.findByVolumeId(volume.getId()); - HostVO secStorage = _hostDao.findById(volumeHostVO.getHostId()); - String secondaryStorageURL = secStorage.getStorageUrl(); - String[] volumePath = volumeHostVO.getInstallPath().split("/"); - String volumeUUID = volumePath[volumePath.length - 1].split("\\.")[0]; - + + final HashSet avoidPools = new HashSet(avoids); + DiskProfile dskCh = createDiskCharacteristics(volume, template, dc, diskOffering); + dskCh.setHyperType(vm.getHypervisorType()); + // Find a suitable storage to create volume on + StoragePoolVO destPool = findStoragePool(dskCh, dc, pod, clusterId, null, vm, avoidPools); + + // Copy the volume from secondary storage to the destination storage pool + stateTransitTo(volume, Event.CopyRequested); + VolumeHostVO volumeHostVO = _volumeHostDao.findByVolumeId(volume.getId()); + HostVO secStorage = _hostDao.findById(volumeHostVO.getHostId()); + String secondaryStorageURL = secStorage.getStorageUrl(); + String[] volumePath = volumeHostVO.getInstallPath().split("/"); + String volumeUUID = volumePath[volumePath.length - 1].split("\\.")[0]; + CopyVolumeCommand cvCmd = new CopyVolumeCommand(volume.getId(), volumeUUID, destPool, secondaryStorageURL, false, _copyvolumewait); CopyVolumeAnswer cvAnswer; - try { + try { cvAnswer = (CopyVolumeAnswer) sendToPool(destPool, cvCmd); } catch (StorageUnavailableException e1) { - stateTransitTo(volume, Event.CopyFailed); + stateTransitTo(volume, Event.CopyFailed); throw new CloudRuntimeException("Failed to copy the volume from secondary storage to the destination primary storage pool."); } if (cvAnswer == null || !cvAnswer.getResult()) { - stateTransitTo(volume, Event.CopyFailed); + stateTransitTo(volume, Event.CopyFailed); throw new CloudRuntimeException("Failed to copy the volume from secondary storage to the destination primary storage pool."); } Transaction txn = Transaction.currentTxn(); @@ -778,11 +775,11 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag UsageEventVO usageEvent = new UsageEventVO(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), volume.getDiskOfferingId(), null, volume.getSize()); _usageEventDao.persist(usageEvent); _volumeHostDao.remove(volumeHostVO.getId()); - txn.commit(); - return volume; - + txn.commit(); + return volume; + } - + @Override @DB public VolumeVO createVolume(VolumeVO volume, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, Long clusterId, ServiceOfferingVO 
offering, DiskOfferingVO diskOffering, @@ -848,11 +845,11 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag String fullTmpltUrl = tmpltHostUrl + "/" + tmpltHostOn.getInstallPath(); cmd = new CreateCommand(dskCh, fullTmpltUrl, new StorageFilerTO(pool)); } else { - tmpltStoredOn = _tmpltMgr.prepareTemplateForCreate(template, pool); - if (tmpltStoredOn == null) { - continue; - } - cmd = new CreateCommand(dskCh, tmpltStoredOn.getLocalDownloadPath(), new StorageFilerTO(pool)); + tmpltStoredOn = _tmpltMgr.prepareTemplateForCreate(template, pool); + if (tmpltStoredOn == null) { + continue; + } + cmd = new CreateCommand(dskCh, tmpltStoredOn.getLocalDownloadPath(), new StorageFilerTO(pool)); } } else { if (volume.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO == template.getFormat()) { @@ -969,7 +966,7 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag value = _configDao.getValue(Config.RecreateSystemVmEnabled.key()); _recreateSystemVmEnabled = Boolean.parseBoolean(value); - + value = _configDao.getValue(Config.StorageTemplateCleanupEnabled.key()); _templateCleanupEnabled = (value == null ? true : Boolean.parseBoolean(value)); @@ -995,7 +992,7 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag int wrks = NumbersUtil.parseInt(workers, 10); _executor = Executors.newScheduledThreadPool(wrks, new NamedThreadFactory("StorageManager-Scavenger")); - _agentMgr.registerForHostEvents(ComponentLocator.inject(LocalStoragePoolListener.class), true, false, false); + _agentMgr.registerForHostEvents(ComponentContext.inject(LocalStoragePoolListener.class), true, false, false); String maxVolumeSizeInGbString = _configDao.getValue("storage.max.volume.size"); _maxVolumeSizeInGb = NumbersUtil.parseLong(maxVolumeSizeInGbString, 2000); @@ -1536,10 +1533,10 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag // If it does , then you cannot delete the pool if (vlms.first() > 0) { throw new CloudRuntimeException("Cannot delete pool " + sPool.getName() + " as there are associated vols" + - " for this pool"); + " for this pool"); } } - + // First get the host_id from storage_pool_host_ref for given pool id StoragePoolVO lock = _storagePoolDao.acquireInLockTable(sPool.getId()); @@ -1733,7 +1730,7 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag return _volsDao.findById(volume.getId()); } - + /* * Upload the volume to secondary storage. 
* @@ -1742,19 +1739,19 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag @DB @ActionEvent(eventType = EventTypes.EVENT_VOLUME_UPLOAD, eventDescription = "uploading volume", async = true) public VolumeVO uploadVolume(UploadVolumeCmd cmd) throws ResourceAllocationException{ - Account caller = UserContext.current().getCaller(); + Account caller = UserContext.current().getCaller(); long ownerId = cmd.getEntityOwnerId(); Long zoneId = cmd.getZoneId(); String volumeName = cmd.getVolumeName(); String url = cmd.getUrl(); String format = cmd.getFormat(); - - validateVolume(caller, ownerId, zoneId, volumeName, url, format); - VolumeVO volume = persistVolume(caller, ownerId, zoneId, volumeName, url, cmd.getFormat()); - _downloadMonitor.downloadVolumeToStorage(volume, zoneId, url, cmd.getChecksum(), ImageFormat.valueOf(format.toUpperCase())); - return volume; + + validateVolume(caller, ownerId, zoneId, volumeName, url, format); + VolumeVO volume = persistVolume(caller, ownerId, zoneId, volumeName, url, cmd.getFormat()); + _downloadMonitor.downloadVolumeToStorage(volume, zoneId, url, cmd.getChecksum(), ImageFormat.valueOf(format.toUpperCase())); + return volume; } - + private boolean validateVolume(Account caller, long ownerId, Long zoneId, String volumeName, String url, String format) throws ResourceAllocationException{ // permission check @@ -1762,7 +1759,7 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag // Check that the resource limit for volumes won't be exceeded _resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(ownerId), ResourceType.volume); - + // Verify that zone exists DataCenterVO zone = _dcDao.findById(zoneId); @@ -1774,75 +1771,75 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getType())) { throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); } - - if (url.toLowerCase().contains("file://")) { - throw new InvalidParameterValueException("File:// type urls are currently unsupported"); - } - - ImageFormat imgfmt = ImageFormat.valueOf(format.toUpperCase()); - if (imgfmt == null) { - throw new IllegalArgumentException("Image format is incorrect " + format + ". Supported formats are " + EnumUtils.listValues(ImageFormat.values())); - } - + + if (url.toLowerCase().contains("file://")) { + throw new InvalidParameterValueException("File:// type urls are currently unsupported"); + } + + ImageFormat imgfmt = ImageFormat.valueOf(format.toUpperCase()); + if (imgfmt == null) { + throw new IllegalArgumentException("Image format is incorrect " + format + ". 
Supported formats are " + EnumUtils.listValues(ImageFormat.values())); + } + String userSpecifiedName = volumeName; if (userSpecifiedName == null) { userSpecifiedName = getRandomVolumeName(); } - if((!url.toLowerCase().endsWith("vhd"))&&(!url.toLowerCase().endsWith("vhd.zip")) - &&(!url.toLowerCase().endsWith("vhd.bz2"))&&(!url.toLowerCase().endsWith("vhd.gz")) - &&(!url.toLowerCase().endsWith("qcow2"))&&(!url.toLowerCase().endsWith("qcow2.zip")) - &&(!url.toLowerCase().endsWith("qcow2.bz2"))&&(!url.toLowerCase().endsWith("qcow2.gz")) - &&(!url.toLowerCase().endsWith("ova"))&&(!url.toLowerCase().endsWith("ova.zip")) - &&(!url.toLowerCase().endsWith("ova.bz2"))&&(!url.toLowerCase().endsWith("ova.gz")) - &&(!url.toLowerCase().endsWith("img"))&&(!url.toLowerCase().endsWith("raw"))){ - throw new InvalidParameterValueException("Please specify a valid " + format.toLowerCase()); - } - - if ((format.equalsIgnoreCase("vhd") && (!url.toLowerCase().endsWith(".vhd") && !url.toLowerCase().endsWith("vhd.zip") && !url.toLowerCase().endsWith("vhd.bz2") && !url.toLowerCase().endsWith("vhd.gz") )) - || (format.equalsIgnoreCase("qcow2") && (!url.toLowerCase().endsWith(".qcow2") && !url.toLowerCase().endsWith("qcow2.zip") && !url.toLowerCase().endsWith("qcow2.bz2") && !url.toLowerCase().endsWith("qcow2.gz") )) - || (format.equalsIgnoreCase("ova") && (!url.toLowerCase().endsWith(".ova") && !url.toLowerCase().endsWith("ova.zip") && !url.toLowerCase().endsWith("ova.bz2") && !url.toLowerCase().endsWith("ova.gz"))) - || (format.equalsIgnoreCase("raw") && (!url.toLowerCase().endsWith(".img") && !url.toLowerCase().endsWith("raw")))) { - throw new InvalidParameterValueException("Please specify a valid URL. URL:" + url + " is an invalid for the format " + format.toLowerCase()); - } - validateUrl(url); - - return false; - } - - private String validateUrl(String url){ - try { - URI uri = new URI(url); - if ((uri.getScheme() == null) || (!uri.getScheme().equalsIgnoreCase("http") - && !uri.getScheme().equalsIgnoreCase("https") && !uri.getScheme().equalsIgnoreCase("file"))) { - throw new IllegalArgumentException("Unsupported scheme for url: " + url); - } + if((!url.toLowerCase().endsWith("vhd"))&&(!url.toLowerCase().endsWith("vhd.zip")) + &&(!url.toLowerCase().endsWith("vhd.bz2"))&&(!url.toLowerCase().endsWith("vhd.gz")) + &&(!url.toLowerCase().endsWith("qcow2"))&&(!url.toLowerCase().endsWith("qcow2.zip")) + &&(!url.toLowerCase().endsWith("qcow2.bz2"))&&(!url.toLowerCase().endsWith("qcow2.gz")) + &&(!url.toLowerCase().endsWith("ova"))&&(!url.toLowerCase().endsWith("ova.zip")) + &&(!url.toLowerCase().endsWith("ova.bz2"))&&(!url.toLowerCase().endsWith("ova.gz")) + &&(!url.toLowerCase().endsWith("img"))&&(!url.toLowerCase().endsWith("raw"))){ + throw new InvalidParameterValueException("Please specify a valid " + format.toLowerCase()); + } - int port = uri.getPort(); - if (!(port == 80 || port == 443 || port == -1)) { - throw new IllegalArgumentException("Only ports 80 and 443 are allowed"); - } - String host = uri.getHost(); - try { - InetAddress hostAddr = InetAddress.getByName(host); - if (hostAddr.isAnyLocalAddress() || hostAddr.isLinkLocalAddress() || hostAddr.isLoopbackAddress() || hostAddr.isMulticastAddress()) { - throw new IllegalArgumentException("Illegal host specified in url"); - } - if (hostAddr instanceof Inet6Address) { - throw new IllegalArgumentException("IPV6 addresses not supported (" + hostAddr.getHostAddress() + ")"); - } - } catch (UnknownHostException uhe) { - throw new IllegalArgumentException("Unable 
to resolve " + host); - } - - return uri.toString(); - } catch (URISyntaxException e) { - throw new IllegalArgumentException("Invalid URL " + url); - } - + if ((format.equalsIgnoreCase("vhd") && (!url.toLowerCase().endsWith(".vhd") && !url.toLowerCase().endsWith("vhd.zip") && !url.toLowerCase().endsWith("vhd.bz2") && !url.toLowerCase().endsWith("vhd.gz") )) + || (format.equalsIgnoreCase("qcow2") && (!url.toLowerCase().endsWith(".qcow2") && !url.toLowerCase().endsWith("qcow2.zip") && !url.toLowerCase().endsWith("qcow2.bz2") && !url.toLowerCase().endsWith("qcow2.gz") )) + || (format.equalsIgnoreCase("ova") && (!url.toLowerCase().endsWith(".ova") && !url.toLowerCase().endsWith("ova.zip") && !url.toLowerCase().endsWith("ova.bz2") && !url.toLowerCase().endsWith("ova.gz"))) + || (format.equalsIgnoreCase("raw") && (!url.toLowerCase().endsWith(".img") && !url.toLowerCase().endsWith("raw")))) { + throw new InvalidParameterValueException("Please specify a valid URL. URL:" + url + " is invalid for the format " + format.toLowerCase()); + } + validateUrl(url); + + return false; } + + private String validateUrl(String url){ + try { + URI uri = new URI(url); + if ((uri.getScheme() == null) || (!uri.getScheme().equalsIgnoreCase("http") + && !uri.getScheme().equalsIgnoreCase("https") && !uri.getScheme().equalsIgnoreCase("file"))) { + throw new IllegalArgumentException("Unsupported scheme for url: " + url); + } + + int port = uri.getPort(); + if (!(port == 80 || port == 443 || port == -1)) { + throw new IllegalArgumentException("Only ports 80 and 443 are allowed"); + } + String host = uri.getHost(); + try { + InetAddress hostAddr = InetAddress.getByName(host); + if (hostAddr.isAnyLocalAddress() || hostAddr.isLinkLocalAddress() || hostAddr.isLoopbackAddress() || hostAddr.isMulticastAddress()) { + throw new IllegalArgumentException("Illegal host specified in url"); + } + if (hostAddr instanceof Inet6Address) { + throw new IllegalArgumentException("IPV6 addresses not supported (" + hostAddr.getHostAddress() + ")"); + } + } catch (UnknownHostException uhe) { + throw new IllegalArgumentException("Unable to resolve " + host); + } + + return uri.toString(); + } catch (URISyntaxException e) { + throw new IllegalArgumentException("Invalid URL " + url); + } + + } + private VolumeVO persistVolume(Account caller, long ownerId, Long zoneId, String volumeName, String url, String format) { - + Transaction txn = Transaction.currentTxn(); txn.start(); @@ -1861,21 +1858,21 @@ volume = _volsDao.persist(volume); try { - stateTransitTo(volume, Event.UploadRequested); - } catch (NoTransitionException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } + stateTransitTo(volume, Event.UploadRequested); + } catch (NoTransitionException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } UserContext.current().setEventDetails("Volume Id: " + volume.getId()); // Increment resource count during allocation; if actual creation fails, decrement it _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.volume); txn.commit(); - return volume; - } - - + return volume; + } + + /* * Just allocate a volume in the database, don't send the createvolume cmd to hypervisor. 
The volume will be finally * created @@ -2049,9 +2046,9 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag volume = _volsDao.persist(volume); if(cmd.getSnapshotId() == null){ - //for volume created from snapshot, create usage event after volume creation - UsageEventVO usageEvent = new UsageEventVO(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), diskOfferingId, null, size); - _usageEventDao.persist(usageEvent); + //for volume created from snapshot, create usage event after volume creation + UsageEventVO usageEvent = new UsageEventVO(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), diskOfferingId, null, size); + _usageEventDao.persist(usageEvent); } UserContext.current().setEventDetails("Volume Id: " + volume.getId()); @@ -2161,7 +2158,7 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag if (capacities.size() == 0) { CapacityVO capacity = new CapacityVO(storagePool.getId(), storagePool.getDataCenterId(), storagePool.getPodId(), storagePool.getClusterId(), allocated, totalOverProvCapacity, capacityType); CapacityState capacityState = _configMgr.findClusterAllocationState(ApiDBUtils.findClusterById(storagePool.getClusterId())) == AllocationState.Disabled ? - CapacityState.Disabled : CapacityState.Enabled; + CapacityState.Disabled : CapacityState.Enabled; capacity.setCapacityState(capacityState); _capacityDao.persist(capacity); } else { @@ -2182,7 +2179,7 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag s_logger.debug("Successfully set Capacity - " + totalOverProvCapacity + " for capacity type - " + capacityType + " , DataCenterId - " + storagePool.getDataCenterId() + ", HostOrPoolId - " + storagePool.getId() + ", PodId " + storagePool.getPodId()); } - + @Override public List getUpHostsInPool(long poolId) { SearchCriteria sc = UpHostsInPoolSearch.create(); @@ -2283,7 +2280,7 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag s_logger.warn("Unable to destroy " + vol.getId(), e); } } - + // remove snapshots in Error state List snapshots = _snapshotDao.listAllByStatus(Snapshot.Status.Error); for (SnapshotVO snapshotVO : snapshots) { @@ -2293,7 +2290,7 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag s_logger.warn("Unable to destroy " + snapshotVO.getId(), e); } } - + } finally { scanLock.unlock(); } @@ -2432,7 +2429,7 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag s_logger.warn("problem cleaning up snapshots in secondary storage " + secondaryStorageHost, e2); } } - + //CleanUp volumes on Secondary Storage. 
for (HostVO secondaryStorageHost : secondaryStorageHosts) { try { @@ -2460,7 +2457,7 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag _volumeHostDao.remove(destroyedVolumeHostVO.getId()); } } - + }catch (Exception e2) { s_logger.warn("problem cleaning up volumes in secondary storage " + secondaryStorageHost, e2); } @@ -2894,12 +2891,12 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag // Check that volume is completely Uploaded if (volume.getState() == Volume.State.UploadOp){ - VolumeHostVO volumeHost = _volumeHostDao.findByVolumeId(volume.getId()); + VolumeHostVO volumeHost = _volumeHostDao.findByVolumeId(volume.getId()); if (volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS){ - throw new InvalidParameterValueException("Please specify a volume that is not uploading"); + throw new InvalidParameterValueException("Please specify a volume that is not uploading"); } } - + // Check that the volume is not already destroyed if (volume.getState() != Volume.State.Destroy) { if (!destroyVolume(volume)) { @@ -3109,7 +3106,6 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag volIds.add(volume.getId()); } - checkPointTaskId = _checkPointMgr.pushCheckPoint(new StorageMigrationCleanupMaid(StorageMigrationCleanupMaid.StorageMigrationState.MIGRATING, volIds)); transitResult = true; } finally { if (!transitResult) { @@ -3166,7 +3162,6 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag s_logger.debug("Failed to change volume state: " + e.toString()); } } - _checkPointMgr.popCheckPoint(checkPointTaskId); } else { // Need a transaction, make sure all the volumes get migrated to new storage pool txn = Transaction.currentTxn(); @@ -3192,11 +3187,6 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag } } transitResult = true; - try { - _checkPointMgr.popCheckPoint(checkPointTaskId); - } catch (Exception e) { - - } } finally { if (!transitResult) { txn.rollback(); @@ -3259,7 +3249,7 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag if (s_logger.isDebugEnabled()) { s_logger.debug("Checking if we need to prepare " + vols.size() + " volumes for " + vm); } - + boolean recreate = _recreateSystemVmEnabled; List recreateVols = new ArrayList(vols.size()); @@ -3270,8 +3260,8 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag assignedPool = dest.getStorageForDisks().get(vol); } if (assignedPool == null && recreate) { - assignedPool = _storagePoolDao.findById(vol.getPoolId()); - + assignedPool = _storagePoolDao.findById(vol.getPoolId()); + } if (assignedPool != null || recreate) { Volume.State state = vol.getState(); @@ -3312,7 +3302,7 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId()); vm.addDisk(new VolumeTO(vol, pool)); } - + } } } else { @@ -3331,10 +3321,10 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag VolumeVO newVol; StoragePool existingPool = null; if (recreate && (dest.getStorageForDisks() == null || dest.getStorageForDisks().get(vol) == null)) { - existingPool = _storagePoolDao.findById(vol.getPoolId()); - s_logger.debug("existing pool: " + existingPool.getId()); + existingPool = _storagePoolDao.findById(vol.getPoolId()); + s_logger.debug("existing pool: " + existingPool.getId()); } - + if (vol.getState() == 
Volume.State.Allocated || vol.getState() == Volume.State.Creating) { newVol = vol; } else { @@ -3429,12 +3419,12 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag if (toBeCreated.getTemplateId() != null) { template = _templateDao.findById(toBeCreated.getTemplateId()); } - + StoragePool pool = null; if (sPool != null) { - pool = sPool; + pool = sPool; } else { - pool = dest.getStorageForDisks().get(toBeCreated); + pool = dest.getStorageForDisks().get(toBeCreated); } if (pool != null) { @@ -3465,12 +3455,12 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag String fullTmpltUrl = tmpltHostUrl + "/" + tmpltHostOn.getInstallPath(); cmd = new CreateCommand(diskProfile, fullTmpltUrl, new StorageFilerTO(pool)); } else { - tmpltStoredOn = _tmpltMgr.prepareTemplateForCreate(template, pool); - if (tmpltStoredOn == null) { - s_logger.debug("Cannot use this pool " + pool + " because we can't propagate template " + template); - return null; - } - cmd = new CreateCommand(diskProfile, tmpltStoredOn.getLocalDownloadPath(), new StorageFilerTO(pool)); + tmpltStoredOn = _tmpltMgr.prepareTemplateForCreate(template, pool); + if (tmpltStoredOn == null) { + s_logger.debug("Cannot use this pool " + pool + " because we can't propagate template " + template); + return null; + } + cmd = new CreateCommand(diskProfile, tmpltStoredOn.getLocalDownloadPath(), new StorageFilerTO(pool)); } } else { if (template != null && Storage.ImageFormat.ISO == template.getFormat()) { @@ -3513,27 +3503,27 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag if (s_logger.isDebugEnabled()) { s_logger.debug("Expunging " + vol); } - + //Find out if the volume is present on secondary storage VolumeHostVO volumeHost = _volumeHostDao.findByVolumeId(vol.getId()); if(volumeHost != null){ - if (volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED){ - HostVO ssHost = _hostDao.findById(volumeHost.getHostId()); - DeleteVolumeCommand dtCommand = new DeleteVolumeCommand(ssHost.getStorageUrl(), volumeHost.getInstallPath()); - Answer answer = _agentMgr.sendToSecStorage(ssHost, dtCommand); - if (answer == null || !answer.getResult()) { - s_logger.debug("Failed to delete " + volumeHost + " due to " + ((answer == null) ? "answer is null" : answer.getDetails())); - return; - } - }else if(volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS){ - s_logger.debug("Volume: " + vol.getName() + " is currently being uploaded; cant' delete it."); - throw new CloudRuntimeException("Please specify a volume that is not currently being uploaded."); - } + if (volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED){ + HostVO ssHost = _hostDao.findById(volumeHost.getHostId()); + DeleteVolumeCommand dtCommand = new DeleteVolumeCommand(ssHost.getStorageUrl(), volumeHost.getInstallPath()); + Answer answer = _agentMgr.sendToSecStorage(ssHost, dtCommand); + if (answer == null || !answer.getResult()) { + s_logger.debug("Failed to delete " + volumeHost + " due to " + ((answer == null) ? 
"answer is null" : answer.getDetails())); + return; + } + }else if(volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS){ + s_logger.debug("Volume: " + vol.getName() + " is currently being uploaded; cant' delete it."); + throw new CloudRuntimeException("Please specify a volume that is not currently being uploaded."); + } _volumeHostDao.remove(volumeHost.getId()); _volumeDao.remove(vol.getId()); return; } - + String vmName = null; if (vol.getVolumeType() == Type.ROOT && vol.getInstanceId() != null) { VirtualMachine vm = _vmInstanceDao.findByIdIncludingRemoved(vol.getInstanceId()); @@ -3578,7 +3568,7 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag } catch (RuntimeException ex) { if (force) { s_logger.info("Failed to expunge volume, but marking volume id=" + vol.getId() + " as expunged anyway " + - "due to force=true. Volume failed to expunge due to ", ex); + "due to force=true. Volume failed to expunge due to ", ex); removeVolume = true; } else { throw ex; @@ -3869,14 +3859,14 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag return null; } } - + @Override public HypervisorType getHypervisorTypeFromFormat(ImageFormat format) { - - if(format == null) { + + if(format == null) { return HypervisorType.None; - } - + } + if (format == ImageFormat.VHD) { return HypervisorType.XenServer; } else if (format == ImageFormat.OVA) { @@ -3965,5 +3955,5 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag } return true; } - + } diff --git a/server/src/com/cloud/storage/StorageMigrationCleanupMaid.java b/server/src/com/cloud/storage/StorageMigrationCleanupMaid.java deleted file mode 100644 index 3ba8483d320..00000000000 --- a/server/src/com/cloud/storage/StorageMigrationCleanupMaid.java +++ /dev/null @@ -1,121 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.storage; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.log4j.Logger; - -import com.cloud.cluster.CheckPointManager; -import com.cloud.cluster.CleanupMaid; -import com.cloud.server.ManagementServer; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.utils.component.ComponentLocator; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.fsm.NoTransitionException; -import com.cloud.utils.fsm.StateMachine2; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachineManager; -import com.cloud.vm.dao.VMInstanceDao; - -public class StorageMigrationCleanupMaid implements CleanupMaid { - private static final Logger s_logger = Logger.getLogger(StorageMigrationCleanupMaid.class); - public static enum StorageMigrationState { - MIGRATING, - MIGRATINGFAILED, - MIGRATINGSUCCESS; - } - - private List _volumesIds = new ArrayList(); - private StorageMigrationState _migrateState; - - public StorageMigrationCleanupMaid() { - - } - - public StorageMigrationCleanupMaid(StorageMigrationState state, List volumes) { - _migrateState = state; - _volumesIds = volumes; - } - - public void updateStaste(StorageMigrationState state) { - _migrateState = state; - } - - @Override - public int cleanup(CheckPointManager checkPointMgr) { - StateMachine2 _stateMachine = Volume.State.getStateMachine(); - - ComponentLocator locator = ComponentLocator.getLocator(ManagementServer.Name); - VolumeDao volDao = locator.getDao(VolumeDao.class); - VMInstanceDao vmDao = locator.getDao(VMInstanceDao.class); - VirtualMachineManager vmMgr = locator.getManager(VirtualMachineManager.class); - Long vmInstanceId = null; - boolean success = true; - Transaction txn = Transaction.open(Transaction.CLOUD_DB); - - try { - txn.start(); - for (Long volumeId : _volumesIds) { - VolumeVO volume = volDao.findById(volumeId); - if (volume == null) { - continue; - } - vmInstanceId = volume.getInstanceId(); - if (_migrateState == StorageMigrationState.MIGRATING && volume.getState() == Volume.State.Migrating) { - try { - _stateMachine.transitTo(volume, Volume.Event.OperationFailed, null, volDao); - } catch (NoTransitionException e) { - s_logger.debug("Failed to transit volume state: " + e.toString()); - success = false; - break; - } - } - } - if (vmInstanceId != null) { - VMInstanceVO vm = vmDao.findById(vmInstanceId); - if (vm != null && vm.getState() == VirtualMachine.State.Migrating) { - try { - vmMgr.stateTransitTo(vm, VirtualMachine.Event.AgentReportStopped, null); - } catch (NoTransitionException e) { - s_logger.debug("Failed to transit vm state"); - success = false; - } - } - } - - if (success) { - txn.commit(); - } - } catch (Exception e) { - s_logger.debug("storage migration cleanup failed:" + e.toString()); - txn.rollback(); - }finally { - txn.close(); - } - - return 0; - } - - @Override - public String getCleanupProcedure() { - return null; - } - -} diff --git a/server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java index 3e1030d077c..4eeae280d8b 100644 --- a/server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java +++ b/server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java @@ -31,7 +31,7 @@ import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; -import 
com.cloud.utils.component.ComponentLocator; +import com.cloud.utils.component.ComponentContext; import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @@ -40,68 +40,66 @@ import com.cloud.vm.VirtualMachineProfile; @Local(value=StoragePoolAllocator.class) public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAllocator { private static final Logger s_logger = Logger.getLogger(GarbageCollectingStoragePoolAllocator.class); - + StoragePoolAllocator _firstFitStoragePoolAllocator; StoragePoolAllocator _localStoragePoolAllocator; @Inject StorageManager _storageMgr; @Inject ConfigurationDao _configDao; boolean _storagePoolCleanupEnabled; - + @Override public boolean allocatorIsCorrectType(DiskProfile dskCh) { - return true; + return true; } - + public Integer getStorageOverprovisioningFactor() { - return null; + return null; } - + public Long getExtraBytesPerVolume() { - return null; + return null; } - + @Override public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { - - if (!_storagePoolCleanupEnabled) { - s_logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped."); - return null; - } - - // Clean up all storage pools - _storageMgr.cleanupStorage(false); - // Determine what allocator to use - StoragePoolAllocator allocator; - if (localStorageAllocationNeeded(dskCh)) { - allocator = _localStoragePoolAllocator; - } else { - allocator = _firstFitStoragePoolAllocator; - } - // Try to find a storage pool after cleanup + if (!_storagePoolCleanupEnabled) { + s_logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped."); + return null; + } + + // Clean up all storage pools + _storageMgr.cleanupStorage(false); + // Determine what allocator to use + StoragePoolAllocator allocator; + if (localStorageAllocationNeeded(dskCh)) { + allocator = _localStoragePoolAllocator; + } else { + allocator = _firstFitStoragePoolAllocator; + } + + // Try to find a storage pool after cleanup ExcludeList myAvoids = new ExcludeList(avoid.getDataCentersToAvoid(), avoid.getPodsToAvoid(), avoid.getClustersToAvoid(), avoid.getHostsToAvoid(), avoid.getPoolsToAvoid()); - + return allocator.allocateToPool(dskCh, vmProfile, plan, myAvoids, returnUpTo); } @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); - - ComponentLocator locator = ComponentLocator.getCurrentLocator(); - - _firstFitStoragePoolAllocator = ComponentLocator.inject(FirstFitStoragePoolAllocator.class); + + _firstFitStoragePoolAllocator = ComponentContext.inject(FirstFitStoragePoolAllocator.class); _firstFitStoragePoolAllocator.configure("GCFirstFitStoragePoolAllocator", params); - _localStoragePoolAllocator = ComponentLocator.inject(LocalStoragePoolAllocator.class); + _localStoragePoolAllocator = ComponentContext.inject(LocalStoragePoolAllocator.class); _localStoragePoolAllocator.configure("GCLocalStoragePoolAllocator", params); - + String storagePoolCleanupEnabled = _configDao.getValue("storage.pool.cleanup.enabled"); _storagePoolCleanupEnabled = (storagePoolCleanupEnabled == null) ? 
true : Boolean.parseBoolean(storagePoolCleanupEnabled); - + return true; } - + public GarbageCollectingStoragePoolAllocator() { } - + } diff --git a/server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java b/server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java index ace7303170c..2c19406fef6 100644 --- a/server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java +++ b/server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java @@ -34,7 +34,7 @@ import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.host.Host; import com.cloud.storage.StoragePool; import com.cloud.storage.Volume.Type; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; diff --git a/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java b/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java index 7868b3619d3..8347c4e1301 100644 --- a/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java +++ b/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java @@ -34,7 +34,7 @@ import com.cloud.storage.SnapshotVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.tags.dao.ResourceTagsDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; diff --git a/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java b/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java index 9a50189277d..4019dffd4ae 100644 --- a/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java +++ b/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java @@ -35,7 +35,7 @@ import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StoragePoolDetailVO; import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.StoragePoolVO; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; diff --git a/server/src/com/cloud/storage/dao/VMTemplateDaoImpl.java b/server/src/com/cloud/storage/dao/VMTemplateDaoImpl.java index 8093fd5f9e2..42f10d34c1b 100755 --- a/server/src/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/server/src/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -59,7 +59,7 @@ import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.template.VirtualMachineTemplate.TemplateFilter; import com.cloud.user.Account; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; diff --git a/server/src/com/cloud/storage/dao/VolumeDaoImpl.java b/server/src/com/cloud/storage/dao/VolumeDaoImpl.java index d4cc692c06a..a189d00fead 100755 --- a/server/src/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/server/src/com/cloud/storage/dao/VolumeDaoImpl.java @@ -39,7 +39,7 @@ import com.cloud.storage.Volume.Type; import com.cloud.storage.VolumeVO; import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; diff --git a/server/src/com/cloud/storage/listener/StoragePoolMonitor.java b/server/src/com/cloud/storage/listener/StoragePoolMonitor.java index af6f0f5276a..e848a8727a0 100755 --- 
a/server/src/com/cloud/storage/listener/StoragePoolMonitor.java +++ b/server/src/com/cloud/storage/listener/StoragePoolMonitor.java @@ -18,6 +18,8 @@ package com.cloud.storage.listener; import java.util.List; +import javax.inject.Inject; + import org.apache.log4j.Logger; import com.cloud.agent.Listener; @@ -31,96 +33,93 @@ import com.cloud.exception.ConnectionException; import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.server.ManagementService; import com.cloud.storage.OCFS2Manager; +import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageManagerImpl; import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.StoragePoolVO; -import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.dao.StoragePoolDao; -import com.cloud.utils.component.ComponentLocator; + public class StoragePoolMonitor implements Listener { private static final Logger s_logger = Logger.getLogger(StoragePoolMonitor.class); - private final StorageManagerImpl _storageManager; - private final StoragePoolDao _poolDao; - OCFS2Manager _ocfs2Mgr; - + private final StorageManagerImpl _storageManager; + private final StoragePoolDao _poolDao; + @Inject OCFS2Manager _ocfs2Mgr; + public StoragePoolMonitor(StorageManagerImpl mgr, StoragePoolDao poolDao) { - this._storageManager = mgr; - this._poolDao = poolDao; - - ComponentLocator locator = ComponentLocator.getLocator(ManagementService.Name); - this._ocfs2Mgr = locator.getManager(OCFS2Manager.class); + this._storageManager = mgr; + this._poolDao = poolDao; + } - - + + @Override public boolean isRecurring() { return false; } - + @Override public synchronized boolean processAnswers(long agentId, long seq, Answer[] resp) { return true; } - + @Override public synchronized boolean processDisconnect(long agentId, Status state) { return true; } - + @Override public void processConnect(HostVO host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { - if (cmd instanceof StartupRoutingCommand) { - StartupRoutingCommand scCmd = (StartupRoutingCommand)cmd; - if (scCmd.getHypervisorType() == HypervisorType.XenServer || scCmd.getHypervisorType() == HypervisorType.KVM || - scCmd.getHypervisorType() == HypervisorType.VMware || scCmd.getHypervisorType() == HypervisorType.Simulator || scCmd.getHypervisorType() == HypervisorType.Ovm) { - List pools = _poolDao.listBy(host.getDataCenterId(), host.getPodId(), host.getClusterId()); - for (StoragePoolVO pool : pools) { - if (pool.getStatus() != StoragePoolStatus.Up) { - continue; - } - if (!pool.getPoolType().isShared()) { - continue; - } - - if (pool.getPoolType() == StoragePoolType.OCFS2 && !_ocfs2Mgr.prepareNodes(pool.getClusterId())) { - throw new ConnectionException(true, "Unable to prepare OCFS2 nodes for pool " + pool.getId()); - } - - Long hostId = host.getId(); - s_logger.debug("Host " + hostId + " connected, sending down storage pool information ..."); - try { - _storageManager.connectHostToSharedPool(hostId, pool); - _storageManager.createCapacityEntry(pool); - } catch (Exception e) { - s_logger.warn("Unable to connect host " + hostId + " to pool " + pool + " due to " + e.toString(), e); - } - } - } - } + if (cmd instanceof StartupRoutingCommand) { + StartupRoutingCommand scCmd = (StartupRoutingCommand)cmd; + if (scCmd.getHypervisorType() == HypervisorType.XenServer || scCmd.getHypervisorType() == HypervisorType.KVM || + scCmd.getHypervisorType() == HypervisorType.VMware || 
scCmd.getHypervisorType() == HypervisorType.Simulator || scCmd.getHypervisorType() == HypervisorType.Ovm) { + List pools = _poolDao.listBy(host.getDataCenterId(), host.getPodId(), host.getClusterId()); + for (StoragePoolVO pool : pools) { + if (pool.getStatus() != StoragePoolStatus.Up) { + continue; + } + if (!pool.getPoolType().isShared()) { + continue; + } + + if (pool.getPoolType() == StoragePoolType.OCFS2 && !_ocfs2Mgr.prepareNodes(pool.getClusterId())) { + throw new ConnectionException(true, "Unable to prepare OCFS2 nodes for pool " + pool.getId()); + } + + Long hostId = host.getId(); + s_logger.debug("Host " + hostId + " connected, sending down storage pool information ..."); + try { + _storageManager.connectHostToSharedPool(hostId, pool); + _storageManager.createCapacityEntry(pool); + } catch (Exception e) { + s_logger.warn("Unable to connect host " + hostId + " to pool " + pool + " due to " + e.toString(), e); + } + } + } + } } - + @Override public boolean processCommands(long agentId, long seq, Command[] req) { return false; } - + @Override public AgentControlAnswer processControlCommand(long agentId, AgentControlCommand cmd) { - return null; + return null; } - + @Override public boolean processTimeout(long agentId, long seq) { - return true; + return true; } - + @Override public int getTimeout() { - return -1; + return -1; } - + } diff --git a/server/src/com/cloud/storage/resource/DummySecondaryStorageResource.java b/server/src/com/cloud/storage/resource/DummySecondaryStorageResource.java index 23f3def0c22..2773e293239 100644 --- a/server/src/com/cloud/storage/resource/DummySecondaryStorageResource.java +++ b/server/src/com/cloud/storage/resource/DummySecondaryStorageResource.java @@ -21,6 +21,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; @@ -50,42 +51,42 @@ import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.template.TemplateConstants; import com.cloud.storage.template.TemplateInfo; -import com.cloud.utils.component.ComponentLocator; + public class DummySecondaryStorageResource extends ServerResourceBase implements ServerResource { private static final Logger s_logger = Logger.getLogger(DummySecondaryStorageResource.class); - + String _dc; String _pod; String _guid; String _dummyPath; - VMTemplateDao _tmpltDao; - private boolean _useServiceVm; - - - public DummySecondaryStorageResource(boolean useServiceVM) { - setUseServiceVm(useServiceVM); - } + @Inject VMTemplateDao _tmpltDao; + private boolean _useServiceVm; - @Override - protected String getDefaultScriptsDir() { - return "dummy"; - } - @Override - public Answer executeRequest(Command cmd) { + public DummySecondaryStorageResource(boolean useServiceVM) { + setUseServiceVm(useServiceVM); + } + + @Override + protected String getDefaultScriptsDir() { + return "dummy"; + } + + @Override + public Answer executeRequest(Command cmd) { if (cmd instanceof DownloadProgressCommand) { return new DownloadAnswer(null, 100, cmd, - com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOADED, - "dummyFS", - "/dummy"); + com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOADED, + "dummyFS", + "/dummy"); } else if (cmd instanceof DownloadCommand) { return new DownloadAnswer(null, 100, cmd, - com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOADED, - "dummyFS", - "/dummy"); + 
com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOADED, + "dummyFS", + "/dummy"); } else if (cmd instanceof GetStorageStatsCommand) { - return execute((GetStorageStatsCommand)cmd); + return execute((GetStorageStatsCommand)cmd); } else if (cmd instanceof CheckHealthCommand) { return new CheckHealthAnswer((CheckHealthCommand)cmd, true); } else if (cmd instanceof ReadyCommand) { @@ -93,33 +94,33 @@ public class DummySecondaryStorageResource extends ServerResourceBase implements } else { return Answer.createUnsupportedCommandAnswer(cmd); } - } + } - @Override - public PingCommand getCurrentStatus(long id) { + @Override + public PingCommand getCurrentStatus(long id) { return new PingStorageCommand(Host.Type.Storage, id, new HashMap()); - } + } - @Override - public Type getType() { + @Override + public Type getType() { return Host.Type.SecondaryStorage; - } + } - @Override - public StartupCommand[] initialize() { + @Override + public StartupCommand[] initialize() { final StartupStorageCommand cmd = new StartupStorageCommand("dummy", - StoragePoolType.NetworkFilesystem, 1024*1024*1024*100L, - new HashMap()); - + StoragePoolType.NetworkFilesystem, 1024*1024*1024*100L, + new HashMap()); + cmd.setResourceType(Storage.StorageResourceType.SECONDARY_STORAGE); cmd.setIqn(null); cmd.setNfsShare(_guid); - + fillNetworkInformation(cmd); cmd.setDataCenter(_dc); cmd.setPod(_pod); cmd.setGuid(_guid); - + cmd.setName(_guid); cmd.setVersion(DummySecondaryStorageResource.class.getPackage().getImplementationVersion()); /* gather TemplateInfo in second storage */ @@ -127,62 +128,57 @@ public class DummySecondaryStorageResource extends ServerResourceBase implements cmd.getHostDetails().put("mount.parent", "dummy"); cmd.getHostDetails().put("mount.path", "dummy"); cmd.getHostDetails().put("orig.url", _guid); - + String tok[] = _dummyPath.split(":"); cmd.setPrivateIpAddress(tok[0]); return new StartupCommand [] {cmd}; - } - + } + protected GetStorageStatsAnswer execute(GetStorageStatsCommand cmd) { long size = 1024*1024*1024*100L; return new GetStorageStatsAnswer(cmd, 0, size); } - + @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); - + _guid = (String)params.get("guid"); if (_guid == null) { throw new ConfigurationException("Unable to find the guid"); } - + _dc = (String)params.get("zone"); if (_dc == null) { throw new ConfigurationException("Unable to find the zone"); } _pod = (String)params.get("pod"); - + _dummyPath = (String)params.get("mount.path"); if (_dummyPath == null) { throw new ConfigurationException("Unable to find mount.path"); } - - ComponentLocator locator = ComponentLocator.getLocator("management-server"); - _tmpltDao = locator.getDao(VMTemplateDao.class); - if (_tmpltDao == null) { - throw new ConfigurationException("Unable to find VMTemplate dao"); - } + return true; } - public void setUseServiceVm(boolean _useServiceVm) { - this._useServiceVm = _useServiceVm; - } + public void setUseServiceVm(boolean _useServiceVm) { + this._useServiceVm = _useServiceVm; + } - public boolean useServiceVm() { - return _useServiceVm; - } - - public Map getDefaultSystemVmTemplateInfo() { - List tmplts = _tmpltDao.listAllSystemVMTemplates(); - Map tmpltInfo = new HashMap(); - if (tmplts != null) { - for (VMTemplateVO tmplt : tmplts) { - TemplateInfo routingInfo = new TemplateInfo(tmplt.getUniqueName(), TemplateConstants.DEFAULT_SYSTEM_VM_TEMPLATE_PATH + tmplt.getId() + File.separator, false, false); - 
tmpltInfo.put(tmplt.getUniqueName(), routingInfo); - } - } - return tmpltInfo; - } + public boolean useServiceVm() { + return _useServiceVm; + } + + public Map getDefaultSystemVmTemplateInfo() { + List tmplts = _tmpltDao.listAllSystemVMTemplates(); + Map tmpltInfo = new HashMap(); + if (tmplts != null) { + for (VMTemplateVO tmplt : tmplts) { + TemplateInfo routingInfo = new TemplateInfo(tmplt.getUniqueName(), TemplateConstants.DEFAULT_SYSTEM_VM_TEMPLATE_PATH + tmplt.getId() + File.separator, false, false); + tmpltInfo.put(tmplt.getUniqueName(), routingInfo); + } + } + return tmpltInfo; + } } diff --git a/server/src/com/cloud/storage/s3/S3ManagerImpl.java b/server/src/com/cloud/storage/s3/S3ManagerImpl.java index 069edf37612..16e2ad900db 100644 --- a/server/src/com/cloud/storage/s3/S3ManagerImpl.java +++ b/server/src/com/cloud/storage/s3/S3ManagerImpl.java @@ -41,11 +41,15 @@ import java.util.Map; import java.util.UUID; import java.util.concurrent.Callable; +import javax.annotation.PostConstruct; import javax.ejb.Local; +import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.api.command.admin.storage.AddS3Cmd; +import org.apache.cloudstack.api.command.admin.storage.ListS3sCmd; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -53,9 +57,6 @@ import com.cloud.agent.api.DeleteTemplateFromS3Command; import com.cloud.agent.api.DownloadTemplateFromS3ToSecondaryStorageCommand; import com.cloud.agent.api.UploadTemplateToS3FromSecondaryStorageCommand; import com.cloud.agent.api.to.S3TO; -import org.apache.cloudstack.api.command.admin.storage.ListS3sCmd; -import org.springframework.stereotype.Component; - import com.cloud.configuration.Config; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.DataCenterVO; @@ -77,7 +78,6 @@ import com.cloud.storage.dao.VMTemplateS3Dao; import com.cloud.storage.dao.VMTemplateZoneDao; import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.utils.S3Utils.ClientOptions; -import com.cloud.utils.component.Inject; import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.exception.CloudRuntimeException; @@ -90,7 +90,7 @@ public class S3ManagerImpl implements S3Manager { private String name; - @Inject + @Inject private AgentManager agentManager; @Inject @@ -120,10 +120,9 @@ public class S3ManagerImpl implements S3Manager { @Inject private SecondaryStorageVmManager secondaryStorageVMManager; - protected S3ManagerImpl() { - super(); + public S3ManagerImpl() { } - + private void verifyConnection(final S3TO s3) throws DiscoveryException { if (!canConnect(s3)) { @@ -288,32 +287,32 @@ public class S3ManagerImpl implements S3Manager { executeWithNoWaitLock(determineLockId(accountId, templateId), new Callable() { - @Override - public Void call() throws Exception { + @Override + public Void call() throws Exception { - final Answer answer = agentManager.sendToSSVM(null, - new DeleteTemplateFromS3Command(s3, - accountId, templateId)); - if (answer == null || !answer.getResult()) { - final String errorMessage = format( - "Delete Template Failed: Unable to delete template id %1$s from S3 due to following error: %2$s", - templateId, - ((answer == null) ? 
"answer is null" - : answer.getDetails())); - LOGGER.error(errorMessage); - throw new CloudRuntimeException(errorMessage); - } + final Answer answer = agentManager.sendToSSVM(null, + new DeleteTemplateFromS3Command(s3, + accountId, templateId)); + if (answer == null || !answer.getResult()) { + final String errorMessage = format( + "Delete Template Failed: Unable to delete template id %1$s from S3 due to following error: %2$s", + templateId, + ((answer == null) ? "answer is null" + : answer.getDetails())); + LOGGER.error(errorMessage); + throw new CloudRuntimeException(errorMessage); + } - vmTemplateS3Dao.remove(vmTemplateS3VO.getId()); - LOGGER.debug(format( - "Deleted template %1$s from S3.", - templateId)); + vmTemplateS3Dao.remove(vmTemplateS3VO.getId()); + LOGGER.debug(format( + "Deleted template %1$s from S3.", + templateId)); - return null; + return null; - } + } - }); + }); } catch (Exception e) { @@ -384,38 +383,38 @@ public class S3ManagerImpl implements S3Manager { executeWithNoWaitLock(determineLockId(accountId, templateId), new Callable() { - @Override - public Void call() throws Exception { + @Override + public Void call() throws Exception { - final Answer answer = agentManager.sendToSSVM( - dataCenterId, cmd); + final Answer answer = agentManager.sendToSSVM( + dataCenterId, cmd); - if (answer == null || !answer.getResult()) { - final String errMsg = String - .format("Failed to download template from S3 to secondary storage due to %1$s", - (answer == null ? "answer is null" - : answer.getDetails())); - LOGGER.error(errMsg); - throw new CloudRuntimeException(errMsg); - } + if (answer == null || !answer.getResult()) { + final String errMsg = String + .format("Failed to download template from S3 to secondary storage due to %1$s", + (answer == null ? 
"answer is null" + : answer.getDetails())); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } - final String installPath = join( - asList("template", "tmpl", accountId, - templateId), File.separator); - final VMTemplateHostVO tmpltHost = new VMTemplateHostVO( - secondaryStorageHost.getId(), templateId, - now(), 100, Status.DOWNLOADED, null, null, - null, installPath, template.getUrl()); - tmpltHost.setSize(templateS3VO.getSize()); - tmpltHost.setPhysicalSize(templateS3VO - .getPhysicalSize()); - vmTemplateHostDao.persist(tmpltHost); + final String installPath = join( + asList("template", "tmpl", accountId, + templateId), File.separator); + final VMTemplateHostVO tmpltHost = new VMTemplateHostVO( + secondaryStorageHost.getId(), templateId, + now(), 100, Status.DOWNLOADED, null, null, + null, installPath, template.getUrl()); + tmpltHost.setSize(templateS3VO.getSize()); + tmpltHost.setPhysicalSize(templateS3VO + .getPhysicalSize()); + vmTemplateHostDao.persist(tmpltHost); - return null; + return null; - } + } - }); + }); } catch (Exception e) { final String errMsg = "Failed to download template from S3 to secondary storage due to " @@ -608,50 +607,50 @@ public class S3ManagerImpl implements S3Manager { executeWithNoWaitLock(determineLockId(accountId, templateId), new Callable() { - @Override - public Void call() throws Exception { + @Override + public Void call() throws Exception { - final UploadTemplateToS3FromSecondaryStorageCommand cmd = new UploadTemplateToS3FromSecondaryStorageCommand( - s3, secondaryHost.getStorageUrl(), - dataCenterId, accountId, templateId); + final UploadTemplateToS3FromSecondaryStorageCommand cmd = new UploadTemplateToS3FromSecondaryStorageCommand( + s3, secondaryHost.getStorageUrl(), + dataCenterId, accountId, templateId); - final Answer answer = agentManager.sendToSSVM( - dataCenterId, cmd); - if (answer == null || !answer.getResult()) { + final Answer answer = agentManager.sendToSSVM( + dataCenterId, cmd); + if (answer == null || !answer.getResult()) { - final String reason = answer != null ? answer - .getDetails() - : "S3 template sync failed due to an unspecified error."; + final String reason = answer != null ? 
answer + .getDetails() + : "S3 template sync failed due to an unspecified error."; throw new CloudRuntimeException( format("Failed to upload template id %1$s to S3 from secondary storage due to %2$s.", templateId, reason)); - } + } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(format( - "Creating VMTemplateS3VO instance using template id %1s.", - templateId)); - } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(format( + "Creating VMTemplateS3VO instance using template id %1$s.", + templateId)); + } - final VMTemplateS3VO vmTemplateS3VO = new VMTemplateS3VO( - s3.getId(), templateId, now(), - templateHostRef.getSize(), templateHostRef - .getPhysicalSize()); + final VMTemplateS3VO vmTemplateS3VO = new VMTemplateS3VO( + s3.getId(), templateId, now(), + templateHostRef.getSize(), templateHostRef + .getPhysicalSize()); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(format("Persisting %1$s", - vmTemplateS3VO)); - } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(format("Persisting %1$s", + vmTemplateS3VO)); + } - vmTemplateS3Dao.persist(vmTemplateS3VO); - propagateTemplateToAllZones(vmTemplateS3VO); + vmTemplateS3Dao.persist(vmTemplateS3VO); + propagateTemplateToAllZones(vmTemplateS3VO); - return null; + return null; - } + } - }); + }); } catch (Exception e) { diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java index fb4ece5187c..6bf6d5e5f2a 100755 --- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -114,7 +114,7 @@ import com.cloud.utils.DateUtil.IntervalType; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.component.Manager; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; diff --git a/server/src/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java index 190b054a1d0..18dc1f9d053 100644 --- a/server/src/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java @@ -55,7 +55,7 @@ import com.cloud.user.User; import com.cloud.utils.DateUtil; import com.cloud.utils.DateUtil.IntervalType; import com.cloud.utils.NumbersUtil; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.concurrency.TestClock; import com.cloud.utils.db.DB; import com.cloud.utils.db.GlobalLock; diff --git a/server/src/com/cloud/template/TemplateManagerImpl.java b/server/src/com/cloud/template/TemplateManagerImpl.java index 0e914725e16..1ce9578408f 100755 --- a/server/src/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/com/cloud/template/TemplateManagerImpl.java @@ -130,8 +130,8 @@ import com.cloud.user.dao.UserAccountDao; import com.cloud.user.dao.UserDao; import com.cloud.uservm.UserVm; import com.cloud.utils.NumbersUtil; -import com.cloud.utils.component.Adapters; -import com.cloud.utils.component.ComponentLocator; +import com.cloud.utils.component.AdapterBase; + import com.cloud.utils.component.Manager; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; @@ -216,10 +216,10 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe private TemplateAdapter getAdapter(HypervisorType type) { TemplateAdapter adapter = null; if (type == HypervisorType.BareMetal) { - adapter =
Adapters.getAdapterByName(_adapters, TemplateAdapterType.BareMetal.getName()); + adapter = AdapterBase.getAdapterByName(_adapters, TemplateAdapterType.BareMetal.getName()); } else { // see HyervisorTemplateAdapter - adapter = Adapters.getAdapterByName(_adapters, TemplateAdapterType.Hypervisor.getName()); + adapter = AdapterBase.getAdapterByName(_adapters, TemplateAdapterType.Hypervisor.getName()); } if (adapter == null) { @@ -1098,7 +1098,7 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe s_logger.info("S3 secondary storage synchronization is disabled."); } - return false; + return true; } protected TemplateManagerImpl() { diff --git a/server/src/com/cloud/test/DatabaseConfig.java b/server/src/com/cloud/test/DatabaseConfig.java index 03cf083b610..7c10f98abf4 100755 --- a/server/src/com/cloud/test/DatabaseConfig.java +++ b/server/src/com/cloud/test/DatabaseConfig.java @@ -54,7 +54,7 @@ import com.cloud.service.dao.ServiceOfferingDaoImpl; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.dao.DiskOfferingDaoImpl; import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.component.LegacyComponentLocator; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; import com.cloud.utils.net.NfsUtils; @@ -74,86 +74,86 @@ public class DatabaseConfig { // Change to HashSet private static HashSet objectNames = new HashSet(); private static HashSet fieldNames = new HashSet(); - + // Maintain an IPRangeConfig object to handle IP related logic - private final IPRangeConfig iprc = LegacyComponentLocator.inject(IPRangeConfig.class); - + private final IPRangeConfig iprc = ComponentContext.inject(IPRangeConfig.class); + // Maintain a PodZoneConfig object to handle Pod/Zone related logic - private final PodZoneConfig pzc = LegacyComponentLocator.inject(PodZoneConfig.class); - + private final PodZoneConfig pzc = ComponentContext.inject(PodZoneConfig.class); + // Global variables to store network.throttling.rate and multicast.throttling.rate from the configuration table // Will be changed from null to a non-null value if the value existed in the configuration table private String _networkThrottlingRate = null; private String _multicastThrottlingRate = null; - + static { - // initialize the objectNames ArrayList - objectNames.add("zone"); + // initialize the objectNames ArrayList + objectNames.add("zone"); objectNames.add("physicalNetwork"); - objectNames.add("vlan"); - objectNames.add("pod"); + objectNames.add("vlan"); + objectNames.add("pod"); objectNames.add("cluster"); - objectNames.add("storagePool"); - objectNames.add("secondaryStorage"); - objectNames.add("serviceOffering"); + objectNames.add("storagePool"); + objectNames.add("secondaryStorage"); + objectNames.add("serviceOffering"); objectNames.add("diskOffering"); - objectNames.add("user"); - objectNames.add("pricing"); - objectNames.add("configuration"); - objectNames.add("privateIpAddresses"); - objectNames.add("publicIpAddresses"); + objectNames.add("user"); + objectNames.add("pricing"); + objectNames.add("configuration"); + objectNames.add("privateIpAddresses"); + objectNames.add("publicIpAddresses"); objectNames.add("physicalNetworkServiceProvider"); objectNames.add("virtualRouterProvider"); - - // initialize the fieldNames ArrayList - fieldNames.add("id"); - fieldNames.add("name"); - fieldNames.add("dns1"); - fieldNames.add("dns2"); - fieldNames.add("internalDns1"); - fieldNames.add("internalDns2"); - 
fieldNames.add("guestNetworkCidr"); - fieldNames.add("gateway"); - fieldNames.add("netmask"); - fieldNames.add("vncConsoleIp"); - fieldNames.add("zoneId"); - fieldNames.add("vlanId"); - fieldNames.add("cpu"); - fieldNames.add("ramSize"); - fieldNames.add("speed"); - fieldNames.add("useLocalStorage"); - fieldNames.add("hypervisorType"); - fieldNames.add("diskSpace"); - fieldNames.add("nwRate"); - fieldNames.add("mcRate"); - fieldNames.add("price"); - fieldNames.add("username"); - fieldNames.add("password"); - fieldNames.add("firstname"); - fieldNames.add("lastname"); - fieldNames.add("email"); - fieldNames.add("priceUnit"); - fieldNames.add("type"); - fieldNames.add("value"); - fieldNames.add("podId"); - fieldNames.add("podName"); - fieldNames.add("ipAddressRange"); - fieldNames.add("vlanType"); - fieldNames.add("vlanName"); - fieldNames.add("cidr"); - fieldNames.add("vnet"); - fieldNames.add("mirrored"); - fieldNames.add("enableHA"); - fieldNames.add("displayText"); - fieldNames.add("domainId"); - fieldNames.add("hostAddress"); - fieldNames.add("hostPath"); - fieldNames.add("guestIpType"); - fieldNames.add("url"); - fieldNames.add("storageType"); - fieldNames.add("category"); - fieldNames.add("tags"); - fieldNames.add("networktype"); + + // initialize the fieldNames ArrayList + fieldNames.add("id"); + fieldNames.add("name"); + fieldNames.add("dns1"); + fieldNames.add("dns2"); + fieldNames.add("internalDns1"); + fieldNames.add("internalDns2"); + fieldNames.add("guestNetworkCidr"); + fieldNames.add("gateway"); + fieldNames.add("netmask"); + fieldNames.add("vncConsoleIp"); + fieldNames.add("zoneId"); + fieldNames.add("vlanId"); + fieldNames.add("cpu"); + fieldNames.add("ramSize"); + fieldNames.add("speed"); + fieldNames.add("useLocalStorage"); + fieldNames.add("hypervisorType"); + fieldNames.add("diskSpace"); + fieldNames.add("nwRate"); + fieldNames.add("mcRate"); + fieldNames.add("price"); + fieldNames.add("username"); + fieldNames.add("password"); + fieldNames.add("firstname"); + fieldNames.add("lastname"); + fieldNames.add("email"); + fieldNames.add("priceUnit"); + fieldNames.add("type"); + fieldNames.add("value"); + fieldNames.add("podId"); + fieldNames.add("podName"); + fieldNames.add("ipAddressRange"); + fieldNames.add("vlanType"); + fieldNames.add("vlanName"); + fieldNames.add("cidr"); + fieldNames.add("vnet"); + fieldNames.add("mirrored"); + fieldNames.add("enableHA"); + fieldNames.add("displayText"); + fieldNames.add("domainId"); + fieldNames.add("hostAddress"); + fieldNames.add("hostPath"); + fieldNames.add("guestIpType"); + fieldNames.add("url"); + fieldNames.add("storageType"); + fieldNames.add("category"); + fieldNames.add("tags"); + fieldNames.add("networktype"); fieldNames.add("clusterId"); fieldNames.add("physicalNetworkId"); fieldNames.add("destPhysicalNetworkId"); @@ -169,7 +169,7 @@ public class DatabaseConfig { fieldNames.add("userData"); fieldNames.add("securityGroup"); fieldNames.add("nspId"); - + s_configurationDescriptions.put("host.stats.interval", "the interval in milliseconds when host stats are retrieved from agents"); s_configurationDescriptions.put("storage.stats.interval", "the interval in milliseconds when storage stats (per host) are retrieved from agents"); s_configurationDescriptions.put("volume.stats.interval", "the interval in milliseconds when volume stats are retrieved from agents"); @@ -220,17 +220,17 @@ public class DatabaseConfig { s_configurationDescriptions.put("snapshot.test.weeks.per.month", "Set it to a smaller value to take more recurring 
snapshots"); s_configurationDescriptions.put("snapshot.test.months.per.year", "Set it to a smaller value to take more recurring snapshots"); s_configurationDescriptions.put("hypervisor.type", "The type of hypervisor that this deployment will use."); - - + + s_configurationComponents.put("host.stats.interval", "management-server"); s_configurationComponents.put("storage.stats.interval", "management-server"); s_configurationComponents.put("volume.stats.interval", "management-server"); s_configurationComponents.put("integration.api.port", "management-server"); s_configurationComponents.put("usage.stats.job.exec.time", "management-server"); s_configurationComponents.put("usage.stats.job.aggregation.range", "management-server"); - s_configurationComponents.put("consoleproxy.domP.enable", "management-server"); - s_configurationComponents.put("consoleproxy.port", "management-server"); - s_configurationComponents.put("consoleproxy.url.port", "management-server"); + s_configurationComponents.put("consoleproxy.domP.enable", "management-server"); + s_configurationComponents.put("consoleproxy.port", "management-server"); + s_configurationComponents.put("consoleproxy.url.port", "management-server"); s_configurationComponents.put("alert.email.addresses", "management-server"); s_configurationComponents.put("alert.smtp.host", "management-server"); s_configurationComponents.put("alert.smtp.port", "management-server"); @@ -256,22 +256,22 @@ public class DatabaseConfig { s_configurationComponents.put("instance.name", "AgentManager"); s_configurationComponents.put("storage.overprovisioning.factor", "StorageAllocator"); s_configurationComponents.put("retries.per.host", "AgentManager"); - s_configurationComponents.put("start.retry", "AgentManager"); - s_configurationComponents.put("wait", "AgentManager"); - s_configurationComponents.put("ping.timeout", "AgentManager"); - s_configurationComponents.put("ping.interval", "AgentManager"); - s_configurationComponents.put("alert.wait", "AgentManager"); - s_configurationComponents.put("update.wait", "AgentManager"); - s_configurationComponents.put("guest.domain.suffix", "AgentManager"); - s_configurationComponents.put("consoleproxy.ram.size", "AgentManager"); - s_configurationComponents.put("consoleproxy.cmd.port", "AgentManager"); - s_configurationComponents.put("consoleproxy.loadscan.interval", "AgentManager"); - s_configurationComponents.put("consoleproxy.capacityscan.interval", "AgentManager"); - s_configurationComponents.put("consoleproxy.capacity.standby", "AgentManager"); - s_configurationComponents.put("consoleproxy.session.max", "AgentManager"); - s_configurationComponents.put("consoleproxy.session.timeout", "AgentManager"); - s_configurationComponents.put("expunge.workers", "UserVmManager"); - s_configurationComponents.put("extract.url.cleanup.interval", "management-server"); + s_configurationComponents.put("start.retry", "AgentManager"); + s_configurationComponents.put("wait", "AgentManager"); + s_configurationComponents.put("ping.timeout", "AgentManager"); + s_configurationComponents.put("ping.interval", "AgentManager"); + s_configurationComponents.put("alert.wait", "AgentManager"); + s_configurationComponents.put("update.wait", "AgentManager"); + s_configurationComponents.put("guest.domain.suffix", "AgentManager"); + s_configurationComponents.put("consoleproxy.ram.size", "AgentManager"); + s_configurationComponents.put("consoleproxy.cmd.port", "AgentManager"); + s_configurationComponents.put("consoleproxy.loadscan.interval", "AgentManager"); + 
s_configurationComponents.put("consoleproxy.capacityscan.interval", "AgentManager"); + s_configurationComponents.put("consoleproxy.capacity.standby", "AgentManager"); + s_configurationComponents.put("consoleproxy.session.max", "AgentManager"); + s_configurationComponents.put("consoleproxy.session.timeout", "AgentManager"); + s_configurationComponents.put("expunge.workers", "UserVmManager"); + s_configurationComponents.put("extract.url.cleanup.interval", "management-server"); s_configurationComponents.put("stop.retry.interval", "HighAvailabilityManager"); s_configurationComponents.put("restart.retry.interval", "HighAvailabilityManager"); s_configurationComponents.put("investigate.retry.interval", "HighAvailabilityManager"); @@ -294,7 +294,7 @@ public class DatabaseConfig { s_configurationComponents.put("snapshot.test.months.per.year", "SnapshotManager"); s_configurationComponents.put("hypervisor.type", "ManagementServer"); - + s_defaultConfigurationValues.put("host.stats.interval", "60000"); s_defaultConfigurationValues.put("storage.stats.interval", "60000"); //s_defaultConfigurationValues.put("volume.stats.interval", "-1"); @@ -336,7 +336,7 @@ public class DatabaseConfig { s_defaultConfigurationValues.put("cpu.overprovisioning.factor", "1"); s_defaultConfigurationValues.put("mem.overprovisioning.factor", "1"); } - + protected DatabaseConfig() { } @@ -346,20 +346,20 @@ public class DatabaseConfig { public static void main(String[] args) { System.setProperty("javax.xml.parsers.DocumentBuilderFactory", "com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl"); System.setProperty("javax.xml.parsers.SAXParserFactory", "com.sun.org.apache.xerces.internal.jaxp.SAXParserFactoryImpl"); - + File file = PropertiesUtil.findConfigFile("log4j-cloud.xml"); if(file != null) { - System.out.println("Log4j configuration from : " + file.getAbsolutePath()); - DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000); - } else { - System.out.println("Configure log4j with default properties"); - } - + System.out.println("Log4j configuration from : " + file.getAbsolutePath()); + DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000); + } else { + System.out.println("Configure log4j with default properties"); + } + if (args.length < 1) { s_logger.error("error starting database config, missing initial data file"); } else { try { - DatabaseConfig config = LegacyComponentLocator.inject(DatabaseConfig.class, args[0]); + DatabaseConfig config = ComponentContext.inject(DatabaseConfig.class); config.doVersionCheck(); config.doConfig(); System.exit(0); @@ -374,65 +374,65 @@ public class DatabaseConfig { public DatabaseConfig(String configFileName) { _configFileName = configFileName; } - + private void doVersionCheck() { - try { - String warningMsg = "\nYou are using an outdated format for server-setup.xml. 
Please switch to the new format.\n"; - DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); - DocumentBuilder dbuilder = dbf.newDocumentBuilder(); - File configFile = new File(_configFileName); - Document d = dbuilder.parse(configFile); - NodeList nodeList = d.getElementsByTagName("version"); - - if (nodeList.getLength() == 0) { - System.out.println(warningMsg); - return; - } - - Node firstNode = nodeList.item(0); - String version = firstNode.getTextContent(); - - if (!version.equals("2.0")) { - System.out.println(warningMsg); - } - - } catch (ParserConfigurationException parserException) { - parserException.printStackTrace(); - } catch (IOException ioException) { - ioException.printStackTrace(); - } catch (SAXException saxException) { - saxException.printStackTrace(); - } + try { + String warningMsg = "\nYou are using an outdated format for server-setup.xml. Please switch to the new format.\n"; + DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); + DocumentBuilder dbuilder = dbf.newDocumentBuilder(); + File configFile = new File(_configFileName); + Document d = dbuilder.parse(configFile); + NodeList nodeList = d.getElementsByTagName("version"); + + if (nodeList.getLength() == 0) { + System.out.println(warningMsg); + return; + } + + Node firstNode = nodeList.item(0); + String version = firstNode.getTextContent(); + + if (!version.equals("2.0")) { + System.out.println(warningMsg); + } + + } catch (ParserConfigurationException parserException) { + parserException.printStackTrace(); + } catch (IOException ioException) { + ioException.printStackTrace(); + } catch (SAXException saxException) { + saxException.printStackTrace(); + } } @DB protected void doConfig() { Transaction txn = Transaction.currentTxn(); try { - + File configFile = new File(_configFileName); - + SAXParserFactory spfactory = SAXParserFactory.newInstance(); SAXParser saxParser = spfactory.newSAXParser(); DbConfigXMLHandler handler = new DbConfigXMLHandler(); handler.setParent(this); - + txn.start(); // Save user configured values for all fields saxParser.parse(configFile, handler); - + // Save default values for configuration fields saveVMTemplate(); saveRootDomain(); saveDefaultConfiguations(); - + txn.commit(); // Check pod CIDRs against each other, and against the guest ip network/netmask pzc.checkAllPodCidrSubnets(); - + } catch (Exception ex) { - System.out.print("ERROR IS"+ex); + System.out.print("ERROR IS"+ex); s_logger.error("error", ex); txn.rollback(); } @@ -448,7 +448,7 @@ public class DatabaseConfig { } else if ("physicalNetwork".equals(_currentObjectName)) { savePhysicalNetwork(); } else if ("vlan".equals(_currentObjectName)) { - saveVlan(); + saveVlan(); } else if ("pod".equals(_currentObjectName)) { savePod(); } else if ("serviceOffering".equals(_currentObjectName)) { @@ -460,9 +460,9 @@ public class DatabaseConfig { } else if ("configuration".equals(_currentObjectName)) { saveConfiguration(); } else if ("storagePool".equals(_currentObjectName)) { - saveStoragePool(); + saveStoragePool(); } else if ("secondaryStorage".equals(_currentObjectName)) { - saveSecondaryStorage(); + saveSecondaryStorage(); } else if ("cluster".equals(_currentObjectName)) { saveCluster(); } else if ("physicalNetworkServiceProvider".equals(_currentObjectName)) { @@ -472,88 +472,88 @@ public class DatabaseConfig { } _currentObjectParams = null; } - + @DB public void saveSecondaryStorage() { - long dataCenterId = Long.parseLong(_currentObjectParams.get("zoneId")); - String url = _currentObjectParams.get("url"); 
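// The saveSecondaryStorage() body being re-indented here, and saveStoragePool()
// below it, both keep CloudStack's thread-local transaction idiom: take the
// current Transaction, prepare an auto-closing statement, bind positional
// parameters, and executeUpdate(). prepareAutoCloseStatement ties the
// statement to the transaction's lifetime, which is why the code below never
// calls close() explicitly. A minimal sketch of the idiom, with an
// illustrative table and values that are not part of this patch:
//
//   import java.sql.PreparedStatement;
//   import com.cloud.utils.db.Transaction;
//
//   Transaction txn = Transaction.currentTxn();
//   PreparedStatement stmt = txn.prepareAutoCloseStatement(
//           "INSERT INTO `example` (`id`, `name`) VALUES (?, ?)");
//   stmt.setLong(1, 1L);
//   stmt.setString(2, "example-name");
//   stmt.executeUpdate();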
- String mountPoint; - try { - mountPoint = NfsUtils.url2Mount(url); - } catch (URISyntaxException e1) { - return; - } - String insertSql1 = "INSERT INTO `host` (`id`, `name`, `status` , `type` , `private_ip_address`, `private_netmask` ,`private_mac_address` , `storage_ip_address` ,`storage_netmask`, `storage_mac_address`, `data_center_id`, `version`, `dom0_memory`, `last_ping`, `resource`, `guid`, `hypervisor_type`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - String insertSqlHostDetails = "INSERT INTO `host_details` (`id`, `host_id`, `name`, `value`) VALUES(?,?,?,?)"; + long dataCenterId = Long.parseLong(_currentObjectParams.get("zoneId")); + String url = _currentObjectParams.get("url"); + String mountPoint; + try { + mountPoint = NfsUtils.url2Mount(url); + } catch (URISyntaxException e1) { + return; + } + String insertSql1 = "INSERT INTO `host` (`id`, `name`, `status` , `type` , `private_ip_address`, `private_netmask` ,`private_mac_address` , `storage_ip_address` ,`storage_netmask`, `storage_mac_address`, `data_center_id`, `version`, `dom0_memory`, `last_ping`, `resource`, `guid`, `hypervisor_type`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + String insertSqlHostDetails = "INSERT INTO `host_details` (`id`, `host_id`, `name`, `value`) VALUES(?,?,?,?)"; String insertSql2 = "INSERT INTO `op_host` (`id`, `sequence`) VALUES(?, ?)"; - Transaction txn = Transaction.currentTxn(); - try { - PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql1); - stmt.setLong(1, 0); - stmt.setString(2, url); - stmt.setString(3, "UP"); - stmt.setString(4, "SecondaryStorage"); - stmt.setString(5, "192.168.122.1"); - stmt.setString(6, "255.255.255.0"); - stmt.setString(7, "92:ff:f5:ad:23:e1"); - stmt.setString(8, "192.168.122.1"); - stmt.setString(9, "255.255.255.0"); - stmt.setString(10, "92:ff:f5:ad:23:e1"); - stmt.setLong(11, dataCenterId); - stmt.setString(12, "2.2.4"); - stmt.setLong(13, 0); - stmt.setLong(14, 1238425896); - - boolean nfs = false; - if (url.startsWith("nfs")) { - nfs = true; - } - if (nfs) { - stmt.setString(15, "com.cloud.storage.resource.NfsSecondaryStorageResource"); - } else { - stmt.setString(15, "com.cloud.storage.secondary.LocalSecondaryStorageResource"); - } - stmt.setString(16, url); - stmt.setString(17, "None"); - stmt.executeUpdate(); + Transaction txn = Transaction.currentTxn(); + try { + PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql1); + stmt.setLong(1, 0); + stmt.setString(2, url); + stmt.setString(3, "UP"); + stmt.setString(4, "SecondaryStorage"); + stmt.setString(5, "192.168.122.1"); + stmt.setString(6, "255.255.255.0"); + stmt.setString(7, "92:ff:f5:ad:23:e1"); + stmt.setString(8, "192.168.122.1"); + stmt.setString(9, "255.255.255.0"); + stmt.setString(10, "92:ff:f5:ad:23:e1"); + stmt.setLong(11, dataCenterId); + stmt.setString(12, "2.2.4"); + stmt.setLong(13, 0); + stmt.setLong(14, 1238425896); - stmt = txn.prepareAutoCloseStatement(insertSqlHostDetails); - stmt.setLong(1, 1); - stmt.setLong(2, 1); - stmt.setString(3, "mount.parent"); - if (nfs) { - stmt.setString(4, "/mnt"); - } else { + boolean nfs = false; + if (url.startsWith("nfs")) { + nfs = true; + } + if (nfs) { + stmt.setString(15, "com.cloud.storage.resource.NfsSecondaryStorageResource"); + } else { + stmt.setString(15, "com.cloud.storage.secondary.LocalSecondaryStorageResource"); + } + stmt.setString(16, url); + stmt.setString(17, "None"); + stmt.executeUpdate(); + + stmt = txn.prepareAutoCloseStatement(insertSqlHostDetails); + stmt.setLong(1, 1); + stmt.setLong(2, 1); + 
stmt.setString(3, "mount.parent"); + if (nfs) { + stmt.setString(4, "/mnt"); + } else { stmt.setString(4, "/"); } - stmt.executeUpdate(); + stmt.executeUpdate(); - stmt.setLong(1, 2); - stmt.setLong(2, 1); - stmt.setString(3, "mount.path"); - if (nfs) { - stmt.setString(4, mountPoint); - } else { + stmt.setLong(1, 2); + stmt.setLong(2, 1); + stmt.setString(3, "mount.path"); + if (nfs) { + stmt.setString(4, mountPoint); + } else { stmt.setString(4, url.replaceFirst("file:/", "")); } - stmt.executeUpdate(); + stmt.executeUpdate(); + + stmt.setLong(1, 3); + stmt.setLong(2, 1); + stmt.setString(3, "orig.url"); + stmt.setString(4, url); + stmt.executeUpdate(); - stmt.setLong(1, 3); - stmt.setLong(2, 1); - stmt.setString(3, "orig.url"); - stmt.setString(4, url); - stmt.executeUpdate(); - stmt = txn.prepareAutoCloseStatement(insertSql2); stmt.setLong(1, 1); stmt.setLong(2, 1); stmt.executeUpdate(); - } catch (SQLException ex) { - System.out.println("Error creating secondary storage: " + ex.getMessage()); - return; - } + } catch (SQLException ex) { + System.out.println("Error creating secondary storage: " + ex.getMessage()); + return; + } } - + @DB public void saveCluster() { String name = _currentObjectParams.get("name"); @@ -562,7 +562,7 @@ public class DatabaseConfig { long podId = Long.parseLong(_currentObjectParams.get("podId")); String hypervisor = _currentObjectParams.get("hypervisorType"); String insertSql1 = "INSERT INTO `cluster` (`id`, `name`, `data_center_id` , `pod_id`, `hypervisor_type` , `cluster_type`, `allocation_state`) VALUES (?,?,?,?,?,?,?)"; - + Transaction txn = Transaction.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql1); @@ -583,54 +583,54 @@ public class DatabaseConfig { } - + @DB public void saveStoragePool() { - String name = _currentObjectParams.get("name"); - long id = Long.parseLong(_currentObjectParams.get("id")); - long dataCenterId = Long.parseLong(_currentObjectParams.get("zoneId")); - long podId = Long.parseLong(_currentObjectParams.get("podId")); - long clusterId = Long.parseLong(_currentObjectParams.get("clusterId")); - String hostAddress = _currentObjectParams.get("hostAddress"); - String hostPath = _currentObjectParams.get("hostPath"); - String storageType = _currentObjectParams.get("storageType"); - String uuid = UUID.nameUUIDFromBytes(new String(hostAddress+hostPath).getBytes()).toString(); - - String insertSql1 = "INSERT INTO `storage_pool` (`id`, `name`, `uuid` , `pool_type` , `port`, `data_center_id` ,`available_bytes` , `capacity_bytes` ,`host_address`, `path`, `created`, `pod_id`,`status` , `cluster_id`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - // String insertSql2 = "INSERT INTO `netfs_storage_pool` VALUES (?,?,?)"; - - Transaction txn = Transaction.currentTxn(); - try { - PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql1); - stmt.setLong(1, id); - stmt.setString(2, name); - stmt.setString(3, uuid); - if (storageType == null) { + String name = _currentObjectParams.get("name"); + long id = Long.parseLong(_currentObjectParams.get("id")); + long dataCenterId = Long.parseLong(_currentObjectParams.get("zoneId")); + long podId = Long.parseLong(_currentObjectParams.get("podId")); + long clusterId = Long.parseLong(_currentObjectParams.get("clusterId")); + String hostAddress = _currentObjectParams.get("hostAddress"); + String hostPath = _currentObjectParams.get("hostPath"); + String storageType = _currentObjectParams.get("storageType"); + String uuid = UUID.nameUUIDFromBytes(new 
String(hostAddress+hostPath).getBytes()).toString(); + + String insertSql1 = "INSERT INTO `storage_pool` (`id`, `name`, `uuid` , `pool_type` , `port`, `data_center_id` ,`available_bytes` , `capacity_bytes` ,`host_address`, `path`, `created`, `pod_id`,`status` , `cluster_id`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + // String insertSql2 = "INSERT INTO `netfs_storage_pool` VALUES (?,?,?)"; + + Transaction txn = Transaction.currentTxn(); + try { + PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql1); + stmt.setLong(1, id); + stmt.setString(2, name); + stmt.setString(3, uuid); + if (storageType == null) { stmt.setString(4, "NetworkFileSystem"); } else { stmt.setString(4, storageType); } - stmt.setLong(5, 111); - stmt.setLong(6, dataCenterId); - stmt.setLong(7,0); - stmt.setLong(8,0); - stmt.setString(9, hostAddress); - stmt.setString(10, hostPath); - stmt.setDate(11, new Date(new java.util.Date().getTime())); - stmt.setLong(12, podId); - stmt.setString(13, Status.Up.toString()); - stmt.setLong(14, clusterId); - stmt.executeUpdate(); + stmt.setLong(5, 111); + stmt.setLong(6, dataCenterId); + stmt.setLong(7,0); + stmt.setLong(8,0); + stmt.setString(9, hostAddress); + stmt.setString(10, hostPath); + stmt.setDate(11, new Date(new java.util.Date().getTime())); + stmt.setLong(12, podId); + stmt.setString(13, Status.Up.toString()); + stmt.setLong(14, clusterId); + stmt.executeUpdate(); - } catch (SQLException ex) { - System.out.println("Error creating storage pool: " + ex.getMessage()); - s_logger.error("error creating storage pool ", ex); - return; - } + } catch (SQLException ex) { + System.out.println("Error creating storage pool: " + ex.getMessage()); + s_logger.error("error creating storage pool ", ex); + return; + } - } + } - private void saveZone() { + private void saveZone() { long id = Long.parseLong(_currentObjectParams.get("id")); String name = _currentObjectParams.get("name"); //String description = _currentObjectParams.get("description"); @@ -641,7 +641,7 @@ public class DatabaseConfig { //String vnetRange = _currentObjectParams.get("vnet"); String guestNetworkCidr = _currentObjectParams.get("guestNetworkCidr"); String networkType = _currentObjectParams.get("networktype"); - + // Check that all IPs are valid String ipError = "Please enter a valid IP address for the field: "; if (!IPRangeConfig.validOrBlankIP(dns1)) { @@ -659,15 +659,15 @@ public class DatabaseConfig { if (!IPRangeConfig.validCIDR(guestNetworkCidr)) { printError("Please enter a valid value for guestNetworkCidr"); } - - pzc.saveZone(false, id, name, dns1, dns2, internalDns1, internalDns2, guestNetworkCidr, networkType); + + pzc.saveZone(false, id, name, dns1, dns2, internalDns1, internalDns2, guestNetworkCidr, networkType); } - + private void savePhysicalNetwork() { long id = Long.parseLong(_currentObjectParams.get("id")); String zoneId = _currentObjectParams.get("zoneId"); String vnetRange = _currentObjectParams.get("vnet"); - + int vnetStart = -1; int vnetEnd = -1; if (vnetRange != null) { @@ -677,16 +677,16 @@ public class DatabaseConfig { } long zoneDbId = Long.parseLong(zoneId); pzc.savePhysicalNetwork(false, id, zoneDbId, vnetStart, vnetEnd); - + } - + private void savePhysicalNetworkServiceProvider() { long id = Long.parseLong(_currentObjectParams.get("id")); long physicalNetworkId = Long.parseLong(_currentObjectParams.get("physicalNetworkId")); String providerName = _currentObjectParams.get("providerName"); long destPhysicalNetworkId = Long.parseLong(_currentObjectParams.get("destPhysicalNetworkId")); 
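// saveStoragePool() above derives the pool UUID with UUID.nameUUIDFromBytes, a
// name-based (type 3) UUID, so the same host address and export path always
// yield the same pool identity across reruns of the configuration tool, unlike
// the random UUID used just below for service providers. A standalone
// equivalent, with illustrative values only:
//
//   import java.util.UUID;
//
//   String hostAddress = "192.168.0.10";
//   String hostPath = "/export/primary";
//   String uuid = UUID.nameUUIDFromBytes(
//           (hostAddress + hostPath).getBytes()).toString();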
String uuid = UUID.randomUUID().toString(); - + int vpn = Integer.parseInt(_currentObjectParams.get("vpn")); int dhcp = Integer.parseInt(_currentObjectParams.get("dhcp")); int dns = Integer.parseInt(_currentObjectParams.get("dns")); @@ -698,12 +698,12 @@ public class DatabaseConfig { int pf =Integer.parseInt(_currentObjectParams.get("portForwarding")); int userData =Integer.parseInt(_currentObjectParams.get("userData")); int securityGroup =Integer.parseInt(_currentObjectParams.get("securityGroup")); - + String insertSql1 = "INSERT INTO `physical_network_service_providers` (`id`, `uuid`, `physical_network_id` , `provider_name`, `state` ," + - "`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`," + - "`firewall_service_provided`, `source_nat_service_provided`, `load_balance_service_provided`, `static_nat_service_provided`," + - "`port_forwarding_service_provided`, `user_data_service_provided`, `security_group_service_provided`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - + "`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`," + + "`firewall_service_provided`, `source_nat_service_provided`, `load_balance_service_provided`, `static_nat_service_provided`," + + "`port_forwarding_service_provided`, `user_data_service_provided`, `security_group_service_provided`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + Transaction txn = Transaction.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql1); @@ -732,7 +732,7 @@ public class DatabaseConfig { } } - + private void saveVirtualRouterProvider() { long id = Long.parseLong(_currentObjectParams.get("id")); long nspId = Long.parseLong(_currentObjectParams.get("nspId")); @@ -740,7 +740,7 @@ public class DatabaseConfig { String type = _currentObjectParams.get("type"); String insertSql1 = "INSERT INTO `virtual_router_providers` (`id`, `nsp_id`, `uuid` , `type` , `enabled`) " + - "VALUES (?,?,?,?,?)"; + "VALUES (?,?,?,?,?)"; Transaction txn = Transaction.currentTxn(); try { @@ -758,18 +758,18 @@ public class DatabaseConfig { } } - + private void saveVlan() { - String zoneId = _currentObjectParams.get("zoneId"); - String physicalNetworkIdStr = _currentObjectParams.get("physicalNetworkId"); - String vlanId = _currentObjectParams.get("vlanId"); - String gateway = _currentObjectParams.get("gateway"); + String zoneId = _currentObjectParams.get("zoneId"); + String physicalNetworkIdStr = _currentObjectParams.get("physicalNetworkId"); + String vlanId = _currentObjectParams.get("vlanId"); + String gateway = _currentObjectParams.get("gateway"); String netmask = _currentObjectParams.get("netmask"); String publicIpRange = _currentObjectParams.get("ipAddressRange"); String vlanType = _currentObjectParams.get("vlanType"); String vlanPodName = _currentObjectParams.get("podName"); - - + + String ipError = "Please enter a valid IP address for the field: "; if (!IPRangeConfig.validOrBlankIP(gateway)) { printError(ipError + "gateway"); @@ -777,51 +777,51 @@ public class DatabaseConfig { if (!IPRangeConfig.validOrBlankIP(netmask)) { printError(ipError + "netmask"); } - + // Check that the given IP address range was valid - if (!checkIpAddressRange(publicIpRange)) { + if (!checkIpAddressRange(publicIpRange)) { printError("Please enter a valid public IP range."); } - - // Split the IP address range - String[] ipAddressRangeArray = publicIpRange.split("\\-"); - String startIP = 
ipAddressRangeArray[0]; - String endIP = null; - if (ipAddressRangeArray.length > 1) { + + // Split the IP address range + String[] ipAddressRangeArray = publicIpRange.split("\\-"); + String startIP = ipAddressRangeArray[0]; + String endIP = null; + if (ipAddressRangeArray.length > 1) { endIP = ipAddressRangeArray[1]; } - - // If a netmask was provided, check that the startIP, endIP, and gateway all belong to the same subnet - if (netmask != null && netmask != "") { - if (endIP != null) { - if (!IPRangeConfig.sameSubnet(startIP, endIP, netmask)) { + + // If a netmask was provided, check that the startIP, endIP, and gateway all belong to the same subnet + if (netmask != null && netmask != "") { + if (endIP != null) { + if (!IPRangeConfig.sameSubnet(startIP, endIP, netmask)) { printError("Start and end IPs for the public IP range must be in the same subnet, as per the provided netmask."); } - } - - if (gateway != null && gateway != "") { - if (!IPRangeConfig.sameSubnet(startIP, gateway, netmask)) { + } + + if (gateway != null && gateway != "") { + if (!IPRangeConfig.sameSubnet(startIP, gateway, netmask)) { printError("The start IP for the public IP range must be in the same subnet as the gateway, as per the provided netmask."); } - if (endIP != null) { - if (!IPRangeConfig.sameSubnet(endIP, gateway, netmask)) { + if (endIP != null) { + if (!IPRangeConfig.sameSubnet(endIP, gateway, netmask)) { printError("The end IP for the public IP range must be in the same subnet as the gateway, as per the provided netmask."); } - } - } - } - - long zoneDbId = Long.parseLong(zoneId); - String zoneName = PodZoneConfig.getZoneName(zoneDbId); - - long physicalNetworkId = Long.parseLong(physicalNetworkIdStr); - - //Set networkId to be 0, the value will be updated after management server starts up - pzc.modifyVlan(zoneName, true, vlanId, gateway, netmask, vlanPodName, vlanType, publicIpRange, 0, physicalNetworkId); - - long vlanDbId = pzc.getVlanDbId(zoneName, vlanId); - iprc.saveIPRange("public", -1, zoneDbId, vlanDbId, startIP, endIP, null, physicalNetworkId); - + } + } + } + + long zoneDbId = Long.parseLong(zoneId); + String zoneName = PodZoneConfig.getZoneName(zoneDbId); + + long physicalNetworkId = Long.parseLong(physicalNetworkIdStr); + + //Set networkId to be 0, the value will be updated after management server starts up + pzc.modifyVlan(zoneName, true, vlanId, gateway, netmask, vlanPodName, vlanType, publicIpRange, 0, physicalNetworkId); + + long vlanDbId = pzc.getVlanDbId(zoneName, vlanId); + iprc.saveIPRange("public", -1, zoneDbId, vlanDbId, startIP, endIP, null, physicalNetworkId); + } private void savePod() { @@ -835,7 +835,7 @@ public class DatabaseConfig { String startIP = null; String endIP = null; String vlanRange = _currentObjectParams.get("vnet"); - + int vlanStart = -1; int vlanEnd = -1; if (vlanRange != null) { @@ -843,51 +843,51 @@ public class DatabaseConfig { vlanStart = Integer.parseInt(tokens[0]); vlanEnd = Integer.parseInt(tokens[1]); } - + // Get the individual cidrAddress and cidrSize values - String[] cidrPair = cidr.split("\\/"); - String cidrAddress = cidrPair[0]; - String cidrSize = cidrPair[1]; + String[] cidrPair = cidr.split("\\/"); + String cidrAddress = cidrPair[0]; + String cidrSize = cidrPair[1]; long cidrSizeNum = Long.parseLong(cidrSize); - + // Check that the gateway is in the same subnet as the CIDR - if (!IPRangeConfig.sameSubnetCIDR(gateway, cidrAddress, cidrSizeNum)) { - printError("For pod " + name + " in zone " + zoneName + " , please ensure that your gateway is 
in the same subnet as the pod's CIDR address."); - } - + if (!IPRangeConfig.sameSubnetCIDR(gateway, cidrAddress, cidrSizeNum)) { + printError("For pod " + name + " in zone " + zoneName + " , please ensure that your gateway is in the same subnet as the pod's CIDR address."); + } + pzc.savePod(false, id, name, dataCenterId, gateway, cidr, vlanStart, vlanEnd); - - if (privateIpRange != null) { - // Check that the given IP address range was valid - if (!checkIpAddressRange(privateIpRange)) { + + if (privateIpRange != null) { + // Check that the given IP address range was valid + if (!checkIpAddressRange(privateIpRange)) { printError("Please enter a valid private IP range."); } - - String[] ipAddressRangeArray = privateIpRange.split("\\-"); - startIP = ipAddressRangeArray[0]; - endIP = null; - if (ipAddressRangeArray.length > 1) { + + String[] ipAddressRangeArray = privateIpRange.split("\\-"); + startIP = ipAddressRangeArray[0]; + endIP = null; + if (ipAddressRangeArray.length > 1) { endIP = ipAddressRangeArray[1]; } - } - - // Check that the start IP and end IP match up with the CIDR - if (!IPRangeConfig.sameSubnetCIDR(startIP, endIP, cidrSizeNum)) { - printError("For pod " + name + " in zone " + zoneName + ", please ensure that your start IP and end IP are in the same subnet, as per the pod's CIDR size."); - } - - if (!IPRangeConfig.sameSubnetCIDR(startIP, cidrAddress, cidrSizeNum)) { - printError("For pod " + name + " in zone " + zoneName + ", please ensure that your start IP is in the same subnet as the pod's CIDR address."); - } - - if (!IPRangeConfig.sameSubnetCIDR(endIP, cidrAddress, cidrSizeNum)) { - printError("For pod " + name + " in zone " + zoneName + ", please ensure that your end IP is in the same subnet as the pod's CIDR address."); - } - - if (privateIpRange != null) { - // Save the IP address range - iprc.saveIPRange("private", id, dataCenterId, -1, startIP, endIP, null, -1); - } + } + + // Check that the start IP and end IP match up with the CIDR + if (!IPRangeConfig.sameSubnetCIDR(startIP, endIP, cidrSizeNum)) { + printError("For pod " + name + " in zone " + zoneName + ", please ensure that your start IP and end IP are in the same subnet, as per the pod's CIDR size."); + } + + if (!IPRangeConfig.sameSubnetCIDR(startIP, cidrAddress, cidrSizeNum)) { + printError("For pod " + name + " in zone " + zoneName + ", please ensure that your start IP is in the same subnet as the pod's CIDR address."); + } + + if (!IPRangeConfig.sameSubnetCIDR(endIP, cidrAddress, cidrSizeNum)) { + printError("For pod " + name + " in zone " + zoneName + ", please ensure that your end IP is in the same subnet as the pod's CIDR address."); + } + + if (privateIpRange != null) { + // Save the IP address range + iprc.saveIPRange("private", id, dataCenterId, -1, startIP, endIP, null, -1); + } } @@ -900,30 +900,30 @@ public class DatabaseConfig { int ramSize = Integer.parseInt(_currentObjectParams.get("ramSize")); int speed = Integer.parseInt(_currentObjectParams.get("speed")); String useLocalStorageValue = _currentObjectParams.get("useLocalStorage"); - + // int nwRate = Integer.parseInt(_currentObjectParams.get("nwRate")); // int mcRate = Integer.parseInt(_currentObjectParams.get("mcRate")); boolean ha = Boolean.parseBoolean(_currentObjectParams.get("enableHA")); boolean mirroring = Boolean.parseBoolean(_currentObjectParams.get("mirrored")); - + boolean useLocalStorage; if (useLocalStorageValue != null) { - if (Boolean.parseBoolean(useLocalStorageValue)) { - useLocalStorage = true; - } else { - useLocalStorage 
= false; - } + if (Boolean.parseBoolean(useLocalStorageValue)) { + useLocalStorage = true; + } else { + useLocalStorage = false; + } } else { - useLocalStorage = false; + useLocalStorage = false; } - + ServiceOfferingVO serviceOffering = new ServiceOfferingVO(name, cpu, ramSize, speed, null, null, ha, displayText, useLocalStorage, false, null, false, null, false); - ServiceOfferingDaoImpl dao = LegacyComponentLocator.inject(ServiceOfferingDaoImpl.class); + ServiceOfferingDaoImpl dao = ComponentContext.inject(ServiceOfferingDaoImpl.class); try { dao.persist(serviceOffering); } catch (Exception e) { s_logger.error("error creating service offering", e); - + } /* String insertSql = "INSERT INTO `cloud`.`service_offering` (id, name, cpu, ram_size, speed, nw_rate, mc_rate, created, ha_enabled, mirrored, display_text, guest_ip_type, use_local_storage) " + @@ -937,9 +937,9 @@ public class DatabaseConfig { s_logger.error("error creating service offering", ex); return; } - */ + */ } - + @DB protected void saveDiskOffering() { long id = Long.parseLong(_currentObjectParams.get("id")); @@ -953,9 +953,9 @@ public class DatabaseConfig { String useLocal = _currentObjectParams.get("useLocal"); boolean local = false; if (useLocal != null) { - local = Boolean.parseBoolean(useLocal); + local = Boolean.parseBoolean(useLocal); } - + if (tags != null && tags.length() > 0) { String[] tokens = tags.split(","); StringBuilder newTags = new StringBuilder(); @@ -967,12 +967,12 @@ public class DatabaseConfig { } DiskOfferingVO diskOffering = new DiskOfferingVO(domainId, name, displayText, diskSpace , tags, false); diskOffering.setUseLocalStorage(local); - DiskOfferingDaoImpl offering = LegacyComponentLocator.inject(DiskOfferingDaoImpl.class); + DiskOfferingDaoImpl offering = ComponentContext.inject(DiskOfferingDaoImpl.class); try { offering.persist(diskOffering); } catch (Exception e) { s_logger.error("error creating disk offering", e); - + } /* String insertSql = "INSERT INTO `cloud`.`disk_offering` (id, domain_id, name, display_text, disk_size, mirrored, tags) " + @@ -987,37 +987,37 @@ public class DatabaseConfig { s_logger.error("error creating disk offering", ex); return; } - */ + */ } - + @DB protected void saveThrottlingRates() { - boolean saveNetworkThrottlingRate = (_networkThrottlingRate != null); - boolean saveMulticastThrottlingRate = (_multicastThrottlingRate != null); - - if (!saveNetworkThrottlingRate && !saveMulticastThrottlingRate) { + boolean saveNetworkThrottlingRate = (_networkThrottlingRate != null); + boolean saveMulticastThrottlingRate = (_multicastThrottlingRate != null); + + if (!saveNetworkThrottlingRate && !saveMulticastThrottlingRate) { return; } - - String insertNWRateSql = "UPDATE `cloud`.`service_offering` SET `nw_rate` = ?"; - String insertMCRateSql = "UPDATE `cloud`.`service_offering` SET `mc_rate` = ?"; - + + String insertNWRateSql = "UPDATE `cloud`.`service_offering` SET `nw_rate` = ?"; + String insertMCRateSql = "UPDATE `cloud`.`service_offering` SET `mc_rate` = ?"; + Transaction txn = Transaction.currentTxn(); - try { + try { PreparedStatement stmt; - + if (saveNetworkThrottlingRate) { - stmt = txn.prepareAutoCloseStatement(insertNWRateSql); - stmt.setString(1, _networkThrottlingRate); - stmt.executeUpdate(); + stmt = txn.prepareAutoCloseStatement(insertNWRateSql); + stmt.setString(1, _networkThrottlingRate); + stmt.executeUpdate(); } - + if (saveMulticastThrottlingRate) { - stmt = txn.prepareAutoCloseStatement(insertMCRateSql); - stmt.setString(1, _multicastThrottlingRate); - 
stmt.executeUpdate(); + stmt = txn.prepareAutoCloseStatement(insertMCRateSql); + stmt.setString(1, _multicastThrottlingRate); + stmt.executeUpdate(); } - + } catch (SQLException ex) { s_logger.error("error saving network and multicast throttling rates to all service offerings", ex); return; @@ -1026,7 +1026,7 @@ public class DatabaseConfig { // no configurable values for VM Template, hard-code the defaults for now private void saveVMTemplate() { - /* + /* long id = 1; String uniqueName = "routing"; String name = "DomR Template"; @@ -1051,8 +1051,8 @@ public class DatabaseConfig { } finally { txn.close(); } - */ -/* + */ + /* // do it again for console proxy template id = 2; uniqueName = "consoleproxy"; @@ -1060,7 +1060,7 @@ public class DatabaseConfig { isPublic = 0; path = "template/private/u000000/os/consoleproxy"; type = "ext3"; - + insertSql = "INSERT INTO `cloud`.`vm_template` (id, unique_name, name, public, path, created, type, hvm, bits, created_by, ready) " + "VALUES (" + id + ",'" + uniqueName + "','" + name + "'," + isPublic + ",'" + path + "',now(),'" + type + "'," + requiresHvm + "," + bits + "," + createdByUserId + "," + isReady + ")"; @@ -1074,7 +1074,7 @@ public class DatabaseConfig { } finally { txn.close(); } -*/ + */ } @DB @@ -1091,27 +1091,27 @@ public class DatabaseConfig { // insert system user insertSql = "INSERT INTO `cloud`.`user` (id, username, password, account_id, firstname, lastname, created)" + - " VALUES (1, 'system', RAND(), 1, 'system', 'cloud', now())"; - txn = Transaction.currentTxn(); - try { - PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); - stmt.executeUpdate(); - } catch (SQLException ex) { - s_logger.error("error creating system user", ex); - } - - // insert admin user + " VALUES (1, 'system', RAND(), 1, 'system', 'cloud', now())"; + txn = Transaction.currentTxn(); + try { + PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); + stmt.executeUpdate(); + } catch (SQLException ex) { + s_logger.error("error creating system user", ex); + } + + // insert admin user long id = Long.parseLong(_currentObjectParams.get("id")); String username = _currentObjectParams.get("username"); String firstname = _currentObjectParams.get("firstname"); String lastname = _currentObjectParams.get("lastname"); String password = _currentObjectParams.get("password"); String email = _currentObjectParams.get("email"); - + if (email == null || email.equals("")) { printError("An email address for each user is required."); } - + MessageDigest md5 = null; try { md5 = MessageDigest.getInstance("MD5"); @@ -1158,45 +1158,45 @@ public class DatabaseConfig { saveConfiguration(name, value, null); } } - + private void saveConfiguration() { String name = _currentObjectParams.get("name"); String value = _currentObjectParams.get("value"); String category = _currentObjectParams.get("category"); saveConfiguration(name, value, category); } - + @DB protected void saveConfiguration(String name, String value, String category) { String instance = "DEFAULT"; String description = s_configurationDescriptions.get(name); String component = s_configurationComponents.get(name); if (category == null) { - category = "Advanced"; + category = "Advanced"; } - + String instanceNameError = "Please enter a non-blank value for the field: "; if (name.equals("instance.name")) { - if (value == null || value.isEmpty() || !value.matches("^[A-Za-z0-9]{1,8}$")) { + if (value == null || value.isEmpty() || !value.matches("^[A-Za-z0-9]{1,8}$")) { printError(instanceNameError + "configuration: 
instance.name can not be empty and can only contain numbers and alphabets up to 8 characters long"); } } - + if (name.equals("network.throttling.rate")) { - if (value != null && !value.isEmpty()) { + if (value != null && !value.isEmpty()) { _networkThrottlingRate = value; } } - + if (name.equals("multicast.throttling.rate")) { - if (value != null && !value.isEmpty()) { + if (value != null && !value.isEmpty()) { _multicastThrottlingRate = value; } } String insertSql = "INSERT INTO `cloud`.`configuration` (instance, component, name, value, description, category) " + - "VALUES ('" + instance + "','" + component + "','" + name + "','" + value + "','" + description + "','" + category + "')"; - + "VALUES ('" + instance + "','" + component + "','" + name + "','" + value + "','" + description + "','" + category + "')"; + String selectSql = "SELECT name FROM cloud.configuration WHERE name = '" + name + "'"; Transaction txn = Transaction.currentTxn(); @@ -1205,38 +1205,38 @@ public class DatabaseConfig { ResultSet result = stmt.executeQuery(); Boolean hasRow = result.next(); if (!hasRow) { - stmt = txn.prepareAutoCloseStatement(insertSql); - stmt.executeUpdate(); + stmt = txn.prepareAutoCloseStatement(insertSql); + stmt.executeUpdate(); } } catch (SQLException ex) { s_logger.error("error creating configuration", ex); } } - + private boolean checkIpAddressRange(String ipAddressRange) { - String[] ipAddressRangeArray = ipAddressRange.split("\\-"); - String startIP = ipAddressRangeArray[0]; - String endIP = null; - if (ipAddressRangeArray.length > 1) { + String[] ipAddressRangeArray = ipAddressRange.split("\\-"); + String startIP = ipAddressRangeArray[0]; + String endIP = null; + if (ipAddressRangeArray.length > 1) { endIP = ipAddressRangeArray[1]; } - - if (!IPRangeConfig.validIP(startIP)) { - s_logger.error("The private IP address: " + startIP + " is invalid."); - return false; - } - - if (!IPRangeConfig.validOrBlankIP(endIP)) { - s_logger.error("The private IP address: " + endIP + " is invalid."); - return false; - } - - if (!IPRangeConfig.validIPRange(startIP, endIP)) { - s_logger.error("The IP range " + startIP + " -> " + endIP + " is invalid."); - return false; - } - - return true; + + if (!IPRangeConfig.validIP(startIP)) { + s_logger.error("The private IP address: " + startIP + " is invalid."); + return false; + } + + if (!IPRangeConfig.validOrBlankIP(endIP)) { + s_logger.error("The private IP address: " + endIP + " is invalid."); + return false; + } + + if (!IPRangeConfig.validIPRange(startIP, endIP)) { + s_logger.error("The IP range " + startIP + " -> " + endIP + " is invalid."); + return false; + } + + return true; } @DB @@ -1249,7 +1249,7 @@ public class DatabaseConfig { } catch (SQLException ex) { s_logger.error("error creating ROOT domain", ex); } - + /* String updateSql = "update account set domain_id = 1 where id = 2"; Transaction txn = Transaction.currentTxn(); @@ -1272,7 +1272,7 @@ public class DatabaseConfig { } finally { txn.close(); } - */ + */ } class DbConfigXMLHandler extends DefaultHandler { @@ -1295,14 +1295,14 @@ public class DatabaseConfig { @Override public void startElement(String s, String s1, String s2, Attributes attributes) throws SAXException { - if ("object".equals(s2)) { - _parent.setCurrentObjectName(convertName(attributes.getValue("name"))); - } else if ("field".equals(s2)) { - if (_currentObjectParams == null) { - _currentObjectParams = new HashMap(); - } - _currentFieldName = convertName(attributes.getValue("name")); - } else if 
(DatabaseConfig.objectNames.contains(s2)) { + if ("object".equals(s2)) { + _parent.setCurrentObjectName(convertName(attributes.getValue("name"))); + } else if ("field".equals(s2)) { + if (_currentObjectParams == null) { + _currentObjectParams = new HashMap(); + } + _currentFieldName = convertName(attributes.getValue("name")); + } else if (DatabaseConfig.objectNames.contains(s2)) { _parent.setCurrentObjectName(s2); } else if (DatabaseConfig.fieldNames.contains(s2)) { if (_currentObjectParams == null) { @@ -1313,95 +1313,95 @@ public class DatabaseConfig { } @Override - public void characters(char[] ch, int start, int length) throws SAXException { + public void characters(char[] ch, int start, int length) throws SAXException { if ((_currentObjectParams != null) && (_currentFieldName != null)) { String currentFieldVal = new String(ch, start, length); _currentObjectParams.put(_currentFieldName, currentFieldVal); } } - + private String convertName(String name) { - if (name.contains(".")){ - String[] nameArray = name.split("\\."); - for (int i = 1; i < nameArray.length; i++) { - String word = nameArray[i]; - nameArray[i] = word.substring(0, 1).toUpperCase() + word.substring(1).toLowerCase(); - } - name = ""; - for (int i = 0; i < nameArray.length; i++) { - name = name.concat(nameArray[i]); - } - } - return name; - } + if (name.contains(".")){ + String[] nameArray = name.split("\\."); + for (int i = 1; i < nameArray.length; i++) { + String word = nameArray[i]; + nameArray[i] = word.substring(0, 1).toUpperCase() + word.substring(1).toLowerCase(); + } + name = ""; + for (int i = 0; i < nameArray.length; i++) { + name = name.concat(nameArray[i]); + } + } + return name; + } } - + public static List genReturnList(String success, String message) { - List returnList = new ArrayList(2); - returnList.add(0, success); - returnList.add(1, message); - return returnList; - } - + List returnList = new ArrayList(2); + returnList.add(0, success); + returnList.add(1, message); + return returnList; + } + public static void printError(String message) { - System.out.println(message); - System.exit(1); + System.out.println(message); + System.exit(1); } public static String getDatabaseValueString(String selectSql, String name, String errorMsg) { - Transaction txn = Transaction.open("getDatabaseValueString"); - PreparedStatement stmt = null; - - try { - stmt = txn.prepareAutoCloseStatement(selectSql); - ResultSet rs = stmt.executeQuery(); - if (rs.next()) { - String value = rs.getString(name); - return value; - } else { + Transaction txn = Transaction.open("getDatabaseValueString"); + PreparedStatement stmt = null; + + try { + stmt = txn.prepareAutoCloseStatement(selectSql); + ResultSet rs = stmt.executeQuery(); + if (rs.next()) { + String value = rs.getString(name); + return value; + } else { return null; } - } catch (SQLException e) { - System.out.println("Exception: " + e.getMessage()); - printError(errorMsg); - } finally { - txn.close(); - } - return null; - } - - public static long getDatabaseValueLong(String selectSql, String name, String errorMsg) { - Transaction txn = Transaction.open("getDatabaseValueLong"); - PreparedStatement stmt = null; - - try { - stmt = txn.prepareAutoCloseStatement(selectSql); - ResultSet rs = stmt.executeQuery(); - if (rs.next()) { - return rs.getLong(name); - } else { - return -1; - } - } catch (SQLException e) { - System.out.println("Exception: " + e.getMessage()); - printError(errorMsg); - } finally { - txn.close(); - } - return -1; - } - - public static void saveSQL(String sql, 
String errorMsg) { - Transaction txn = Transaction.open("saveSQL"); - try { - PreparedStatement stmt = txn.prepareAutoCloseStatement(sql); - stmt.executeUpdate(); - } catch (SQLException ex) { - System.out.println("SQL Exception: " + ex.getMessage()); + } catch (SQLException e) { + System.out.println("Exception: " + e.getMessage()); printError(errorMsg); } finally { txn.close(); } - } - + return null; + } + + public static long getDatabaseValueLong(String selectSql, String name, String errorMsg) { + Transaction txn = Transaction.open("getDatabaseValueLong"); + PreparedStatement stmt = null; + + try { + stmt = txn.prepareAutoCloseStatement(selectSql); + ResultSet rs = stmt.executeQuery(); + if (rs.next()) { + return rs.getLong(name); + } else { + return -1; + } + } catch (SQLException e) { + System.out.println("Exception: " + e.getMessage()); + printError(errorMsg); + } finally { + txn.close(); + } + return -1; + } + + public static void saveSQL(String sql, String errorMsg) { + Transaction txn = Transaction.open("saveSQL"); + try { + PreparedStatement stmt = txn.prepareAutoCloseStatement(sql); + stmt.executeUpdate(); + } catch (SQLException ex) { + System.out.println("SQL Exception: " + ex.getMessage()); + printError(errorMsg); + } finally { + txn.close(); + } + } + } diff --git a/server/src/com/cloud/test/IPRangeConfig.java b/server/src/com/cloud/test/IPRangeConfig.java index c8bc76c3163..4b884f8c4b2 100755 --- a/server/src/com/cloud/test/IPRangeConfig.java +++ b/server/src/com/cloud/test/IPRangeConfig.java @@ -26,482 +26,482 @@ import java.util.List; import java.util.UUID; import java.util.Vector; -import com.cloud.utils.component.ComponentLocator; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; import com.cloud.utils.net.NetUtils; public class IPRangeConfig { - - public static void main(String[] args) { - IPRangeConfig config = ComponentLocator.inject(IPRangeConfig.class); - config.run(args); - System.exit(0); + + public static void main(String[] args) { + IPRangeConfig config = ComponentContext.inject(IPRangeConfig.class); + config.run(args); + System.exit(0); } - - private String usage() { - return "Usage: ./change_ip_range.sh [add|delete] [public zone | private pod zone] startIP endIP"; - } - - - public void run(String[] args) { - if (args.length < 2) { + + private String usage() { + return "Usage: ./change_ip_range.sh [add|delete] [public zone | private pod zone] startIP endIP"; + } + + + public void run(String[] args) { + if (args.length < 2) { printError(usage()); } - - String op = args[0]; - String type = args[1]; - - if (type.equals("public")) { - if (args.length != 4 && args.length != 5) { + + String op = args[0]; + String type = args[1]; + + if (type.equals("public")) { + if (args.length != 4 && args.length != 5) { printError(usage()); } - String zone = args[2]; - String startIP = args[3]; - String endIP = null; - if (args.length == 5) { + String zone = args[2]; + String startIP = args[3]; + String endIP = null; + if (args.length == 5) { endIP = args[4]; } - - String result = checkErrors(type, op, null, zone, startIP, endIP); - if (!result.equals("success")) { + + String result = checkErrors(type, op, null, zone, startIP, endIP); + if (!result.equals("success")) { printError(result); } - - long zoneId = PodZoneConfig.getZoneId(zone); - result = changeRange(op, "public", -1, zoneId, startIP, endIP, null, -1); - result.replaceAll("
", "/n"); - System.out.println(result); - } else if (type.equals("private")) { - if (args.length != 5 && args.length != 6) { + + long zoneId = PodZoneConfig.getZoneId(zone); + result = changeRange(op, "public", -1, zoneId, startIP, endIP, null, -1); + result.replaceAll("
", "/n"); + System.out.println(result); + } else if (type.equals("private")) { + if (args.length != 5 && args.length != 6) { printError(usage()); } - String pod = args[2]; - String zone = args[3];; - String startIP = args[4]; - String endIP = null; - if (args.length == 6) { + String pod = args[2]; + String zone = args[3];; + String startIP = args[4]; + String endIP = null; + if (args.length == 6) { endIP = args[5]; } - - String result = checkErrors(type, op, pod, zone, startIP, endIP); - if (!result.equals("success")) { + + String result = checkErrors(type, op, pod, zone, startIP, endIP); + if (!result.equals("success")) { printError(result); } - - long podId = PodZoneConfig.getPodId(pod, zone); - long zoneId = PodZoneConfig.getZoneId(zone); - result = changeRange(op, "private", podId, zoneId, startIP, endIP, null, -1); - result.replaceAll("
", "/n"); - System.out.println(result); - } else { - printError(usage()); - } - } - - public List changePublicIPRangeGUI(String op, String zone, String startIP, String endIP, long physicalNetworkId) { - String result = checkErrors("public", op, null, zone, startIP, endIP); - if (!result.equals("success")) { - return DatabaseConfig.genReturnList("false", result); - } - - long zoneId = PodZoneConfig.getZoneId(zone); - result = changeRange(op, "public", -1, zoneId, startIP, endIP, null, physicalNetworkId); - - return DatabaseConfig.genReturnList("true", result); - } - - public List changePrivateIPRangeGUI(String op, String pod, String zone, String startIP, String endIP) { - String result = checkErrors("private", op, pod, zone, startIP, endIP); - if (!result.equals("success")) { - return DatabaseConfig.genReturnList("false", result); - } - - long podId = PodZoneConfig.getPodId(pod, zone); - long zoneId = PodZoneConfig.getZoneId(zone); - result = changeRange(op, "private", podId, zoneId, startIP, endIP, null, -1); - - return DatabaseConfig.genReturnList("true", result); - } - private String checkErrors(String type, String op, String pod, String zone, String startIP, String endIP) { - if (!op.equals("add") && !op.equals("delete")) { + long podId = PodZoneConfig.getPodId(pod, zone); + long zoneId = PodZoneConfig.getZoneId(zone); + result = changeRange(op, "private", podId, zoneId, startIP, endIP, null, -1); + result.replaceAll("
", "/n"); + System.out.println(result); + } else { + printError(usage()); + } + } + + public List changePublicIPRangeGUI(String op, String zone, String startIP, String endIP, long physicalNetworkId) { + String result = checkErrors("public", op, null, zone, startIP, endIP); + if (!result.equals("success")) { + return DatabaseConfig.genReturnList("false", result); + } + + long zoneId = PodZoneConfig.getZoneId(zone); + result = changeRange(op, "public", -1, zoneId, startIP, endIP, null, physicalNetworkId); + + return DatabaseConfig.genReturnList("true", result); + } + + public List changePrivateIPRangeGUI(String op, String pod, String zone, String startIP, String endIP) { + String result = checkErrors("private", op, pod, zone, startIP, endIP); + if (!result.equals("success")) { + return DatabaseConfig.genReturnList("false", result); + } + + long podId = PodZoneConfig.getPodId(pod, zone); + long zoneId = PodZoneConfig.getZoneId(zone); + result = changeRange(op, "private", podId, zoneId, startIP, endIP, null, -1); + + return DatabaseConfig.genReturnList("true", result); + } + + private String checkErrors(String type, String op, String pod, String zone, String startIP, String endIP) { + if (!op.equals("add") && !op.equals("delete")) { return usage(); } - - if (type.equals("public")) { - // Check that the zone is valid - if (!PodZoneConfig.validZone(zone)) { + + if (type.equals("public")) { + // Check that the zone is valid + if (!PodZoneConfig.validZone(zone)) { return "Please specify a valid zone."; } - } else if (type.equals("private")) { - // Check that the pod and zone are valid - if (!PodZoneConfig.validZone(zone)) { + } else if (type.equals("private")) { + // Check that the pod and zone are valid + if (!PodZoneConfig.validZone(zone)) { return "Please specify a valid zone."; } - if (!PodZoneConfig.validPod(pod, zone)) { + if (!PodZoneConfig.validPod(pod, zone)) { return "Please specify a valid pod."; } - } - - if (!validIP(startIP)) { + } + + if (!validIP(startIP)) { return "Please specify a valid start IP"; } - - if (!validOrBlankIP(endIP)) { + + if (!validOrBlankIP(endIP)) { return "Please specify a valid end IP"; } - - // Check that the IPs that are being added are compatible with either the zone's public netmask, or the pod's CIDR - if (type.equals("public")) { - // String publicNetmask = getPublicNetmask(zone); - // String publicGateway = getPublicGateway(zone); - - // if (publicNetmask == null) return "Please ensure that your zone's public net mask is specified"; - // if (!sameSubnet(startIP, endIP, publicNetmask)) return "Please ensure that your start IP and end IP are in the same subnet, as per the zone's netmask."; - // if (!sameSubnet(startIP, publicGateway, publicNetmask)) return "Please ensure that your start IP is in the same subnet as your zone's gateway, as per the zone's netmask."; - // if (!sameSubnet(endIP, publicGateway, publicNetmask)) return "Please ensure that your end IP is in the same subnet as your zone's gateway, as per the zone's netmask."; - } else if (type.equals("private")) { - String cidrAddress = getCidrAddress(pod, zone); - long cidrSize = getCidrSize(pod, zone); - if (!sameSubnetCIDR(startIP, endIP, cidrSize)) { + // Check that the IPs that are being added are compatible with either the zone's public netmask, or the pod's CIDR + if (type.equals("public")) { + // String publicNetmask = getPublicNetmask(zone); + // String publicGateway = getPublicGateway(zone); + + // if (publicNetmask == null) return "Please ensure that your zone's public net mask is 
specified"; + // if (!sameSubnet(startIP, endIP, publicNetmask)) return "Please ensure that your start IP and end IP are in the same subnet, as per the zone's netmask."; + // if (!sameSubnet(startIP, publicGateway, publicNetmask)) return "Please ensure that your start IP is in the same subnet as your zone's gateway, as per the zone's netmask."; + // if (!sameSubnet(endIP, publicGateway, publicNetmask)) return "Please ensure that your end IP is in the same subnet as your zone's gateway, as per the zone's netmask."; + } else if (type.equals("private")) { + String cidrAddress = getCidrAddress(pod, zone); + long cidrSize = getCidrSize(pod, zone); + + if (!sameSubnetCIDR(startIP, endIP, cidrSize)) { return "Please ensure that your start IP and end IP are in the same subnet, as per the pod's CIDR size."; } - if (!sameSubnetCIDR(startIP, cidrAddress, cidrSize)) { + if (!sameSubnetCIDR(startIP, cidrAddress, cidrSize)) { return "Please ensure that your start IP is in the same subnet as the pod's CIDR address."; } - if (!sameSubnetCIDR(endIP, cidrAddress, cidrSize)) { + if (!sameSubnetCIDR(endIP, cidrAddress, cidrSize)) { return "Please ensure that your end IP is in the same subnet as the pod's CIDR address."; } - } - - if (!validIPRange(startIP, endIP)) { + } + + if (!validIPRange(startIP, endIP)) { return "Please specify a valid IP range."; } - - return "success"; - } - - private String genChangeRangeSuccessString(List problemIPs, String op) { - if (problemIPs == null) { + + return "success"; + } + + private String genChangeRangeSuccessString(List problemIPs, String op) { + if (problemIPs == null) { return ""; } - - if (problemIPs.size() == 0) { - if (op.equals("add")) { + + if (problemIPs.size() == 0) { + if (op.equals("add")) { return "Successfully added all IPs in the specified range."; } else if (op.equals("delete")) { return "Successfully deleted all IPs in the specified range."; } else { return ""; } - } else { - String successString = ""; - if (op.equals("add")) { + } else { + String successString = ""; + if (op.equals("add")) { successString += "Failed to add the following IPs, because they are already in the database:
\n\n
"; } else if (op.equals("delete")) { successString += "Failed to delete the following IPs, because they are in use:
\n\n
"; } - - for (int i = 0; i < problemIPs.size(); i++) { - successString += problemIPs.get(i); - if (i != (problemIPs.size() - 1)) { + + for (int i = 0; i < problemIPs.size(); i++) { + successString += problemIPs.get(i); + if (i != (problemIPs.size() - 1)) { successString += ", "; } - } - - successString += "
\n\n
"; - - if (op.equals("add")) { + } + + successString += "
\n\n
"; + + if (op.equals("add")) { successString += "Successfully added all other IPs in the specified range."; } else if (op.equals("delete")) { successString += "Successfully deleted all other IPs in the specified range."; } - - return successString; - } - } - - private String changeRange(String op, String type, long podId, long zoneId, String startIP, String endIP, Long networkId, long physicalNetworkId) { - - // Go through all the IPs and add or delete them - List problemIPs = null; - if (op.equals("add")) { - problemIPs = saveIPRange(type, podId, zoneId, 1, startIP, endIP, networkId, physicalNetworkId); - } else if (op.equals("delete")) { - problemIPs = deleteIPRange(type, podId, zoneId, 1, startIP, endIP); - } - - if (problemIPs == null) { + + return successString; + } + } + + private String changeRange(String op, String type, long podId, long zoneId, String startIP, String endIP, Long networkId, long physicalNetworkId) { + + // Go through all the IPs and add or delete them + List problemIPs = null; + if (op.equals("add")) { + problemIPs = saveIPRange(type, podId, zoneId, 1, startIP, endIP, networkId, physicalNetworkId); + } else if (op.equals("delete")) { + problemIPs = deleteIPRange(type, podId, zoneId, 1, startIP, endIP); + } + + if (problemIPs == null) { return null; } else { return genChangeRangeSuccessString(problemIPs, op); } - } - - private String genSuccessString(Vector problemIPs, String op) { - if (problemIPs == null) { + } + + private String genSuccessString(Vector problemIPs, String op) { + if (problemIPs == null) { return ""; } - - if (problemIPs.size() == 0) { - if (op.equals("add")) { + + if (problemIPs.size() == 0) { + if (op.equals("add")) { return "Successfully added all IPs in the specified range."; } else if (op.equals("delete")) { return "Successfully deleted all IPs in the specified range."; } else { return ""; } - } else { - String successString = ""; - if (op.equals("add")) { + } else { + String successString = ""; + if (op.equals("add")) { successString += "Failed to add the following IPs, because they are already in the database:
\n\n
"; } else if (op.equals("delete")) { successString += "Failed to delete the following IPs, because they are in use:
\n\n
"; } - - for (int i = 0; i < problemIPs.size(); i++) { - successString += problemIPs.elementAt(i); - if (i != (problemIPs.size() - 1)) { + + for (int i = 0; i < problemIPs.size(); i++) { + successString += problemIPs.elementAt(i); + if (i != (problemIPs.size() - 1)) { successString += ", "; } - } - - successString += "
\n\n
"; - - if (op.equals("add")) { + } + + successString += "
\n\n
"; + + if (op.equals("add")) { successString += "Successfully added all other IPs in the specified range."; } else if (op.equals("delete")) { successString += "Successfully deleted all other IPs in the specified range."; } - - return successString; - } - } - - public static String getCidrAddress(String pod, String zone) { - long dcId = PodZoneConfig.getZoneId(zone); - String selectSql = "SELECT * FROM `cloud`.`host_pod_ref` WHERE name = \"" + pod + "\" AND data_center_id = \"" + dcId + "\""; - String errorMsg = "Could not read CIDR address for pod/zone: " + pod + "/" + zone + " from database. Please contact Cloud Support."; - return DatabaseConfig.getDatabaseValueString(selectSql, "cidr_address", errorMsg); - } - - public static long getCidrSize(String pod, String zone) { - long dcId = PodZoneConfig.getZoneId(zone); - String selectSql = "SELECT * FROM `cloud`.`host_pod_ref` WHERE name = \"" + pod + "\" AND data_center_id = \"" + dcId + "\""; - String errorMsg = "Could not read CIDR address for pod/zone: " + pod + "/" + zone + " from database. Please contact Cloud Support."; - return DatabaseConfig.getDatabaseValueLong(selectSql, "cidr_size", errorMsg); - } - @DB - protected Vector deleteIPRange(String type, long podId, long zoneId, long vlanDbId, String startIP, String endIP) { - long startIPLong = NetUtils.ip2Long(startIP); - long endIPLong = startIPLong; - if (endIP != null) { + return successString; + } + } + + public static String getCidrAddress(String pod, String zone) { + long dcId = PodZoneConfig.getZoneId(zone); + String selectSql = "SELECT * FROM `cloud`.`host_pod_ref` WHERE name = \"" + pod + "\" AND data_center_id = \"" + dcId + "\""; + String errorMsg = "Could not read CIDR address for pod/zone: " + pod + "/" + zone + " from database. Please contact Cloud Support."; + return DatabaseConfig.getDatabaseValueString(selectSql, "cidr_address", errorMsg); + } + + public static long getCidrSize(String pod, String zone) { + long dcId = PodZoneConfig.getZoneId(zone); + String selectSql = "SELECT * FROM `cloud`.`host_pod_ref` WHERE name = \"" + pod + "\" AND data_center_id = \"" + dcId + "\""; + String errorMsg = "Could not read CIDR address for pod/zone: " + pod + "/" + zone + " from database. Please contact Cloud Support."; + return DatabaseConfig.getDatabaseValueLong(selectSql, "cidr_size", errorMsg); + } + + @DB + protected Vector deleteIPRange(String type, long podId, long zoneId, long vlanDbId, String startIP, String endIP) { + long startIPLong = NetUtils.ip2Long(startIP); + long endIPLong = startIPLong; + if (endIP != null) { endIPLong = NetUtils.ip2Long(endIP); } - - Transaction txn = Transaction.currentTxn(); - Vector problemIPs = null; - if (type.equals("public")) { + + Transaction txn = Transaction.currentTxn(); + Vector problemIPs = null; + if (type.equals("public")) { problemIPs = deletePublicIPRange(txn, startIPLong, endIPLong, vlanDbId); } else if (type.equals("private")) { problemIPs = deletePrivateIPRange(txn, startIPLong, endIPLong, podId, zoneId); } - - return problemIPs; - } - - private Vector deletePublicIPRange(Transaction txn, long startIP, long endIP, long vlanDbId) { - String deleteSql = "DELETE FROM `cloud`.`user_ip_address` WHERE public_ip_address = ? AND vlan_id = ?"; - String isPublicIPAllocatedSelectSql = "SELECT * FROM `cloud`.`user_ip_address` WHERE public_ip_address = ? 
AND vlan_id = ?"; - - Vector problemIPs = new Vector(); - PreparedStatement stmt = null; - PreparedStatement isAllocatedStmt = null; - - Connection conn = null; - try { - conn = txn.getConnection(); - stmt = conn.prepareStatement(deleteSql); - isAllocatedStmt = conn.prepareStatement(isPublicIPAllocatedSelectSql); - } catch (SQLException e) { - return null; - } - - while (startIP <= endIP) { - if (!isPublicIPAllocated(startIP, vlanDbId, isAllocatedStmt)) { - try { - stmt.clearParameters(); - stmt.setLong(1, startIP); - stmt.setLong(2, vlanDbId); - stmt.executeUpdate(); - } catch (Exception ex) { - } - } else { - problemIPs.add(NetUtils.long2Ip(startIP)); - } - startIP += 1; - } - - return problemIPs; - } - - private Vector deletePrivateIPRange(Transaction txn, long startIP, long endIP, long podId, long zoneId) { - String deleteSql = "DELETE FROM `cloud`.`op_dc_ip_address_alloc` WHERE ip_address = ? AND pod_id = ? AND data_center_id = ?"; - String isPrivateIPAllocatedSelectSql = "SELECT * FROM `cloud`.`op_dc_ip_address_alloc` WHERE ip_address = ? AND data_center_id = ? AND pod_id = ?"; - - Vector problemIPs = new Vector(); - PreparedStatement stmt = null; - PreparedStatement isAllocatedStmt = null; - - Connection conn = null; - try { - conn = txn.getConnection(); - stmt = conn.prepareStatement(deleteSql); - isAllocatedStmt = conn.prepareStatement(isPrivateIPAllocatedSelectSql); - } catch (SQLException e) { - System.out.println("Exception: " + e.getMessage()); - printError("Unable to start DB connection to delete private IPs. Please contact Cloud Support."); - } - - while (startIP <= endIP) { - if (!isPrivateIPAllocated(NetUtils.long2Ip(startIP), podId, zoneId, isAllocatedStmt)) { - try { - stmt.clearParameters(); - stmt.setString(1, NetUtils.long2Ip(startIP)); - stmt.setLong(2, podId); - stmt.setLong(3, zoneId); - stmt.executeUpdate(); - } catch (Exception ex) { - } - } else { - problemIPs.add(NetUtils.long2Ip(startIP)); - } - startIP += 1; - } return problemIPs; - } - - private boolean isPublicIPAllocated(long ip, long vlanDbId, PreparedStatement stmt) { - try { - stmt.clearParameters(); - stmt.setLong(1, ip); - stmt.setLong(2, vlanDbId); - ResultSet rs = stmt.executeQuery(); - if (rs.next()) { + } + + private Vector deletePublicIPRange(Transaction txn, long startIP, long endIP, long vlanDbId) { + String deleteSql = "DELETE FROM `cloud`.`user_ip_address` WHERE public_ip_address = ? AND vlan_id = ?"; + String isPublicIPAllocatedSelectSql = "SELECT * FROM `cloud`.`user_ip_address` WHERE public_ip_address = ? AND vlan_id = ?"; + + Vector problemIPs = new Vector(); + PreparedStatement stmt = null; + PreparedStatement isAllocatedStmt = null; + + Connection conn = null; + try { + conn = txn.getConnection(); + stmt = conn.prepareStatement(deleteSql); + isAllocatedStmt = conn.prepareStatement(isPublicIPAllocatedSelectSql); + } catch (SQLException e) { + return null; + } + + while (startIP <= endIP) { + if (!isPublicIPAllocated(startIP, vlanDbId, isAllocatedStmt)) { + try { + stmt.clearParameters(); + stmt.setLong(1, startIP); + stmt.setLong(2, vlanDbId); + stmt.executeUpdate(); + } catch (Exception ex) { + } + } else { + problemIPs.add(NetUtils.long2Ip(startIP)); + } + startIP += 1; + } + + return problemIPs; + } + + private Vector deletePrivateIPRange(Transaction txn, long startIP, long endIP, long podId, long zoneId) { + String deleteSql = "DELETE FROM `cloud`.`op_dc_ip_address_alloc` WHERE ip_address = ? AND pod_id = ? 
AND data_center_id = ?"; + String isPrivateIPAllocatedSelectSql = "SELECT * FROM `cloud`.`op_dc_ip_address_alloc` WHERE ip_address = ? AND data_center_id = ? AND pod_id = ?"; + + Vector problemIPs = new Vector(); + PreparedStatement stmt = null; + PreparedStatement isAllocatedStmt = null; + + Connection conn = null; + try { + conn = txn.getConnection(); + stmt = conn.prepareStatement(deleteSql); + isAllocatedStmt = conn.prepareStatement(isPrivateIPAllocatedSelectSql); + } catch (SQLException e) { + System.out.println("Exception: " + e.getMessage()); + printError("Unable to start DB connection to delete private IPs. Please contact Cloud Support."); + } + + while (startIP <= endIP) { + if (!isPrivateIPAllocated(NetUtils.long2Ip(startIP), podId, zoneId, isAllocatedStmt)) { + try { + stmt.clearParameters(); + stmt.setString(1, NetUtils.long2Ip(startIP)); + stmt.setLong(2, podId); + stmt.setLong(3, zoneId); + stmt.executeUpdate(); + } catch (Exception ex) { + } + } else { + problemIPs.add(NetUtils.long2Ip(startIP)); + } + startIP += 1; + } + + return problemIPs; + } + + private boolean isPublicIPAllocated(long ip, long vlanDbId, PreparedStatement stmt) { + try { + stmt.clearParameters(); + stmt.setLong(1, ip); + stmt.setLong(2, vlanDbId); + ResultSet rs = stmt.executeQuery(); + if (rs.next()) { return (rs.getString("allocated") != null); } else { return false; } } catch (SQLException ex) { - System.out.println(ex.getMessage()); + System.out.println(ex.getMessage()); return true; } - } - - private boolean isPrivateIPAllocated(String ip, long podId, long zoneId, PreparedStatement stmt) { - try { - stmt.clearParameters(); - stmt.setString(1, ip); - stmt.setLong(2, zoneId); - stmt.setLong(3, podId); - ResultSet rs = stmt.executeQuery(); - if (rs.next()) { + } + + private boolean isPrivateIPAllocated(String ip, long podId, long zoneId, PreparedStatement stmt) { + try { + stmt.clearParameters(); + stmt.setString(1, ip); + stmt.setLong(2, zoneId); + stmt.setLong(3, podId); + ResultSet rs = stmt.executeQuery(); + if (rs.next()) { return (rs.getString("taken") != null); } else { return false; } } catch (SQLException ex) { - System.out.println(ex.getMessage()); + System.out.println(ex.getMessage()); return true; } - } - - @DB - public List saveIPRange(String type, long podId, long zoneId, long vlanDbId, String startIP, String endIP, Long sourceNetworkId, long physicalNetworkId) { - long startIPLong = NetUtils.ip2Long(startIP); - long endIPLong = startIPLong; - if (endIP != null) { + } + + @DB + public List saveIPRange(String type, long podId, long zoneId, long vlanDbId, String startIP, String endIP, Long sourceNetworkId, long physicalNetworkId) { + long startIPLong = NetUtils.ip2Long(startIP); + long endIPLong = startIPLong; + if (endIP != null) { endIPLong = NetUtils.ip2Long(endIP); } - - Transaction txn = Transaction.currentTxn(); - List problemIPs = null; - - if (type.equals("public")) { + + Transaction txn = Transaction.currentTxn(); + List problemIPs = null; + + if (type.equals("public")) { problemIPs = savePublicIPRange(txn, startIPLong, endIPLong, zoneId, vlanDbId, sourceNetworkId, physicalNetworkId); } else if (type.equals("private")) { problemIPs = savePrivateIPRange(txn, startIPLong, endIPLong, podId, zoneId); } - - String[] linkLocalIps = NetUtils.getLinkLocalIPRange(10); - long startLinkLocalIp = NetUtils.ip2Long(linkLocalIps[0]); - long endLinkLocalIp = NetUtils.ip2Long(linkLocalIps[1]); - - saveLinkLocalPrivateIPRange(txn, startLinkLocalIp, endLinkLocalIp, podId, zoneId); - - return 
problemIPs; + + String[] linkLocalIps = NetUtils.getLinkLocalIPRange(10); + long startLinkLocalIp = NetUtils.ip2Long(linkLocalIps[0]); + long endLinkLocalIp = NetUtils.ip2Long(linkLocalIps[1]); + + saveLinkLocalPrivateIPRange(txn, startLinkLocalIp, endLinkLocalIp, podId, zoneId); + + return problemIPs; } - - public Vector savePublicIPRange(Transaction txn, long startIP, long endIP, long zoneId, long vlanDbId, Long sourceNetworkId, long physicalNetworkId) { - String insertSql = "INSERT INTO `cloud`.`user_ip_address` (public_ip_address, data_center_id, vlan_db_id, mac_address, source_network_id, physical_network_id, uuid) VALUES (?, ?, ?, (select mac_address from `cloud`.`data_center` where id=?), ?, ?, ?)"; - String updateSql = "UPDATE `cloud`.`data_center` set mac_address = mac_address+1 where id=?"; - Vector problemIPs = new Vector(); - PreparedStatement stmt = null; - - Connection conn = null; - try { - conn = txn.getConnection(); - } catch (SQLException e) { - return null; - } - + + public Vector savePublicIPRange(Transaction txn, long startIP, long endIP, long zoneId, long vlanDbId, Long sourceNetworkId, long physicalNetworkId) { + String insertSql = "INSERT INTO `cloud`.`user_ip_address` (public_ip_address, data_center_id, vlan_db_id, mac_address, source_network_id, physical_network_id, uuid) VALUES (?, ?, ?, (select mac_address from `cloud`.`data_center` where id=?), ?, ?, ?)"; + String updateSql = "UPDATE `cloud`.`data_center` set mac_address = mac_address+1 where id=?"; + Vector problemIPs = new Vector(); + PreparedStatement stmt = null; + + Connection conn = null; + try { + conn = txn.getConnection(); + } catch (SQLException e) { + return null; + } + while (startIP <= endIP) { try { - stmt = conn.prepareStatement(insertSql); - stmt.setString(1, NetUtils.long2Ip(startIP)); - stmt.setLong(2, zoneId); - stmt.setLong(3, vlanDbId); - stmt.setLong(4, zoneId); - stmt.setLong(5, sourceNetworkId); - stmt.setLong(6, physicalNetworkId); - stmt.setString(7, UUID.randomUUID().toString()); - stmt.executeUpdate(); - stmt.close(); - stmt = conn.prepareStatement(updateSql); - stmt.setLong(1, zoneId); - stmt.executeUpdate(); - stmt.close(); + stmt = conn.prepareStatement(insertSql); + stmt.setString(1, NetUtils.long2Ip(startIP)); + stmt.setLong(2, zoneId); + stmt.setLong(3, vlanDbId); + stmt.setLong(4, zoneId); + stmt.setLong(5, sourceNetworkId); + stmt.setLong(6, physicalNetworkId); + stmt.setString(7, UUID.randomUUID().toString()); + stmt.executeUpdate(); + stmt.close(); + stmt = conn.prepareStatement(updateSql); + stmt.setLong(1, zoneId); + stmt.executeUpdate(); + stmt.close(); } catch (Exception ex) { problemIPs.add(NetUtils.long2Ip(startIP)); } startIP++; } - + return problemIPs; - } - - public List savePrivateIPRange(Transaction txn, long startIP, long endIP, long podId, long zoneId) { - String insertSql = "INSERT INTO `cloud`.`op_dc_ip_address_alloc` (ip_address, data_center_id, pod_id, mac_address) VALUES (?, ?, ?, (select mac_address from `cloud`.`data_center` where id=?))"; + } + + public List savePrivateIPRange(Transaction txn, long startIP, long endIP, long podId, long zoneId) { + String insertSql = "INSERT INTO `cloud`.`op_dc_ip_address_alloc` (ip_address, data_center_id, pod_id, mac_address) VALUES (?, ?, ?, (select mac_address from `cloud`.`data_center` where id=?))"; String updateSql = "UPDATE `cloud`.`data_center` set mac_address = mac_address+1 where id=?"; - Vector problemIPs = new Vector(); - + Vector problemIPs = new Vector(); + try { Connection conn = null; conn = 
            txn.getConnection();
         while (startIP <= endIP) {
             try {
                 PreparedStatement stmt = conn.prepareStatement(insertSql);
-                stmt.setString(1, NetUtils.long2Ip(startIP));
-                stmt.setLong(2, zoneId);
-                stmt.setLong(3, podId);
-                stmt.setLong(4, zoneId);
-                stmt.executeUpdate();
-                stmt.close();
+                stmt.setString(1, NetUtils.long2Ip(startIP));
+                stmt.setLong(2, zoneId);
+                stmt.setLong(3, podId);
+                stmt.setLong(4, zoneId);
+                stmt.executeUpdate();
+                stmt.close();
                 stmt = conn.prepareStatement(updateSql);
                 stmt.setLong(1, zoneId);
                 stmt.executeUpdate();
@@ -515,30 +515,30 @@ public class IPRangeConfig {
             System.out.print(ex.getMessage());
             ex.printStackTrace();
         }
-
+
         return problemIPs;
-    }
-
-    private Vector saveLinkLocalPrivateIPRange(Transaction txn, long startIP, long endIP, long podId, long zoneId) {
-        String insertSql = "INSERT INTO `cloud`.`op_dc_link_local_ip_address_alloc` (ip_address, data_center_id, pod_id) VALUES (?, ?, ?)";
-        Vector problemIPs = new Vector();
-
-        Connection conn = null;
-        try {
-            conn = txn.getConnection();
-        } catch (SQLException e) {
-            System.out.println("Exception: " + e.getMessage());
-            printError("Unable to start DB connection to save private IPs. Please contact Cloud Support.");
-        }
-
+    }
+
+    private Vector saveLinkLocalPrivateIPRange(Transaction txn, long startIP, long endIP, long podId, long zoneId) {
+        String insertSql = "INSERT INTO `cloud`.`op_dc_link_local_ip_address_alloc` (ip_address, data_center_id, pod_id) VALUES (?, ?, ?)";
+        Vector problemIPs = new Vector();
+
+        Connection conn = null;
+        try {
+            conn = txn.getConnection();
+        } catch (SQLException e) {
+            System.out.println("Exception: " + e.getMessage());
+            printError("Unable to start DB connection to save private IPs. Please contact Cloud Support.");
+        }
+
         try {
             long start = startIP;
             PreparedStatement stmt = conn.prepareStatement(insertSql);
             while (startIP <= endIP) {
-                stmt.setString(1, NetUtils.long2Ip(startIP++));
-                stmt.setLong(2, zoneId);
-                stmt.setLong(3, podId);
-                stmt.addBatch();
+                stmt.setString(1, NetUtils.long2Ip(startIP++));
+                stmt.setLong(2, zoneId);
+                stmt.setLong(3, podId);
+                stmt.addBatch();
             }
             int[] results = stmt.executeBatch();
             for (int i = 0; i < results.length; i += 2) {
@@ -547,28 +547,28 @@ public class IPRangeConfig {
                 }
             }
             stmt.close();
-        } catch (Exception ex) {
+        } catch (Exception ex) {
         }
-
+
         return problemIPs;
-    }
-
-    public static String getPublicNetmask(String zone) {
-        return DatabaseConfig.getDatabaseValueString("SELECT * FROM `cloud`.`data_center` WHERE name = \"" + zone + "\"", "netmask",
-            "Unable to start DB connection to read public netmask. Please contact Cloud Support.");
-    }
-
-    public static String getPublicGateway(String zone) {
-        return DatabaseConfig.getDatabaseValueString("SELECT * FROM `cloud`.`data_center` WHERE name = \"" + zone + "\"", "gateway",
-            "Unable to start DB connection to read public gateway. Please contact Cloud Support.");
-    }
-
-    public static String getGuestNetworkCidr(Long zoneId)
-    {
-        return DatabaseConfig.getDatabaseValueString("SELECT * FROM `cloud`.`data_center` WHERE id = \"" + zoneId + "\"","guest_network_cidr",
-            "Unable to start DB connection to read guest cidr network. Please contact Cloud Support.");
-    }
-
+    }
+
+    public static String getPublicNetmask(String zone) {
+        return DatabaseConfig.getDatabaseValueString("SELECT * FROM `cloud`.`data_center` WHERE name = \"" + zone + "\"", "netmask",
+            "Unable to start DB connection to read public netmask. Please contact Cloud Support.");
+    }
+
+    public static String getPublicGateway(String zone) {
+        return DatabaseConfig.getDatabaseValueString("SELECT * FROM `cloud`.`data_center` WHERE name = \"" + zone + "\"", "gateway",
+            "Unable to start DB connection to read public gateway. Please contact Cloud Support.");
+    }
+
+    public static String getGuestNetworkCidr(Long zoneId)
+    {
+        return DatabaseConfig.getDatabaseValueString("SELECT * FROM `cloud`.`data_center` WHERE id = \"" + zoneId + "\"","guest_network_cidr",
+            "Unable to start DB connection to read guest cidr network. Please contact Cloud Support.");
+    }
+
     // public static String getGuestIpNetwork() {
     //     return DatabaseConfig.getDatabaseValueString("SELECT * FROM `cloud`.`configuration` WHERE name = \"guest.ip.network\"", "value",
     //         "Unable to start DB connection to read guest IP network. Please contact Cloud Support.");
     // }
@@ -578,7 +578,7 @@ public class IPRangeConfig {
     //     return DatabaseConfig.getDatabaseValueString("SELECT * FROM `cloud`.`configuration` WHERE name = \"guest.netmask\"", "value",
     //         "Unable to start DB connection to read guest netmask. Please contact Cloud Support.");
     // }
-
+
     // public static String getGuestSubnet() {
     //     String guestIpNetwork = getGuestIpNetwork();
     //     String guestNetmask = getGuestNetmask();
@@ -593,9 +593,9 @@ public class IPRangeConfig {
     //     String guestNetmask = getGuestNetmask();
     //     return NetUtils.getCidrSize(guestNetmask);
     // }
-
-    public static boolean validCIDR(final String cidr) {
-        if (cidr == null || cidr.isEmpty()) {
+
+    public static boolean validCIDR(final String cidr) {
+        if (cidr == null || cidr.isEmpty()) {
             return false;
         }
         String[] cidrPair = cidr.split("\\/");
@@ -608,92 +608,92 @@ public class IPRangeConfig {
             return false;
         }
         int cidrSizeNum = -1;
-
+
         try {
-            cidrSizeNum = Integer.parseInt(cidrSize);
+            cidrSizeNum = Integer.parseInt(cidrSize);
         } catch (Exception e) {
-            return false;
+            return false;
         }
-
+
         if (cidrSizeNum < 1 || cidrSizeNum > 32) {
             return false;
         }
-
+
         return true;
-    }
-
-    public static boolean validOrBlankIP(final String ip) {
-        if (ip == null || ip.isEmpty()) {
+    }
+
+    public static boolean validOrBlankIP(final String ip) {
+        if (ip == null || ip.isEmpty()) {
             return true;
         }
-        return validIP(ip);
-    }
-
-    public static boolean validIP(final String ip) {
-        final String[] ipAsList = ip.split("\\.");
-
-        // The IP address must have four octets
-        if (Array.getLength(ipAsList) != 4) {
-            return false;
-        }
-
-        for (int i = 0; i < 4; i++) {
-            // Each octet must be an integer
-            final String octetString = ipAsList[i];
-            int octet;
-            try {
-                octet = Integer.parseInt(octetString);
-            } catch(final Exception e) {
-                return false;
-            }
-            // Each octet must be between 0 and 255, inclusive
-            if (octet < 0 || octet > 255) {
+        return validIP(ip);
+    }
+
+    public static boolean validIP(final String ip) {
+        final String[] ipAsList = ip.split("\\.");
+
+        // The IP address must have four octets
+        if (Array.getLength(ipAsList) != 4) {
+            return false;
+        }
+
+        for (int i = 0; i < 4; i++) {
+            // Each octet must be an integer
+            final String octetString = ipAsList[i];
+            int octet;
+            try {
+                octet = Integer.parseInt(octetString);
+            } catch(final Exception e) {
+                return false;
+            }
+            // Each octet must be between 0 and 255, inclusive
+            if (octet < 0 || octet > 255) {
                 return false;
             }
-            // Each octetString must have between 1 and 3 characters
-            if (octetString.length() < 1 || octetString.length() > 3) {
+            // Each octetString must have between 1 and 3 characters
+            if (octetString.length() < 1 || octetString.length() > 3) {
                 return false;
             }
-
-        }
-
-        // IP is good, return true
-        return true;
-    }
-
+
+        }
+
+        // IP is good, return true
+        return true;
+    }
+
     public static boolean validIPRange(String startIP, String endIP) {
-        if (endIP == null || endIP.isEmpty()) {
+        if (endIP == null || endIP.isEmpty()) {
             return true;
         }
-
-        long startIPLong = NetUtils.ip2Long(startIP);
-        long endIPLong = NetUtils.ip2Long(endIP);
-        return (startIPLong < endIPLong);
+
+        long startIPLong = NetUtils.ip2Long(startIP);
+        long endIPLong = NetUtils.ip2Long(endIP);
+        return (startIPLong < endIPLong);
     }
-
+
     public static boolean sameSubnet(final String ip1, final String ip2, final String netmask) {
-        if (ip1 == null || ip1.isEmpty() || ip2 == null || ip2.isEmpty()) {
+        if (ip1 == null || ip1.isEmpty() || ip2 == null || ip2.isEmpty()) {
             return true;
         }
-        String subnet1 = NetUtils.getSubNet(ip1, netmask);
-        String subnet2 = NetUtils.getSubNet(ip2, netmask);
-
-        return (subnet1.equals(subnet2));
+        String subnet1 = NetUtils.getSubNet(ip1, netmask);
+        String subnet2 = NetUtils.getSubNet(ip2, netmask);
+
+        return (subnet1.equals(subnet2));
     }
-
+
     public static boolean sameSubnetCIDR(final String ip1, final String ip2, final long cidrSize) {
-        if (ip1 == null || ip1.isEmpty() || ip2 == null || ip2.isEmpty()) {
+        if (ip1 == null || ip1.isEmpty() || ip2 == null || ip2.isEmpty()) {
             return true;
         }
-        String subnet1 = NetUtils.getCidrSubNet(ip1, cidrSize);
-        String subnet2 = NetUtils.getCidrSubNet(ip2, cidrSize);
-
-        return (subnet1.equals(subnet2));
+        String subnet1 = NetUtils.getCidrSubNet(ip1, cidrSize);
+        String subnet2 = NetUtils.getCidrSubNet(ip2, cidrSize);
+
+        return (subnet1.equals(subnet2));
     }
-
-    private static void printError(String message) {
-        DatabaseConfig.printError(message);
-    }
-
+
+    private static void printError(String message) {
+        DatabaseConfig.printError(message);
+    }
+
 }
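Editor's note: the validCIDR/validIP helpers above accept a dotted quad whose four octets are 1-3 characters long and in the range 0-255, plus a prefix length between 1 and 32. A minimal standalone sketch of those same rules, condensed for illustration (not part of the patch; class and method names are invented here):

public class IpSanityCheckSketch {
    // Mirrors the octet rules of IPRangeConfig.validIP above.
    static boolean validIp(String ip) {
        String[] octets = ip.split("\\.");
        if (octets.length != 4) {
            return false;            // must be exactly four octets
        }
        for (String octetString : octets) {
            if (octetString.isEmpty() || octetString.length() > 3) {
                return false;        // 1-3 characters per octet
            }
            int octet;
            try {
                octet = Integer.parseInt(octetString);
            } catch (NumberFormatException e) {
                return false;        // each octet must be an integer
            }
            if (octet < 0 || octet > 255) {
                return false;        // 0-255 inclusive
            }
        }
        return true;
    }

    // Mirrors the prefix rules of IPRangeConfig.validCIDR above.
    static boolean validCidr(String cidr) {
        String[] pair = cidr.split("/");
        if (pair.length != 2 || !validIp(pair[0])) {
            return false;
        }
        try {
            int size = Integer.parseInt(pair[1]);
            return size >= 1 && size <= 32;
        } catch (NumberFormatException e) {
            return false;
        }
    }

    public static void main(String[] args) {
        System.out.println(validIp("192.168.1.1"));    // true
        System.out.println(validIp("192.168.1.256"));  // false: octet out of range
        System.out.println(validCidr("10.1.1.0/24"));  // true
        System.out.println(validCidr("10.1.1.0/33"));  // false: prefix too long
    }
}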
"')"; - + public static void main(String[] args) { + PodZoneConfig config = ComponentContext.inject(PodZoneConfig.class); + //config.run(args); + System.exit(0); + } + + public void savePod(boolean printOutput, long id, String name, long dcId, String gateway, String cidr, int vlanStart, int vlanEnd) { + // Check that the cidr was valid + if (!IPRangeConfig.validCIDR(cidr)) printError("Please enter a valid CIDR for pod: " + name); + + // Get the individual cidrAddress and cidrSize values + String[] cidrPair = cidr.split("\\/"); + String cidrAddress = cidrPair[0]; + String cidrSize = cidrPair[1]; + + String sql = null; + if (id != -1) sql = "INSERT INTO `cloud`.`host_pod_ref` (id, name, data_center_id, gateway, cidr_address, cidr_size) " + "VALUES ('" + id + "','" + name + "','" + dcId + "','" + gateway + "','" + cidrAddress + "','" + cidrSize + "')"; + else sql = "INSERT INTO `cloud`.`host_pod_ref` (name, data_center_id, gateway, cidr_address, cidr_size) " + "VALUES ('" + name + "','" + dcId + "','" + gateway + "','" + cidrAddress + "','" + cidrSize + "')"; + DatabaseConfig.saveSQL(sql, "Failed to save pod due to exception. Please contact Cloud Support."); - + if (printOutput) System.out.println("Successfuly saved pod."); - } - - public void checkAllPodCidrSubnets() { - Vector allZoneIDs = getAllZoneIDs(); - for (Long dcId : allZoneIDs) { - HashMap> currentPodCidrSubnets = getCurrentPodCidrSubnets(dcId.longValue()); - String result = checkPodCidrSubnets(dcId.longValue(), currentPodCidrSubnets); - if (!result.equals("success")) printError(result); - } - } - - private String checkPodCidrSubnets(long dcId, HashMap> currentPodCidrSubnets) { - + } + + public void checkAllPodCidrSubnets() { + Vector allZoneIDs = getAllZoneIDs(); + for (Long dcId : allZoneIDs) { + HashMap> currentPodCidrSubnets = getCurrentPodCidrSubnets(dcId.longValue()); + String result = checkPodCidrSubnets(dcId.longValue(), currentPodCidrSubnets); + if (!result.equals("success")) printError(result); + } + } + + private String checkPodCidrSubnets(long dcId, HashMap> currentPodCidrSubnets) { + // DataCenterDao _dcDao = null; // final ComponentLocator locator = ComponentLocator.getLocator("management-server"); - + // _dcDao = locator.getDao(DataCenterDao.class); - // For each pod, return an error if any of the following is true: - // 1. The pod's CIDR subnet conflicts with the guest network subnet - // 2. The pod's CIDR subnet conflicts with the CIDR subnet of any other pod - - String zoneName = PodZoneConfig.getZoneName(dcId); - - //get the guest network cidr and guest netmask from the zone + // For each pod, return an error if any of the following is true: + // 1. The pod's CIDR subnet conflicts with the guest network subnet + // 2. 
The pod's CIDR subnet conflicts with the CIDR subnet of any other pod + + String zoneName = PodZoneConfig.getZoneName(dcId); + + //get the guest network cidr and guest netmask from the zone // DataCenterVO dcVo = _dcDao.findById(dcId); - - String guestNetworkCidr = IPRangeConfig.getGuestNetworkCidr(dcId); - - if (guestNetworkCidr == null || guestNetworkCidr.isEmpty()) return "Please specify a valid guest cidr"; + + String guestNetworkCidr = IPRangeConfig.getGuestNetworkCidr(dcId); + + if (guestNetworkCidr == null || guestNetworkCidr.isEmpty()) return "Please specify a valid guest cidr"; String[] cidrTuple = guestNetworkCidr.split("\\/"); - - String guestIpNetwork = NetUtils.getIpRangeStartIpFromCidr(cidrTuple[0], Long.parseLong(cidrTuple[1])); - long guestCidrSize = Long.parseLong(cidrTuple[1]); - + + String guestIpNetwork = NetUtils.getIpRangeStartIpFromCidr(cidrTuple[0], Long.parseLong(cidrTuple[1])); + long guestCidrSize = Long.parseLong(cidrTuple[1]); + // Iterate through all pods in this zone - for (Long podId : currentPodCidrSubnets.keySet()) { - String podName; - if (podId.longValue() == -1) podName = "newPod"; - else podName = PodZoneConfig.getPodName(podId.longValue(), dcId); - - Vector cidrPair = currentPodCidrSubnets.get(podId); - String cidrAddress = (String) cidrPair.get(0); - long cidrSize = ((Long) cidrPair.get(1)).longValue(); - - long cidrSizeToUse = -1; - if (cidrSize < guestCidrSize) cidrSizeToUse = cidrSize; - else cidrSizeToUse = guestCidrSize; - - String cidrSubnet = NetUtils.getCidrSubNet(cidrAddress, cidrSizeToUse); - String guestSubnet = NetUtils.getCidrSubNet(guestIpNetwork, cidrSizeToUse); - - // Check that cidrSubnet does not equal guestSubnet - if (cidrSubnet.equals(guestSubnet)) { - if (podName.equals("newPod")) { - return "The subnet of the pod you are adding conflicts with the subnet of the Guest IP Network. Please specify a different CIDR."; - } else { - return "Warning: The subnet of pod " + podName + " in zone " + zoneName + " conflicts with the subnet of the Guest IP Network. Please change either the pod's CIDR or the Guest IP Network's subnet, and re-run install-vmops-management."; - } - } - - // Iterate through the rest of the pods - for (Long otherPodId : currentPodCidrSubnets.keySet()) { - if (podId.equals(otherPodId)) continue; - - // Check that cidrSubnet does not equal otherCidrSubnet - Vector otherCidrPair = currentPodCidrSubnets.get(otherPodId); - String otherCidrAddress = (String) otherCidrPair.get(0); - long otherCidrSize = ((Long) otherCidrPair.get(1)).longValue(); - - if (cidrSize < otherCidrSize) cidrSizeToUse = cidrSize; - else cidrSizeToUse = otherCidrSize; - - cidrSubnet = NetUtils.getCidrSubNet(cidrAddress, cidrSizeToUse); - String otherCidrSubnet = NetUtils.getCidrSubNet(otherCidrAddress, cidrSizeToUse); - - if (cidrSubnet.equals(otherCidrSubnet)) { - String otherPodName = PodZoneConfig.getPodName(otherPodId.longValue(), dcId); - if (podName.equals("newPod")) { - return "The subnet of the pod you are adding conflicts with the subnet of pod " + otherPodName + " in zone " + zoneName + ". Please specify a different CIDR."; - } else { - return "Warning: The pods " + podName + " and " + otherPodName + " in zone " + zoneName + " have conflicting CIDR subnets. 
Please change the CIDR of one of these pods."; - } - } - } - } - - return "success"; - } - - @DB - protected HashMap> getCurrentPodCidrSubnets(long dcId) { - HashMap> currentPodCidrSubnets = new HashMap>(); - - String selectSql = "SELECT id, cidr_address, cidr_size FROM host_pod_ref WHERE data_center_id=" + dcId; - Transaction txn = Transaction.currentTxn(); - try { - PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql); - ResultSet rs = stmt.executeQuery(); - while (rs.next()) { - Long podId = rs.getLong("id"); - String cidrAddress = rs.getString("cidr_address"); - long cidrSize = rs.getLong("cidr_size"); - Vector cidrPair = new Vector(); - cidrPair.add(0, cidrAddress); - cidrPair.add(1, new Long(cidrSize)); - currentPodCidrSubnets.put(podId, cidrPair); - } + for (Long podId : currentPodCidrSubnets.keySet()) { + String podName; + if (podId.longValue() == -1) podName = "newPod"; + else podName = PodZoneConfig.getPodName(podId.longValue(), dcId); + + Vector cidrPair = currentPodCidrSubnets.get(podId); + String cidrAddress = (String) cidrPair.get(0); + long cidrSize = ((Long) cidrPair.get(1)).longValue(); + + long cidrSizeToUse = -1; + if (cidrSize < guestCidrSize) cidrSizeToUse = cidrSize; + else cidrSizeToUse = guestCidrSize; + + String cidrSubnet = NetUtils.getCidrSubNet(cidrAddress, cidrSizeToUse); + String guestSubnet = NetUtils.getCidrSubNet(guestIpNetwork, cidrSizeToUse); + + // Check that cidrSubnet does not equal guestSubnet + if (cidrSubnet.equals(guestSubnet)) { + if (podName.equals("newPod")) { + return "The subnet of the pod you are adding conflicts with the subnet of the Guest IP Network. Please specify a different CIDR."; + } else { + return "Warning: The subnet of pod " + podName + " in zone " + zoneName + " conflicts with the subnet of the Guest IP Network. Please change either the pod's CIDR or the Guest IP Network's subnet, and re-run install-vmops-management."; + } + } + + // Iterate through the rest of the pods + for (Long otherPodId : currentPodCidrSubnets.keySet()) { + if (podId.equals(otherPodId)) continue; + + // Check that cidrSubnet does not equal otherCidrSubnet + Vector otherCidrPair = currentPodCidrSubnets.get(otherPodId); + String otherCidrAddress = (String) otherCidrPair.get(0); + long otherCidrSize = ((Long) otherCidrPair.get(1)).longValue(); + + if (cidrSize < otherCidrSize) cidrSizeToUse = cidrSize; + else cidrSizeToUse = otherCidrSize; + + cidrSubnet = NetUtils.getCidrSubNet(cidrAddress, cidrSizeToUse); + String otherCidrSubnet = NetUtils.getCidrSubNet(otherCidrAddress, cidrSizeToUse); + + if (cidrSubnet.equals(otherCidrSubnet)) { + String otherPodName = PodZoneConfig.getPodName(otherPodId.longValue(), dcId); + if (podName.equals("newPod")) { + return "The subnet of the pod you are adding conflicts with the subnet of pod " + otherPodName + " in zone " + zoneName + ". Please specify a different CIDR."; + } else { + return "Warning: The pods " + podName + " and " + otherPodName + " in zone " + zoneName + " have conflicting CIDR subnets. 
Please change the CIDR of one of these pods."; + } + } + } + } + + return "success"; + } + + @DB + protected HashMap> getCurrentPodCidrSubnets(long dcId) { + HashMap> currentPodCidrSubnets = new HashMap>(); + + String selectSql = "SELECT id, cidr_address, cidr_size FROM host_pod_ref WHERE data_center_id=" + dcId; + Transaction txn = Transaction.currentTxn(); + try { + PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql); + ResultSet rs = stmt.executeQuery(); + while (rs.next()) { + Long podId = rs.getLong("id"); + String cidrAddress = rs.getString("cidr_address"); + long cidrSize = rs.getLong("cidr_size"); + Vector cidrPair = new Vector(); + cidrPair.add(0, cidrAddress); + cidrPair.add(1, new Long(cidrSize)); + currentPodCidrSubnets.put(podId, cidrPair); + } } catch (SQLException ex) { - System.out.println(ex.getMessage()); - printError("There was an issue with reading currently saved pod CIDR subnets. Please contact Cloud Support."); + System.out.println(ex.getMessage()); + printError("There was an issue with reading currently saved pod CIDR subnets. Please contact Cloud Support."); return null; } - + return currentPodCidrSubnets; - } - - public void deletePod(String name, long dcId) { - String sql = "DELETE FROM `cloud`.`host_pod_ref` WHERE name=\"" + name + "\" AND data_center_id=\"" + dcId + "\""; - DatabaseConfig.saveSQL(sql, "Failed to delete pod due to exception. Please contact Cloud Support."); - } - - public long getVlanDbId(String zone, String vlanId) { - long zoneId = getZoneId(zone); - - return DatabaseConfig.getDatabaseValueLong("SELECT * FROM `cloud`.`vlan` WHERE data_center_id=\"" + zoneId + "\" AND vlan_id =\"" + vlanId + "\"", "id", - "Unable to start DB connection to read vlan DB id. Please contact Cloud Support."); } - - public List modifyVlan(String zone, boolean add, String vlanId, String vlanGateway, String vlanNetmask, String pod, String vlanType, String ipRange, long networkId, long physicalNetworkId) { - // Check if the zone is valid - long zoneId = getZoneId(zone); - if (zoneId == -1) - return genReturnList("false", "Please specify a valid zone."); - - //check if physical network is valid + + public void deletePod(String name, long dcId) { + String sql = "DELETE FROM `cloud`.`host_pod_ref` WHERE name=\"" + name + "\" AND data_center_id=\"" + dcId + "\""; + DatabaseConfig.saveSQL(sql, "Failed to delete pod due to exception. Please contact Cloud Support."); + } + + public long getVlanDbId(String zone, String vlanId) { + long zoneId = getZoneId(zone); + + return DatabaseConfig.getDatabaseValueLong("SELECT * FROM `cloud`.`vlan` WHERE data_center_id=\"" + zoneId + "\" AND vlan_id =\"" + vlanId + "\"", "id", + "Unable to start DB connection to read vlan DB id. 
Please contact Cloud Support."); + } + + public List modifyVlan(String zone, boolean add, String vlanId, String vlanGateway, String vlanNetmask, String pod, String vlanType, String ipRange, long networkId, long physicalNetworkId) { + // Check if the zone is valid + long zoneId = getZoneId(zone); + if (zoneId == -1) + return genReturnList("false", "Please specify a valid zone."); + + //check if physical network is valid long physicalNetworkDbId = checkPhysicalNetwork(physicalNetworkId); if (physicalNetworkId == -1) return genReturnList("false", "Please specify a valid physical network."); - - - Long podId = pod!=null?getPodId(pod, zone):null; - if (podId != null && podId == -1) - return genReturnList("false", "Please specify a valid pod."); - - if (add) { - - // Make sure the gateway is valid - if (!NetUtils.isValidIp(vlanGateway)) - return genReturnList("false", "Please specify a valid gateway."); - - // Make sure the netmask is valid - if (!NetUtils.isValidIp(vlanNetmask)) - return genReturnList("false", "Please specify a valid netmask."); - - // Check if a vlan with the same vlanId already exists in the specified zone - if (getVlanDbId(zone, vlanId) != -1) - return genReturnList("false", "A VLAN with the specified VLAN ID already exists in zone " + zone + "."); - - /* + + + Long podId = pod!=null?getPodId(pod, zone):null; + if (podId != null && podId == -1) + return genReturnList("false", "Please specify a valid pod."); + + if (add) { + + // Make sure the gateway is valid + if (!NetUtils.isValidIp(vlanGateway)) + return genReturnList("false", "Please specify a valid gateway."); + + // Make sure the netmask is valid + if (!NetUtils.isValidIp(vlanNetmask)) + return genReturnList("false", "Please specify a valid netmask."); + + // Check if a vlan with the same vlanId already exists in the specified zone + if (getVlanDbId(zone, vlanId) != -1) + return genReturnList("false", "A VLAN with the specified VLAN ID already exists in zone " + zone + "."); + + /* // Check if another vlan in the same zone has the same subnet String newVlanSubnet = NetUtils.getSubNet(vlanGateway, vlanNetmask); List vlans = _vlanDao.findByZone(zoneId); @@ -221,146 +221,146 @@ public class PodZoneConfig { if (newVlanSubnet.equals(currentVlanSubnet)) return genReturnList("false", "The VLAN with ID " + vlan.getVlanId() + " in zone " + zone + " has the same subnet. Please specify a different gateway/netmask."); } - */ - - // Everything was fine, so persist the VLAN - saveVlan(zoneId, podId, vlanId, vlanGateway, vlanNetmask, vlanType, ipRange, networkId, physicalNetworkDbId); + */ + + // Everything was fine, so persist the VLAN + saveVlan(zoneId, podId, vlanId, vlanGateway, vlanNetmask, vlanType, ipRange, networkId, physicalNetworkDbId); if (podId != null) { - long vlanDbId = getVlanDbId(zone, vlanId); - String sql = "INSERT INTO `cloud`.`pod_vlan_map` (pod_id, vlan_db_id) " + "VALUES ('" + podId + "','" + vlanDbId + "')"; + long vlanDbId = getVlanDbId(zone, vlanId); + String sql = "INSERT INTO `cloud`.`pod_vlan_map` (pod_id, vlan_db_id) " + "VALUES ('" + podId + "','" + vlanDbId + "')"; DatabaseConfig.saveSQL(sql, "Failed to save pod_vlan_map due to exception vlanDbId=" + vlanDbId + ", podId=" + podId + ". 
Please contact Cloud Support."); } - - return genReturnList("true", "Successfully added VLAN."); - - } else { - return genReturnList("false", "That operation is not suppored."); - } - - /* + + return genReturnList("true", "Successfully added VLAN."); + + } else { + return genReturnList("false", "That operation is not suppored."); + } + + /* else { - + // Check if a VLAN actually exists in the specified zone long vlanDbId = getVlanDbId(zone, vlanId); if (vlanDbId == -1) return genReturnList("false", "A VLAN with ID " + vlanId + " does not exist in zone " + zone); - + // Check if there are any public IPs that are in the specified vlan. List ips = _publicIpAddressDao.listByVlanDbId(vlanDbId); if (ips.size() != 0) return genReturnList("false", "Please delete all IP addresses that are in VLAN " + vlanId + " before deleting the VLAN."); - + // Delete the vlan _vlanDao.delete(vlanDbId); - + return genReturnList("true", "Successfully deleted VLAN."); } - */ + */ } - - @DB - public void saveZone(boolean printOutput, long id, String name, String dns1, String dns2, String dns3, String dns4, String guestNetworkCidr, String networkType) { - - if (printOutput) System.out.println("Saving zone, please wait..."); - - String columns = null; - String values = null; - - if (id != -1) { - columns = "(id, name"; - values = "('" + id + "','" + name + "'"; - } else { - columns = "(name"; - values = "('" + name + "'"; - } - if (dns1 != null) { - columns += ", dns1"; - values += ",'" + dns1 + "'"; - } - - if (dns2 != null) { - columns += ", dns2"; - values += ",'" + dns2 + "'"; - } - - if (dns3 != null) { - columns += ", internal_dns1"; - values += ",'" + dns3 + "'"; - } - - if (dns4 != null) { - columns += ", internal_dns2"; - values += ",'" + dns4 + "'"; - } - - if(guestNetworkCidr != null) { - columns += ", guest_network_cidr"; - values += ",'" + guestNetworkCidr + "'"; - } - - if(networkType != null) { - columns += ", networktype"; - values += ",'" + networkType + "'"; - } - - columns += ", uuid"; - values += ", UUID()"; - - columns += ")"; - values += ")"; - - String sql = "INSERT INTO `cloud`.`data_center` " + columns + " VALUES " + values; - - DatabaseConfig.saveSQL(sql, "Failed to save zone due to exception. 
Please contact Cloud Support."); - - if (printOutput) System.out.println("Successfully saved zone."); - } - - @DB - public void savePhysicalNetwork(boolean printOutput, long id, long dcId, int vnetStart, int vnetEnd) { - - if (printOutput) System.out.println("Saving physical network, please wait..."); - + @DB + public void saveZone(boolean printOutput, long id, String name, String dns1, String dns2, String dns3, String dns4, String guestNetworkCidr, String networkType) { + + if (printOutput) System.out.println("Saving zone, please wait..."); + String columns = null; String values = null; - + + if (id != -1) { + columns = "(id, name"; + values = "('" + id + "','" + name + "'"; + } else { + columns = "(name"; + values = "('" + name + "'"; + } + + if (dns1 != null) { + columns += ", dns1"; + values += ",'" + dns1 + "'"; + } + + if (dns2 != null) { + columns += ", dns2"; + values += ",'" + dns2 + "'"; + } + + if (dns3 != null) { + columns += ", internal_dns1"; + values += ",'" + dns3 + "'"; + } + + if (dns4 != null) { + columns += ", internal_dns2"; + values += ",'" + dns4 + "'"; + } + + if(guestNetworkCidr != null) { + columns += ", guest_network_cidr"; + values += ",'" + guestNetworkCidr + "'"; + } + + if(networkType != null) { + columns += ", networktype"; + values += ",'" + networkType + "'"; + } + + columns += ", uuid"; + values += ", UUID()"; + + columns += ")"; + values += ")"; + + String sql = "INSERT INTO `cloud`.`data_center` " + columns + " VALUES " + values; + + DatabaseConfig.saveSQL(sql, "Failed to save zone due to exception. Please contact Cloud Support."); + + if (printOutput) System.out.println("Successfully saved zone."); + } + + @DB + public void savePhysicalNetwork(boolean printOutput, long id, long dcId, int vnetStart, int vnetEnd) { + + if (printOutput) System.out.println("Saving physical network, please wait..."); + + String columns = null; + String values = null; + columns = "(id "; values = "('" + id + "'"; - + columns += ", name "; values += ",'physical network'"; - + columns += ", data_center_id "; values += ",'" + dcId + "'"; - + //save vnet information columns += ", vnet"; values += ",'" + vnetStart + "-" + vnetEnd + "'"; - + columns += ", state"; values += ", 'Enabled'"; - + columns += ", uuid"; values += ", UUID()"; - + columns += ")"; values += ")"; - + String sql = "INSERT INTO `cloud`.`physical_network` " + columns + " VALUES " + values; - + DatabaseConfig.saveSQL(sql, "Failed to save physical network due to exception. Please contact Cloud Support."); - + // Hardcode the vnet range to be the full range int begin = 0x64; int end = 64000; - + // If vnet arguments were passed in, use them if (vnetStart != -1 && vnetEnd != -1) { begin = vnetStart; end = vnetEnd; } - + String insertVnet = "INSERT INTO `cloud`.`op_dc_vnet_alloc` (vnet, data_center_id, physical_network_id) VALUES ( ?, ?, ?)"; Transaction txn = Transaction.currentTxn(); @@ -376,17 +376,17 @@ public class PodZoneConfig { } catch (SQLException ex) { printError("Error creating vnet for the physical network. 
Please contact Cloud Support."); } - + //add default traffic types - + //get default Xen network labels String defaultXenPrivateNetworkLabel = getDefaultXenNetworkLabel(TrafficType.Management); String defaultXenPublicNetworkLabel = getDefaultXenNetworkLabel(TrafficType.Public); String defaultXenStorageNetworkLabel = getDefaultXenNetworkLabel(TrafficType.Storage); String defaultXenGuestNetworkLabel = getDefaultXenNetworkLabel(TrafficType.Guest); - + String insertTraficType = "INSERT INTO `cloud`.`physical_network_traffic_types` " + - "(physical_network_id, traffic_type, xen_network_label) VALUES ( ?, ?, ?)"; + "(physical_network_id, traffic_type, xen_network_label) VALUES ( ?, ?, ?)"; try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertTraficType); @@ -406,128 +406,128 @@ public class PodZoneConfig { }else if(traffic.equals(TrafficType.Guest)){ stmt.setString(3, defaultXenGuestNetworkLabel); } - + stmt.addBatch(); } stmt.executeBatch(); } catch (SQLException ex) { printError("Error adding default traffic types for the physical network. Please contact Cloud Support."); } - + if (printOutput) System.out.println("Successfully saved physical network."); } - + private String getDefaultXenNetworkLabel(TrafficType trafficType){ String xenLabel = null; String configName = null; switch(trafficType){ - case Public: configName = "xen.public.network.device"; - break; - case Guest: configName = "xen.guest.network.device"; - break; - case Storage: configName = "xen.storage.network.device1"; - break; - case Management: configName = "xen.private.network.device"; - break; + case Public: configName = "xen.public.network.device"; + break; + case Guest: configName = "xen.guest.network.device"; + break; + case Storage: configName = "xen.storage.network.device1"; + break; + case Management: configName = "xen.private.network.device"; + break; } - + if(configName != null){ xenLabel = getConfiguredValue(configName); } return xenLabel; } - + public static String getConfiguredValue(String configName) { return DatabaseConfig.getDatabaseValueString("SELECT value FROM `cloud`.`configuration` where name = \"" + configName + "\"","value", "Unable to start DB connection to read configuration. Please contact Cloud Support."); } - - public void deleteZone(String name) { - String sql = "DELETE FROM `cloud`.`data_center` WHERE name=\"" + name + "\""; - DatabaseConfig.saveSQL(sql, "Failed to delete zone due to exception. Please contact Cloud Support."); - } - - public void saveVlan(long zoneId, Long podId, String vlanId, String vlanGateway, String vlanNetmask, String vlanType, String ipRange, long networkId, long physicalNetworkId) { - String sql = "INSERT INTO `cloud`.`vlan` (vlan_id, vlan_gateway, vlan_netmask, data_center_id, vlan_type, description, network_id, physical_network_id) " + "VALUES ('" + vlanId + "','" + vlanGateway + "','" + vlanNetmask + "','" + zoneId + "','" + vlanType + "','" + ipRange + "','" + networkId + "','" + physicalNetworkId + "')"; + + public void deleteZone(String name) { + String sql = "DELETE FROM `cloud`.`data_center` WHERE name=\"" + name + "\""; + DatabaseConfig.saveSQL(sql, "Failed to delete zone due to exception. 
Please contact Cloud Support."); + } + + public void saveVlan(long zoneId, Long podId, String vlanId, String vlanGateway, String vlanNetmask, String vlanType, String ipRange, long networkId, long physicalNetworkId) { + String sql = "INSERT INTO `cloud`.`vlan` (vlan_id, vlan_gateway, vlan_netmask, data_center_id, vlan_type, description, network_id, physical_network_id) " + "VALUES ('" + vlanId + "','" + vlanGateway + "','" + vlanNetmask + "','" + zoneId + "','" + vlanType + "','" + ipRange + "','" + networkId + "','" + physicalNetworkId + "')"; DatabaseConfig.saveSQL(sql, "Failed to save vlan due to exception. Please contact Cloud Support."); - } - - public static long getPodId(String pod, String zone) { - long dcId = getZoneId(zone); - String selectSql = "SELECT * FROM `cloud`.`host_pod_ref` WHERE name = \"" + pod + "\" AND data_center_id = \"" + dcId + "\""; - String errorMsg = "Could not read pod ID fro mdatabase. Please contact Cloud Support."; - return DatabaseConfig.getDatabaseValueLong(selectSql, "id", errorMsg); - } - - public static long getPodId(String pod, long dcId) { + } + + public static long getPodId(String pod, String zone) { + long dcId = getZoneId(zone); String selectSql = "SELECT * FROM `cloud`.`host_pod_ref` WHERE name = \"" + pod + "\" AND data_center_id = \"" + dcId + "\""; String errorMsg = "Could not read pod ID fro mdatabase. Please contact Cloud Support."; return DatabaseConfig.getDatabaseValueLong(selectSql, "id", errorMsg); } - - public static long getZoneId(String zone) { - String selectSql = "SELECT * FROM `cloud`.`data_center` WHERE name = \"" + zone + "\""; - String errorMsg = "Could not read zone ID from database. Please contact Cloud Support."; - return DatabaseConfig.getDatabaseValueLong(selectSql, "id", errorMsg); - } - + + public static long getPodId(String pod, long dcId) { + String selectSql = "SELECT * FROM `cloud`.`host_pod_ref` WHERE name = \"" + pod + "\" AND data_center_id = \"" + dcId + "\""; + String errorMsg = "Could not read pod ID fro mdatabase. Please contact Cloud Support."; + return DatabaseConfig.getDatabaseValueLong(selectSql, "id", errorMsg); + } + + public static long getZoneId(String zone) { + String selectSql = "SELECT * FROM `cloud`.`data_center` WHERE name = \"" + zone + "\""; + String errorMsg = "Could not read zone ID from database. Please contact Cloud Support."; + return DatabaseConfig.getDatabaseValueLong(selectSql, "id", errorMsg); + } + public static long checkPhysicalNetwork(long physicalNetworkId) { String selectSql = "SELECT * FROM `cloud`.`physical_network` WHERE id = \"" + physicalNetworkId + "\""; String errorMsg = "Could not read physicalNetwork ID from database. 
Please contact Cloud Support."; return DatabaseConfig.getDatabaseValueLong(selectSql, "id", errorMsg); } - - @DB - public Vector getAllZoneIDs() { - Vector allZoneIDs = new Vector(); - - String selectSql = "SELECT id FROM data_center"; - Transaction txn = Transaction.currentTxn(); - try { - PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql); - ResultSet rs = stmt.executeQuery(); - while (rs.next()) { - Long dcId = rs.getLong("id"); - allZoneIDs.add(dcId); - } + + @DB + public Vector getAllZoneIDs() { + Vector allZoneIDs = new Vector(); + + String selectSql = "SELECT id FROM data_center"; + Transaction txn = Transaction.currentTxn(); + try { + PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql); + ResultSet rs = stmt.executeQuery(); + while (rs.next()) { + Long dcId = rs.getLong("id"); + allZoneIDs.add(dcId); + } } catch (SQLException ex) { - System.out.println(ex.getMessage()); - printError("There was an issue with reading zone IDs. Please contact Cloud Support."); + System.out.println(ex.getMessage()); + printError("There was an issue with reading zone IDs. Please contact Cloud Support."); return null; } - + return allZoneIDs; - } - - - public static boolean validPod(String pod, String zone) { - return (getPodId(pod, zone) != -1); - } - - public static boolean validZone(String zone) { - return (getZoneId(zone) != -1); - } - - public static String getPodName(long podId, long dcId) { - return DatabaseConfig.getDatabaseValueString("SELECT * FROM `cloud`.`host_pod_ref` WHERE id=" + podId + " AND data_center_id=" + dcId, "name", - "Unable to start DB connection to read pod name. Please contact Cloud Support."); - } - - public static String getZoneName(long dcId) { - return DatabaseConfig.getDatabaseValueString("SELECT * FROM `cloud`.`data_center` WHERE id=" + dcId, "name", - "Unable to start DB connection to read zone name. Please contact Cloud Support."); - } - - private static void printError(String message) { - DatabaseConfig.printError(message); - } - - private List genReturnList(String success, String message) { - List returnList = new ArrayList(); - returnList.add(0, success); - returnList.add(1, message); - return returnList; } - + + + public static boolean validPod(String pod, String zone) { + return (getPodId(pod, zone) != -1); + } + + public static boolean validZone(String zone) { + return (getZoneId(zone) != -1); + } + + public static String getPodName(long podId, long dcId) { + return DatabaseConfig.getDatabaseValueString("SELECT * FROM `cloud`.`host_pod_ref` WHERE id=" + podId + " AND data_center_id=" + dcId, "name", + "Unable to start DB connection to read pod name. Please contact Cloud Support."); + } + + public static String getZoneName(long dcId) { + return DatabaseConfig.getDatabaseValueString("SELECT * FROM `cloud`.`data_center` WHERE id=" + dcId, "name", + "Unable to start DB connection to read zone name. 
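Editor's note: savePod, saveZone, and saveVlan above assemble INSERT statements by concatenating caller-supplied strings into SQL text, which is fragile against quoting and injection. A hedged sketch of the same host_pod_ref insert done with a parameterized PreparedStatement (illustrative only; the patch itself keeps the DatabaseConfig.saveSQL string-building style):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class PodInsertSketch {
    // Hypothetical replacement for the concatenated INSERT in savePod.
    static void insertPod(Connection conn, String name, long dcId, String gateway,
                          String cidrAddress, int cidrSize) throws SQLException {
        String sql = "INSERT INTO `cloud`.`host_pod_ref` "
                   + "(name, data_center_id, gateway, cidr_address, cidr_size) "
                   + "VALUES (?, ?, ?, ?, ?)";
        try (PreparedStatement stmt = conn.prepareStatement(sql)) {
            stmt.setString(1, name);        // values are bound, not spliced into the SQL text
            stmt.setLong(2, dcId);
            stmt.setString(3, gateway);
            stmt.setString(4, cidrAddress);
            stmt.setInt(5, cidrSize);
            stmt.executeUpdate();
        }
    }
}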
diff --git a/server/src/com/cloud/upgrade/DatabaseCreator.java b/server/src/com/cloud/upgrade/DatabaseCreator.java
index 079e1e93b14..f0a8c5a03b2 100755
--- a/server/src/com/cloud/upgrade/DatabaseCreator.java
+++ b/server/src/com/cloud/upgrade/DatabaseCreator.java
@@ -26,7 +26,7 @@ import java.sql.Connection;
 import java.sql.SQLException;
 
 import com.cloud.utils.PropertiesUtil;
-import com.cloud.utils.component.ComponentLocator;
+
 import com.cloud.utils.component.SystemIntegrityChecker;
 import com.cloud.utils.db.ScriptRunner;
 import com.cloud.utils.db.Transaction;
diff --git a/server/src/com/cloud/upgrade/DatabaseIntegrityChecker.java b/server/src/com/cloud/upgrade/DatabaseIntegrityChecker.java
index d6e55a9d0af..1905bb3c2f8 100755
--- a/server/src/com/cloud/upgrade/DatabaseIntegrityChecker.java
+++ b/server/src/com/cloud/upgrade/DatabaseIntegrityChecker.java
@@ -30,7 +30,7 @@ import org.springframework.stereotype.Component;
 import com.cloud.maint.Version;
 import com.cloud.upgrade.dao.VersionDao;
 import com.cloud.upgrade.dao.VersionDaoImpl;
-import com.cloud.utils.component.ComponentLocator;
+
 import com.cloud.utils.component.SystemIntegrityChecker;
 import com.cloud.utils.db.GlobalLock;
 import com.cloud.utils.db.Transaction;
diff --git a/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java b/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java
index c07d03f873e..f831a032385 100755
--- a/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java
+++ b/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java
@@ -63,7 +63,7 @@ import com.cloud.upgrade.dao.VersionDao;
 import com.cloud.upgrade.dao.VersionDaoImpl;
 import com.cloud.upgrade.dao.VersionVO;
 import com.cloud.upgrade.dao.VersionVO.Step;
-import com.cloud.utils.component.ComponentLocator;
+
 import com.cloud.utils.component.SystemIntegrityChecker;
 import com.cloud.utils.db.GlobalLock;
 import com.cloud.utils.db.ScriptRunner;
diff --git a/server/src/com/cloud/upgrade/PremiumDatabaseUpgradeChecker.java b/server/src/com/cloud/upgrade/PremiumDatabaseUpgradeChecker.java
index 43d025ada22..896cb5618ed 100755
--- a/server/src/com/cloud/upgrade/PremiumDatabaseUpgradeChecker.java
+++ b/server/src/com/cloud/upgrade/PremiumDatabaseUpgradeChecker.java
@@ -43,7 +43,7 @@ import com.cloud.upgrade.dao.Upgrade30to301;
 import com.cloud.upgrade.dao.UpgradeSnapshot217to224;
 import com.cloud.upgrade.dao.UpgradeSnapshot223to224;
 import com.cloud.upgrade.dao.VersionDaoImpl;
-import com.cloud.utils.component.ComponentLocator;
+
 import com.cloud.utils.component.SystemIntegrityChecker;
 
 @Component
diff --git a/server/src/com/cloud/user/AccountManagerImpl.java b/server/src/com/cloud/user/AccountManagerImpl.java
index b90413218de..cf8602c22d2 100755
--- a/server/src/com/cloud/user/AccountManagerImpl.java
+++ b/server/src/com/cloud/user/AccountManagerImpl.java
@@ -38,6 +38,7 @@ import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
 import org.apache.cloudstack.acl.ControlledEntity;
+import org.apache.cloudstack.acl.RoleType;
 import org.apache.cloudstack.acl.SecurityChecker;
 import org.apache.cloudstack.acl.SecurityChecker.AccessType;
 import org.apache.cloudstack.api.command.admin.account.UpdateAccountCmd;
@@ -113,7 +114,7 @@
 import com.cloud.user.dao.UserDao;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
 import com.cloud.utils.Ternary;
-import com.cloud.utils.component.ComponentLocator;
+
 import com.cloud.utils.component.Manager;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.db.DB;
@@ -1537,6 +1538,31 @@ public class AccountManagerImpl implements AccountManager, AccountService, Manag
         }
     }
 
+    @Override
+    public RoleType getRoleType(Account account) {
+        RoleType roleType = RoleType.Unknown;
+        if (account == null)
+            return roleType;
+        short accountType = account.getType();
+
+        // Account type to role type translation
+        switch (accountType) {
+        case Account.ACCOUNT_TYPE_ADMIN:
+            roleType = RoleType.Admin;
+            break;
+        case Account.ACCOUNT_TYPE_DOMAIN_ADMIN:
+            roleType = RoleType.DomainAdmin;
+            break;
+        case Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN:
+            roleType = RoleType.ResourceAdmin;
+            break;
+        case Account.ACCOUNT_TYPE_NORMAL:
+            roleType = RoleType.User;
+            break;
+        }
+        return roleType;
+    }
+
     @Override
     public User getActiveUser(long userId) {
         return _userDao.findById(userId);
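Editor's note: the new getRoleType above is a plain switch from the Account.ACCOUNT_TYPE_* short constants to RoleType, falling back to RoleType.Unknown. A minimal standalone sketch of the same mapping (the numeric values below are illustrative stand-ins for the real constants, which are not shown in this patch):

public class RoleTypeMappingSketch {
    enum RoleType { Unknown, Admin, DomainAdmin, ResourceAdmin, User }

    // Mirrors the shape of AccountManagerImpl.getRoleType; case values are assumptions.
    static RoleType toRoleType(short accountType) {
        switch (accountType) {
        case 1:  return RoleType.Admin;          // stand-in for ACCOUNT_TYPE_ADMIN
        case 2:  return RoleType.DomainAdmin;    // stand-in for ACCOUNT_TYPE_DOMAIN_ADMIN
        case 3:  return RoleType.ResourceAdmin;  // stand-in for ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN
        case 0:  return RoleType.User;           // stand-in for ACCOUNT_TYPE_NORMAL
        default: return RoleType.Unknown;        // anything unrecognized stays Unknown
        }
    }

    public static void main(String[] args) {
        System.out.println(toRoleType((short) 1)); // Admin
        System.out.println(toRoleType((short) 9)); // Unknown
    }
}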
diff --git a/server/src/com/cloud/user/DomainManagerImpl.java b/server/src/com/cloud/user/DomainManagerImpl.java
index f7d39ee52d5..9df34d1e4c6 100644
--- a/server/src/com/cloud/user/DomainManagerImpl.java
+++ b/server/src/com/cloud/user/DomainManagerImpl.java
@@ -87,6 +87,11 @@ public class DomainManagerImpl implements DomainManager, DomainService, Manager
         return _domainDao.findById(domainId);
     }
 
+    @Override
+    public Domain getDomain(String domainUuid) {
+        return _domainDao.findByUuid(domainUuid);
+    }
+
     @Override
     public String getName() {
         return _name;
diff --git a/server/src/com/cloud/vm/SystemVmLoadScanner.java b/server/src/com/cloud/vm/SystemVmLoadScanner.java
index 174d8c7a3e6..4251b405e1b 100644
--- a/server/src/com/cloud/vm/SystemVmLoadScanner.java
+++ b/server/src/com/cloud/vm/SystemVmLoadScanner.java
@@ -22,7 +22,6 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.log4j.Logger;
 
-import com.cloud.cluster.StackMaid;
 import com.cloud.utils.Pair;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.db.GlobalLock;
@@ -32,27 +31,27 @@ import com.cloud.utils.db.Transaction;
 // TODO: simple load scanner, to minimize code changes required in console proxy manager and SSVM, we still leave most of work at handler
 //
 public class SystemVmLoadScanner {
-    public enum AfterScanAction { nop, expand, shrink }
-
+    public enum AfterScanAction { nop, expand, shrink }
+
     private static final Logger s_logger = Logger.getLogger(SystemVmLoadScanner.class);
 
     private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 3; // 3 seconds
-
-    private final SystemVmLoadScanHandler _scanHandler;
+
+    private final SystemVmLoadScanHandler _scanHandler;
     private final ScheduledExecutorService _capacityScanScheduler;
     private final GlobalLock _capacityScanLock;
-
-    public SystemVmLoadScanner(SystemVmLoadScanHandler scanHandler) {
-        _scanHandler = scanHandler;
-        _capacityScanScheduler = Executors.newScheduledThreadPool(1, new NamedThreadFactory(scanHandler.getScanHandlerName()));
-        _capacityScanLock = GlobalLock.getInternLock(scanHandler.getScanHandlerName() + ".scan.lock");
-    }
-
-    public void initScan(long startupDelayMs, long scanIntervalMs) {
+
+    public SystemVmLoadScanner(SystemVmLoadScanHandler scanHandler) {
+        _scanHandler = scanHandler;
+        _capacityScanScheduler = Executors.newScheduledThreadPool(1, new NamedThreadFactory(scanHandler.getScanHandlerName()));
+        _capacityScanLock = GlobalLock.getInternLock(scanHandler.getScanHandlerName() + ".scan.lock");
+    }
+
+    public void initScan(long startupDelayMs, long scanIntervalMs) {
         _capacityScanScheduler.scheduleAtFixedRate(getCapacityScanTask(), startupDelayMs, scanIntervalMs, TimeUnit.MILLISECONDS);
-    }
-
-    public void stop() {
+    }
+
+    public void stop() {
         _capacityScanScheduler.shutdownNow();
 
         try {
@@ -61,8 +60,8 @@ public class SystemVmLoadScanner {
         }
 
         _capacityScanLock.releaseRef();
-    }
-
+    }
+
     private Runnable getCapacityScanTask() {
         return new Runnable() {
 
@@ -74,56 +73,55 @@ public class SystemVmLoadScanner {
                 } catch (Throwable e) {
                     s_logger.warn("Unexpected exception " + e.getMessage(), e);
                 } finally {
-                    StackMaid.current().exitCleanup();
                     txn.close();
                 }
             }
 
             private void reallyRun() {
-                loadScan();
+                loadScan();
             }
         };
     }
-
+
     private void loadScan() {
-        if(!_scanHandler.canScan()) {
-            return;
-        }
-
+        if(!_scanHandler.canScan()) {
+            return;
+        }
+
         if (!_capacityScanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) {
             if (s_logger.isTraceEnabled()) {
                 s_logger.trace("Capacity scan lock is used by others, skip and wait for my turn");
             }
             return;
         }
-
+
         try {
-            _scanHandler.onScanStart();
-
-            T[] pools = _scanHandler.getScannablePools();
-            for(T p : pools) {
-                if(_scanHandler.isPoolReadyForScan(p)) {
-                    Pair actionInfo = _scanHandler.scanPool(p);
-
-                    switch(actionInfo.first()) {
-                    case nop:
-                        break;
-
-                    case expand:
-                        _scanHandler.expandPool(p, actionInfo.second());
-                        break;
-
-                    case shrink:
-                        _scanHandler.shrinkPool(p, actionInfo.second());
-                        break;
-                    }
-                }
-            }
-
-            _scanHandler.onScanEnd();
-
+            _scanHandler.onScanStart();
+
+            T[] pools = _scanHandler.getScannablePools();
+            for(T p : pools) {
+                if(_scanHandler.isPoolReadyForScan(p)) {
+                    Pair actionInfo = _scanHandler.scanPool(p);
+
+                    switch(actionInfo.first()) {
+                    case nop:
+                        break;
+
+                    case expand:
+                        _scanHandler.expandPool(p, actionInfo.second());
+                        break;
+
+                    case shrink:
+                        _scanHandler.shrinkPool(p, actionInfo.second());
+                        break;
+                    }
+                }
+            }
+
+            _scanHandler.onScanEnd();
+
         } finally {
-            _capacityScanLock.unlock();
+            _capacityScanLock.unlock();
         }
     }
 }
diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java
index 55d0d4fe756..ac14e77c510 100755
--- a/server/src/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/com/cloud/vm/UserVmManagerImpl.java
@@ -214,7 +214,6 @@ import com.cloud.uservm.UserVm;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
 import com.cloud.utils.PasswordGenerator;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.utils.component.Manager;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.crypt.RSAHelper;
@@ -381,7 +380,7 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager
     protected String _instance;
     protected String _zone;
 
-    private ConfigurationDao _configDao;
+    @Inject ConfigurationDao _configDao;
     private int _createprivatetemplatefromvolumewait;
     private int _createprivatetemplatefromsnapshotwait;
 
@@ -1333,8 +1332,6 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager
             throws ConfigurationException {
         _name = name;
 
-        ComponentLocator locator = ComponentLocator.getCurrentLocator();
-        _configDao = locator.getDao(ConfigurationDao.class);
         if (_configDao == null) {
             throw new ConfigurationException(
                     "Unable to get the configuration dao.");
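Editor's note: the recurring change in this patch is replacing ComponentLocator service lookups with annotation-driven injection, as in the UserVmManagerImpl hunk just above. A hedged before/after sketch of the pattern (class name is illustrative; it assumes the CloudStack ConfigurationDao type and a DI container such as Spring that honors javax.inject):

import java.util.Map;

import javax.inject.Inject;
import javax.naming.ConfigurationException;

import com.cloud.configuration.dao.ConfigurationDao;

public class SomeManagerImpl {
    // Before (removed by this patch):
    //   ComponentLocator locator = ComponentLocator.getCurrentLocator();
    //   _configDao = locator.getDao(ConfigurationDao.class);
    //
    // After: declare the dependency and let the container wire it before configure() runs.
    @Inject ConfigurationDao _configDao;

    public boolean configure(String name, Map<String, Object> params)
            throws ConfigurationException {
        // The null check is kept as a guard, matching the style of the patched code.
        if (_configDao == null) {
            throw new ConfigurationException("Unable to get the configuration dao.");
        }
        return true;
    }
}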
diff --git a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java
index 2897a63afed..95dc9a66458 100755
--- a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -71,7 +71,6 @@ import com.cloud.agent.manager.allocator.HostAllocator;
 import com.cloud.alert.AlertManager;
 import com.cloud.capacity.CapacityManager;
 import com.cloud.cluster.ClusterManager;
-import com.cloud.cluster.StackMaid;
 import com.cloud.configuration.Config;
 import com.cloud.configuration.ConfigurationManager;
 import com.cloud.configuration.dao.ConfigurationDao;
@@ -238,7 +237,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
     @Inject
     protected ConfigurationDao _configDao;
-
+
     Map> _vmGurus = new HashMap>();
     protected StateMachine2 _stateMachine;
 
@@ -288,7 +287,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
         if (s_logger.isDebugEnabled()) {
             s_logger.debug("Allocating nics for " + vm);
         }
-
+
         try {
             _networkMgr.allocate(vmProfile, networks);
         } catch (ConcurrentOperationException e) {
@@ -673,7 +672,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
                 }
                 continue;
             }
-
+
             StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId());
             if (!pool.isInMaintenance()) {
                 if (s_logger.isDebugEnabled()) {
@@ -707,7 +706,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
                 }
             }
         }
-
+
         VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, template, offering, account, params);
         DeployDestination dest = null;
         for (DeploymentPlanner planner : _planners) {
@@ -757,7 +756,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
                     if(!reuseVolume){
                         reuseVolume = true;
                     }
-
+
                     Commands cmds = null;
                     vmGuru.finalizeVirtualMachineProfile(vmProfile, dest, ctx);
 
@@ -776,10 +775,10 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
                     _workDao.updateStep(work, Step.Starting);
 
                     _agentMgr.send(destHostId, cmds);
-
+
                     _workDao.updateStep(work, Step.Started);
-
-
+
+
                     StartAnswer startAnswer = cmds.getAnswer(StartAnswer.class);
                     if (startAnswer != null && startAnswer.getResult()) {
                         String host_guid = startAnswer.getHost_guid();
@@ -803,7 +802,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
                         if (s_logger.isDebugEnabled()) {
                             s_logger.info("The guru did not like the answers so stopping " + vm);
                         }
-
+
                         StopCommand cmd = new StopCommand(vm.getInstanceName());
                         StopAnswer answer = (StopAnswer) _agentMgr.easySend(destHostId, cmd);
                         if (answer == null || !answer.getResult()) {
@@ -815,7 +814,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
                         }
                     }
                     s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails()));
-
+
                 } catch (OperationTimedoutException e) {
                     s_logger.debug("Unable to send the start command to host " + dest.getHost());
                     if (e.isActive()) {
@@ -1071,7 +1070,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
             }
 
             vmGuru.prepareStop(profile);
-
+
             StopCommand stop = new StopCommand(vm, vm.getInstanceName(), null);
             boolean stopped = false;
             StopAnswer answer = null;
@@ -1560,13 +1559,13 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
     public boolean isVirtualMachineUpgradable(VirtualMachine vm, ServiceOffering offering) {
         boolean isMachineUpgradable = true;
         for(HostAllocator allocator : _hostAllocators) {
-            isMachineUpgradable = allocator.isVirtualMachineUpgradable(vm, offering);
-            if(isMachineUpgradable)
-                continue;
-            else
-                break;
+            isMachineUpgradable = allocator.isVirtualMachineUpgradable(vm, offering);
+            if(isMachineUpgradable)
+                continue;
+            else
+                break;
         }
-
+
         return isMachineUpgradable;
     }
 
@@ -1644,7 +1643,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
                 commands.addCommand(command);
             }
         }
-
+
         for (final AgentVmInfo left : infos.values()) {
             boolean found = false;
             for (VirtualMachineGuru vmGuru : _vmGurus.values()) {
@@ -1740,7 +1739,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
 
     public void fullSync(final long clusterId, Map> newStates) {
-        if (newStates==null)return;
+        if (newStates==null)return;
         Map infos = convertToInfos(newStates);
         Set set_vms = Collections.synchronizedSet(new HashSet());
         set_vms.addAll(_vmDao.listByClusterId(clusterId));
@@ -1750,11 +1749,11 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
             AgentVmInfo info = infos.remove(vm.getId());
             VMInstanceVO castedVm = null;
             if ((info == null && (vm.getState() == State.Running || vm.getState() == State.Starting))
-                    || (info != null && (info.state == State.Running && vm.getState() == State.Starting)))
+                    || (info != null && (info.state == State.Running && vm.getState() == State.Starting)))
             {
-                s_logger.info("Found vm " + vm.getInstanceName() + " in inconsistent state. " + vm.getState() + " on CS while " + (info == null ? "Stopped" : "Running") + " on agent");
+                s_logger.info("Found vm " + vm.getInstanceName() + " in inconsistent state. " + vm.getState() + " on CS while " + (info == null ? "Stopped" : "Running") + " on agent");
                 info = new AgentVmInfo(vm.getInstanceName(), getVmGuru(vm), vm, State.Stopped);
-
+
                 // Bug 13850- grab outstanding work item if any for this VM state so that we mark it as DONE after we change VM state, else it will remain pending
                 ItWorkVO work = _workDao.findByOutstandingWork(vm.getId(), vm.getState());
                 if (work != null) {
@@ -1763,8 +1762,8 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
                     }
                 }
                 vm.setState(State.Running); // set it as running and let HA take care of it
-                _vmDao.persist(vm);
-
+                _vmDao.persist(vm);
+
                 if (work != null) {
                     if (s_logger.isDebugEnabled()) {
                         s_logger.debug("Updating outstanding work item to Done, id:" + work.getId());
@@ -1772,7 +1771,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
                     work.setStep(Step.Done);
                     _workDao.update(work.getId(), work);
                 }
-
+
                 castedVm = info.guru.findById(vm.getId());
                 try {
                     Host host = _hostDao.findByGuid(info.getHostUuid());
@@ -1812,20 +1811,20 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
                 }
             }
             else
-                // host id can change
-                if (info != null && vm.getState() == State.Running){
-                    // check for host id changes
-                    Host host = _hostDao.findByGuid(info.getHostUuid());
-                    if (host != null && (vm.getHostId() == null || host.getId() != vm.getHostId())){
-                        s_logger.info("Found vm " + vm.getInstanceName() + " with inconsistent host in db, new host is " + host.getId());
-                        try {
-                            stateTransitTo(vm, VirtualMachine.Event.AgentReportMigrated, host.getId());
-                        } catch (NoTransitionException e) {
-                            s_logger.warn(e.getMessage());
-                        }
-                    }
-                }
-            /* else if(info == null && vm.getState() == State.Stopping) { //Handling CS-13376
+                // host id can change
+                if (info != null && vm.getState() == State.Running){
+                    // check for host id changes
+                    Host host = _hostDao.findByGuid(info.getHostUuid());
+                    if (host != null && (vm.getHostId() == null || host.getId() != vm.getHostId())){
+                        s_logger.info("Found vm " + vm.getInstanceName() + " with inconsistent host in db, new host is " + host.getId());
+                        try {
+                            stateTransitTo(vm, VirtualMachine.Event.AgentReportMigrated, host.getId());
+                        } catch (NoTransitionException e) {
+                            s_logger.warn(e.getMessage());
+                        }
+                    }
+                }
+            /* else if(info == null && vm.getState() == State.Stopping) { //Handling CS-13376
                 s_logger.warn("Marking the VM as Stopped as it was still stopping on the CS" +vm.getName());
                 vm.setState(State.Stopped); // Setting the VM as stopped on the DB and clearing it from the host
                 vm.setLastHostId(vm.getHostId());
@@ -1863,7 +1862,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
         boolean is_alien_vm = true;
         long alien_vm_count = -1;
         for (Map.Entry> entry : newStates.entrySet()) {
-            is_alien_vm = true;
+            is_alien_vm = true;
             for (VirtualMachineGuru vmGuru : vmGurus) {
                 String name = entry.getKey();
                 VMInstanceVO vm = vmGuru.findByName(name);
@@ -1881,8 +1880,8 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
             }
             // alien VMs
             if (is_alien_vm){
-                map.put(alien_vm_count--, new AgentVmInfo(entry.getKey(), null, null, entry.getValue().second(), entry.getValue().first()));
-                s_logger.warn("Found an alien VM " + entry.getKey());
+                map.put(alien_vm_count--, new AgentVmInfo(entry.getKey(), null, null, entry.getValue().second(), entry.getValue().first()));
+                s_logger.warn("Found an alien VM " + entry.getKey());
             }
         }
         return map;
@@ -2267,13 +2266,13 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
         Long clusterId = agent.getClusterId();
         long agentId = agent.getId();
         if (agent.getHypervisorType() == HypervisorType.XenServer) { // only for Xen
-            StartupRoutingCommand startup = (StartupRoutingCommand) cmd;
-            HashMap> allStates = startup.getClusterVMStateChanges();
-            if (allStates != null){
-                this.fullSync(clusterId, allStates);
-            }
-
-            // initiate the cron job
+            StartupRoutingCommand startup = (StartupRoutingCommand) cmd;
+            HashMap> allStates = startup.getClusterVMStateChanges();
+            if (allStates != null){
+                this.fullSync(clusterId, allStates);
+            }
+
+            // initiate the cron job
             ClusterSyncCommand syncCmd = new ClusterSyncCommand(Integer.parseInt(Config.ClusterDeltaSyncInterval.getDefaultValue()), clusterId);
             try {
                 long seq_no = _agentMgr.send(agentId, new Commands(syncCmd), this);
@@ -2340,7 +2339,6 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
             } catch (Exception e) {
                 s_logger.warn("Caught the following exception on transition checking", e);
             } finally {
-                StackMaid.current().exitCleanup();
                 lock.unlock();
             }
         }
@@ -2375,7 +2373,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
     public VMInstanceVO findById(long vmId) {
         return _vmDao.findById(vmId);
     }
-
+
     @Override
     public void checkIfCanUpgrade(VirtualMachine vmInstance, long newServiceOfferingId) {
         ServiceOfferingVO newServiceOffering = _offeringDao.findById(newServiceOfferingId);
@@ -2387,7 +2385,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
         if (!vmInstance.getState().equals(State.Stopped)) {
             s_logger.warn("Unable to upgrade virtual machine " + vmInstance.toString() + " in state " + vmInstance.getState());
             throw new InvalidParameterValueException("Unable to upgrade virtual machine " + vmInstance.toString() + " " +
-                    "in state " + vmInstance.getState()
+                    "in state " + vmInstance.getState()
                     + "; make sure the virtual machine is stopped and not in an error state before upgrading.");
         }
 
@@ -2395,11 +2393,11 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
         if (vmInstance.getServiceOfferingId() == newServiceOffering.getId()) {
             if (s_logger.isInfoEnabled()) {
                 s_logger.info("Not upgrading vm " + vmInstance.toString() + " since it already has the requested " +
-                        "service offering (" + newServiceOffering.getName() + ")");
+                        "service offering (" + newServiceOffering.getName() + ")");
             }
 
             throw new InvalidParameterValueException("Not upgrading vm " + vmInstance.toString() + " since it already " +
-                    "has the requested service offering (" + newServiceOffering.getName() + ")");
+                    "has the requested service offering (" + newServiceOffering.getName() + ")");
         }
 
         ServiceOfferingVO currentServiceOffering = _offeringDao.findByIdIncludingRemoved(vmInstance.getServiceOfferingId());
@@ -2421,7 +2419,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
                     "useLocalStorage=" + currentServiceOffering.getUseLocalStorage() + ", target offering useLocalStorage=" + newServiceOffering.getUseLocalStorage());
         }
-
+
         // if vm is a system vm, check if it is a system service offering, if yes return with error as it cannot be used for user vms
         if (currentServiceOffering.getSystemUse() != newServiceOffering.getSystemUse()) {
             throw new InvalidParameterValueException("isSystem property is different for current service offering and new service offering");
@@ -2430,7 +2428,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
         // Check that there are enough resources to upgrade the service offering
         if (!isVirtualMachineUpgradable(vmInstance, newServiceOffering)) {
             throw new InvalidParameterValueException("Unable to upgrade virtual machine, not enough resources available " +
-                    "for an offering of " + newServiceOffering.getCpu() + " cpu(s) at "
+                    "for an offering of " + newServiceOffering.getCpu() + " cpu(s) at "
                     + newServiceOffering.getSpeed() + " Mhz, and " + newServiceOffering.getRamSize() + " MB of memory");
         }
 
@@ -2439,12 +2437,12 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
         List newTags = _configMgr.csvTagsToList(newServiceOffering.getTags());
         if (!newTags.containsAll(currentTags)) {
             throw new InvalidParameterValueException("Unable to upgrade virtual machine; the new service offering " +
-                    "does not have all the tags of the "
+                    "does not have all the tags of the "
                     + "current service offering. Current service offering tags: " + currentTags + "; " + "new service " +
-                    "offering tags: " + newTags);
+                    "offering tags: " + newTags);
         }
     }
-
+
     @Override
     public boolean upgradeVmDb(long vmId, long serviceOfferingId) {
         VMInstanceVO vmForUpdate = _vmDao.createForUpdate();
@@ -2455,38 +2453,38 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
         vmForUpdate.setServiceOfferingId(newSvcOff.getId());
         return _vmDao.update(vmId, vmForUpdate);
     }
-
+
     @Override
     public NicProfile addVmToNetwork(VirtualMachine vm, Network network, NicProfile requested) throws ConcurrentOperationException,
-            ResourceUnavailableException, InsufficientCapacityException {
-
+            ResourceUnavailableException, InsufficientCapacityException {
+
         s_logger.debug("Adding vm " + vm + " to network " + network + "; requested nic profile " + requested);
         VMInstanceVO vmVO = _vmDao.findById(vm.getId());
         ReservationContext context = new ReservationContextImpl(null, null, _accountMgr.getActiveUser(User.UID_SYSTEM),
                 _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM));
-
+
         VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmVO, null, null, null, null);
-
+
         DataCenter dc = _configMgr.getZone(network.getDataCenterId());
         Host host = _hostDao.findById(vm.getHostId());
         DeployDestination dest = new DeployDestination(dc, null, null, host);
-
+
         //check vm state
         if (vm.getState() == State.Running) {
             //1) allocate and prepare nic
             NicProfile nic = _networkMgr.createNicForVm(network, requested, context, vmProfile, true);
-
+
             //2) Convert vmProfile to vmTO
             HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vmProfile.getVirtualMachine().getHypervisorType());
             VirtualMachineTO vmTO = hvGuru.implement(vmProfile);
-
+
             //3) Convert nicProfile to NicTO
             NicTO nicTO = toNicTO(nic, vmProfile.getVirtualMachine().getHypervisorType());
-
+
             //4) plug the nic to the vm
             VirtualMachineGuru vmGuru = getVmGuru(vmVO);
-
+
             s_logger.debug("Plugging nic for vm " + vm + " in network " + network);
             if (vmGuru.plugNic(network, nicTO, vmTO, context, dest)) {
                 s_logger.debug("Nic is plugged successfully for vm " + vm + " in network " + network + ". Vm is a part of network now");
@@ -2509,40 +2507,40 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
     @Override
     public NicTO toNicTO(NicProfile nic, HypervisorType hypervisorType) {
         HypervisorGuru hvGuru = _hvGuruMgr.getGuru(hypervisorType);
-
+
         NicTO nicTO = hvGuru.toNicTO(nic);
         return nicTO;
     }
-
+
     @Override
     public boolean removeVmFromNetwork(VirtualMachine vm, Network network, URI broadcastUri) throws ConcurrentOperationException, ResourceUnavailableException {
         VMInstanceVO vmVO = _vmDao.findById(vm.getId());
         ReservationContext context = new ReservationContextImpl(null, null, _accountMgr.getActiveUser(User.UID_SYSTEM),
                 _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM));
-
+
         VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmVO, null, null, null, null);
-
+
         DataCenter dc = _configMgr.getZone(network.getDataCenterId());
         Host host = _hostDao.findById(vm.getHostId());
         DeployDestination dest = new DeployDestination(dc, null, null, host);
         VirtualMachineGuru vmGuru = getVmGuru(vmVO);
         HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vmProfile.getVirtualMachine().getHypervisorType());
         VirtualMachineTO vmTO = hvGuru.implement(vmProfile);
-
+
         Nic nic = null;
-
+
         if (broadcastUri != null) {
             nic = _nicsDao.findByNetworkIdInstanceIdAndBroadcastUri(network.getId(), vm.getId(), broadcastUri.toString());
         } else {
             nic = _networkMgr.getNicInNetwork(vm.getId(), network.getId());
         }
-
+
         NicProfile nicProfile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(),
                 _networkMgr.getNetworkRate(network.getId(), vm.getId()),
                 _networkMgr.isSecurityGroupSupportedInNetwork(network),
                 _networkMgr.getNetworkTag(vmProfile.getVirtualMachine().getHypervisorType(), network));
-
+
         //1) Unplug the nic
         NicTO nicTO = toNicTO(nicProfile, vmProfile.getVirtualMachine().getHypervisorType());
         s_logger.debug("Un-plugging nic for vm " + vm + " from network " + network);
@@ -2553,14 +2551,14 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
             s_logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network);
             return false;
         }
-
+
         //2) Release the nic
         _networkMgr.releaseNic(vmProfile, nic);
         s_logger.debug("Successfully released nic " + nic + "for vm " + vm);
-
+
         //3) Remove the nic
         _networkMgr.removeNic(vmProfile, nic);
         return result;
     }
-
+
 }
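Editor's note: checkIfCanUpgrade above only permits a new service offering whose CSV tag list is a superset of the current offering's tags. A minimal standalone sketch of that containment rule (method names are invented; the real code resolves tags through _configMgr.csvTagsToList):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class OfferingTagCheckSketch {
    // Rough stand-in for ConfigurationManager.csvTagsToList.
    static List<String> csvTagsToList(String csv) {
        return (csv == null || csv.isEmpty())
                ? Collections.<String>emptyList()
                : Arrays.asList(csv.split(","));
    }

    // Upgrade is allowed only when every current tag is still present on the new offering.
    static boolean tagsCompatible(String currentCsv, String newCsv) {
        return csvTagsToList(newCsv).containsAll(csvTagsToList(currentCsv));
    }

    public static void main(String[] args) {
        System.out.println(tagsCompatible("ssd", "ssd,ha"));  // true: new offering is a superset
        System.out.println(tagsCompatible("ssd,ha", "ssd"));  // false: the "ha" tag would be lost
    }
}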
Vm is a part of network now"); @@ -2509,40 +2507,40 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene @Override public NicTO toNicTO(NicProfile nic, HypervisorType hypervisorType) { HypervisorGuru hvGuru = _hvGuruMgr.getGuru(hypervisorType); - + NicTO nicTO = hvGuru.toNicTO(nic); return nicTO; } - + @Override public boolean removeVmFromNetwork(VirtualMachine vm, Network network, URI broadcastUri) throws ConcurrentOperationException, ResourceUnavailableException { VMInstanceVO vmVO = _vmDao.findById(vm.getId()); ReservationContext context = new ReservationContextImpl(null, null, _accountMgr.getActiveUser(User.UID_SYSTEM), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM)); - + VirtualMachineProfileImpl<VMInstanceVO> vmProfile = new VirtualMachineProfileImpl<VMInstanceVO>(vmVO, null, null, null, null); - + DataCenter dc = _configMgr.getZone(network.getDataCenterId()); Host host = _hostDao.findById(vm.getHostId()); DeployDestination dest = new DeployDestination(dc, null, null, host); VirtualMachineGuru<VMInstanceVO> vmGuru = getVmGuru(vmVO); HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vmProfile.getVirtualMachine().getHypervisorType()); VirtualMachineTO vmTO = hvGuru.implement(vmProfile); - + Nic nic = null; - + if (broadcastUri != null) { nic = _nicsDao.findByNetworkIdInstanceIdAndBroadcastUri(network.getId(), vm.getId(), broadcastUri.toString()); } else { nic = _networkMgr.getNicInNetwork(vm.getId(), network.getId()); } - + NicProfile nicProfile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), _networkMgr.getNetworkRate(network.getId(), vm.getId()), _networkMgr.isSecurityGroupSupportedInNetwork(network), _networkMgr.getNetworkTag(vmProfile.getVirtualMachine().getHypervisorType(), network)); - + //1) Unplug the nic NicTO nicTO = toNicTO(nicProfile, vmProfile.getVirtualMachine().getHypervisorType()); s_logger.debug("Un-plugging nic for vm " + vm + " from network " + network); @@ -2553,14 +2551,14 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene s_logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network); return false; } - + //2) Release the nic _networkMgr.releaseNic(vmProfile, nic); s_logger.debug("Successfully released nic " + nic + "for vm " + vm); - + //3) Remove the nic _networkMgr.removeNic(vmProfile, nic); return result; } - + } diff --git a/server/src/com/cloud/vm/dao/DomainRouterDaoImpl.java b/server/src/com/cloud/vm/dao/DomainRouterDaoImpl.java index 40c97e1e278..cfe9f43f5f3 100755 --- a/server/src/com/cloud/vm/dao/DomainRouterDaoImpl.java +++ b/server/src/com/cloud/vm/dao/DomainRouterDaoImpl.java @@ -49,6 +49,7 @@ import com.cloud.vm.VirtualMachine.State; @Component @Local(value = { DomainRouterDao.class }) +@DB public class DomainRouterDaoImpl extends GenericDaoBase<DomainRouterVO, Long> implements DomainRouterDao { protected SearchBuilder<DomainRouterVO> AllFieldsSearch; diff --git a/server/src/com/cloud/vm/dao/UserVmDaoImpl.java b/server/src/com/cloud/vm/dao/UserVmDaoImpl.java index 8eda2e24ba1..e2cf02e010c 100755 --- a/server/src/com/cloud/vm/dao/UserVmDaoImpl.java +++ b/server/src/com/cloud/vm/dao/UserVmDaoImpl.java @@ -37,7 +37,7 @@ import com.cloud.configuration.Resource; import com.cloud.server.ResourceTag.TaggedResourceType; import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.user.Account; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.Attribute; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; diff --git
a/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java b/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java index d5693da9d0c..2e1d8514f7f 100644 --- a/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -39,7 +39,7 @@ import com.cloud.host.dao.HostDaoImpl; import com.cloud.server.ResourceTag.TaggedResourceType; import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.Attribute; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; diff --git a/server/test/com/cloud/async/TestAsync.java b/server/test/com/cloud/async/TestAsync.java index 187f2e56c57..6f67fe2227f 100644 --- a/server/test/com/cloud/async/TestAsync.java +++ b/server/test/com/cloud/async/TestAsync.java @@ -19,18 +19,13 @@ package com.cloud.async; import java.util.List; -import org.apache.log4j.Logger; - import junit.framework.Assert; -import com.cloud.async.AsyncJobVO; -import com.cloud.cluster.StackMaid; +import org.apache.log4j.Logger; + import com.cloud.cluster.CheckPointVO; import com.cloud.cluster.dao.StackMaidDao; import com.cloud.cluster.dao.StackMaidDaoImpl; -import com.cloud.serializer.Param; -import com.cloud.utils.ActionDelegate; -import com.cloud.utils.Pair; import com.cloud.utils.db.Transaction; import com.cloud.utils.testcase.Log4jEnabledTestCase; @@ -42,15 +37,15 @@ public class TestAsync extends Log4jEnabledTestCase { public static class SampleAsyncResult { @Param(name="name", propName="name") private final String _name; - + @Param private final int count; - + public SampleAsyncResult(String name, int count) { _name = name; this.count = count; } - + public String getName() { return _name; } public int getCount() { return count; } } @@ -60,31 +55,31 @@ public class TestAsync extends Log4jEnabledTestCase { AsyncJobVO job = new AsyncJobVO(1, 1, "TestCmd", null); job.setInstanceType("user_vm"); job.setInstanceId(1000L); - + char[] buf = new char[1024]; for(int i = 0; i < 1024; i++) buf[i] = 'a'; - + job.setResult(new String(buf)); dao.persist(job); - + AsyncJobVO jobVerify = dao.findById(job.getId()); - + Assert.assertTrue(jobVerify.getCmd().equals(job.getCmd())); Assert.assertTrue(jobVerify.getUserId() == 1); Assert.assertTrue(jobVerify.getAccountId() == 1); - + String result = jobVerify.getResult(); for(int i = 0; i < 1024; i++) Assert.assertTrue(result.charAt(i) == 'a'); - + jobVerify = dao.findInstancePendingAsyncJob("user_vm", 1000L); Assert.assertTrue(jobVerify != null); Assert.assertTrue(jobVerify.getCmd().equals(job.getCmd())); Assert.assertTrue(jobVerify.getUserId() == 1); Assert.assertTrue(jobVerify.getAccountId() == 1); } - + public void testSerialization() { List<Pair<String, Object>> l; int value = 1; @@ -93,23 +88,23 @@ public class TestAsync extends Log4jEnabledTestCase { Assert.assertTrue(l.get(0).first().equals("result")); Assert.assertTrue(l.get(0).second().equals("1")); l.clear(); - + SampleAsyncResult result = new SampleAsyncResult("vmops", 1); l = SerializerHelper.toPairList(result, "result"); - + Assert.assertTrue(l.size() == 2); Assert.assertTrue(l.get(0).first().equals("name")); Assert.assertTrue(l.get(0).second().equals("vmops")); Assert.assertTrue(l.get(1).first().equals("count")); Assert.assertTrue(l.get(1).second().equals("1")); } - + public void testAsyncResult() { AsyncJobResult result = new AsyncJobResult(1); - + result.setResultObject(100); Assert.assertTrue(result.getResult().equals("java.lang.Integer/100")); - + Object obj =
result.getResultObject(); Assert.assertTrue(obj instanceof Integer); Assert.assertTrue(((Integer)obj).intValue() == 100); @@ -119,7 +114,7 @@ public class TestAsync extends Log4jEnabledTestCase { Transaction txn = Transaction.open("testTransaction"); try { txn.start(); - + AsyncJobDao dao = new AsyncJobDaoImpl(); AsyncJobVO job = new AsyncJobVO(1, 1, "TestCmd", null); job.setInstanceType("user_vm"); @@ -131,11 +126,11 @@ public class TestAsync extends Log4jEnabledTestCase { txn.close(); } } - + public void testMorevingian() { int threadCount = 10; final int testCount = 10; - + Thread[] threads = new Thread[threadCount]; for(int i = 0; i < threadCount; i++) { final int threadNum = i + 1; @@ -145,35 +140,35 @@ public class TestAsync extends Log4jEnabledTestCase { Transaction txn = Transaction.open(Transaction.CLOUD_DB); try { AsyncJobDao dao = new AsyncJobDaoImpl(); - + s_logger.info("Thread " + threadNum + " acquiring lock"); AsyncJobVO job = dao.acquire(1L, 30); if(job != null) { s_logger.info("Thread " + threadNum + " acquired lock"); - + try { Thread.sleep(Log4jEnabledTestCase.getRandomMilliseconds(1000, 3000)); } catch (InterruptedException e) { } - + s_logger.info("Thread " + threadNum + " acquiring lock nestly"); AsyncJobVO job2 = dao.acquire(1L, 30); if(job2 != null) { s_logger.info("Thread " + threadNum + " acquired lock nestly"); - + try { Thread.sleep(Log4jEnabledTestCase.getRandomMilliseconds(1000, 3000)); } catch (InterruptedException e) { } - + s_logger.info("Thread " + threadNum + " releasing lock (nestly acquired)"); dao.release(1L); s_logger.info("Thread " + threadNum + " released lock (nestly acquired)"); - + } else { s_logger.info("Thread " + threadNum + " was unable to acquire lock nestly"); } - + s_logger.info("Thread " + threadNum + " releasing lock"); dao.release(1L); s_logger.info("Thread " + threadNum + " released lock"); @@ -183,7 +178,7 @@ public class TestAsync extends Log4jEnabledTestCase { } finally { txn.close(); } - + try { Thread.sleep(Log4jEnabledTestCase.getRandomMilliseconds(1000, 10000)); } catch (InterruptedException e) { @@ -192,11 +187,11 @@ public class TestAsync extends Log4jEnabledTestCase { } }); } - + for(int i = 0; i < threadCount; i++) { threads[i].start(); } - + for(int i = 0; i < threadCount; i++) { try { threads[i].join(); @@ -204,88 +199,83 @@ public class TestAsync extends Log4jEnabledTestCase { } } } - */ - - public void testMaid() { - Transaction txn = Transaction.open(Transaction.CLOUD_DB); - - StackMaidDao dao = new StackMaidDaoImpl(); - dao.pushCleanupDelegate(1L, 0, "delegate1", "Hello, world"); - dao.pushCleanupDelegate(1L, 1, "delegate2", new Long(100)); - dao.pushCleanupDelegate(1L, 2, "delegate3", null); - - CheckPointVO item = dao.popCleanupDelegate(1L); - Assert.assertTrue(item.getDelegate().equals("delegate3")); - Assert.assertTrue(item.getContext() == null); - - item = dao.popCleanupDelegate(1L); - Assert.assertTrue(item.getDelegate().equals("delegate2")); - s_logger.info(item.getContext()); + */ - item = dao.popCleanupDelegate(1L); - Assert.assertTrue(item.getDelegate().equals("delegate1")); - s_logger.info(item.getContext()); - - txn.close(); - } - - public void testMaidClear() { - Transaction txn = Transaction.open(Transaction.CLOUD_DB); - - StackMaidDao dao = new StackMaidDaoImpl(); - dao.pushCleanupDelegate(1L, 0, "delegate1", "Hello, world"); - dao.pushCleanupDelegate(1L, 1, "delegate2", new Long(100)); - dao.pushCleanupDelegate(1L, 2, "delegate3", null); - - dao.clearStack(1L); - 
Assert.assertTrue(dao.popCleanupDelegate(1L) == null); - txn.close(); - } - - public void testMaidExitCleanup() { - StackMaid.current().push(1L, "com.cloud.async.CleanupDelegate", "Hello, world1"); - StackMaid.current().push(1L, "com.cloud.async.CleanupDelegate", "Hello, world2"); - - StackMaid.current().exitCleanup(1L); - } - - public void testMaidLeftovers() { + public void testMaid() { + Transaction txn = Transaction.open(Transaction.CLOUD_DB); - Thread[] threads = new Thread[3]; - for(int i = 0; i < 3; i++) { - final int threadNum = i+1; - threads[i] = new Thread(new Runnable() { - public void run() { - Transaction txn = Transaction.open(Transaction.CLOUD_DB); - - StackMaidDao dao = new StackMaidDaoImpl(); - dao.pushCleanupDelegate(1L, 0, "delegate-" + threadNum, "Hello, world"); - dao.pushCleanupDelegate(1L, 1, "delegate-" + threadNum, new Long(100)); - dao.pushCleanupDelegate(1L, 2, "delegate-" + threadNum, null); - - txn.close(); - } - }); - - threads[i].start(); - } - - for(int i = 0; i < 3; i++) { - try { - threads[i].join(); - } catch (InterruptedException e) { - } - } + StackMaidDao dao = new StackMaidDaoImpl(); + dao.pushCleanupDelegate(1L, 0, "delegate1", "Hello, world"); + dao.pushCleanupDelegate(1L, 1, "delegate2", new Long(100)); + dao.pushCleanupDelegate(1L, 2, "delegate3", null); - - Transaction txn = Transaction.open(Transaction.CLOUD_DB); - - StackMaidDao dao = new StackMaidDaoImpl(); - List<CheckPointVO> l = dao.listLeftoversByMsid(1L); - for(CheckPointVO maid : l) { - s_logger.info("" + maid.getThreadId() + " " + maid.getDelegate() + " " + maid.getContext()); - } - - txn.close(); - } + CheckPointVO item = dao.popCleanupDelegate(1L); + Assert.assertTrue(item.getDelegate().equals("delegate3")); + Assert.assertTrue(item.getContext() == null); + + item = dao.popCleanupDelegate(1L); + Assert.assertTrue(item.getDelegate().equals("delegate2")); + s_logger.info(item.getContext()); + + item = dao.popCleanupDelegate(1L); + Assert.assertTrue(item.getDelegate().equals("delegate1")); + s_logger.info(item.getContext()); + + txn.close(); + } + + public void testMaidClear() { + Transaction txn = Transaction.open(Transaction.CLOUD_DB); + + StackMaidDao dao = new StackMaidDaoImpl(); + dao.pushCleanupDelegate(1L, 0, "delegate1", "Hello, world"); + dao.pushCleanupDelegate(1L, 1, "delegate2", new Long(100)); + dao.pushCleanupDelegate(1L, 2, "delegate3", null); + + dao.clearStack(1L); + Assert.assertTrue(dao.popCleanupDelegate(1L) == null); + txn.close(); + } + + + public void testMaidLeftovers() { + + Thread[] threads = new Thread[3]; + for(int i = 0; i < 3; i++) { + final int threadNum = i+1; + threads[i] = new Thread(new Runnable() { + @Override + public void run() { + Transaction txn = Transaction.open(Transaction.CLOUD_DB); + + StackMaidDao dao = new StackMaidDaoImpl(); + dao.pushCleanupDelegate(1L, 0, "delegate-" + threadNum, "Hello, world"); + dao.pushCleanupDelegate(1L, 1, "delegate-" + threadNum, new Long(100)); + dao.pushCleanupDelegate(1L, 2, "delegate-" + threadNum, null); + + txn.close(); + } + }); + + threads[i].start(); + } + + for(int i = 0; i < 3; i++) { + try { + threads[i].join(); + } catch (InterruptedException e) { + } + } + + + Transaction txn = Transaction.open(Transaction.CLOUD_DB); + + StackMaidDao dao = new StackMaidDaoImpl(); + List<CheckPointVO> l = dao.listLeftoversByMsid(1L); + for(CheckPointVO maid : l) { + s_logger.info("" + maid.getThreadId() + " " + maid.getDelegate() + " " + maid.getContext()); + } + + txn.close(); + } } diff --git
a/server/test/com/cloud/async/TestAsyncJobManager.java b/server/test/com/cloud/async/TestAsyncJobManager.java index 8ce51fa3849..7cb24ddc388 100644 --- a/server/test/com/cloud/async/TestAsyncJobManager.java +++ b/server/test/com/cloud/async/TestAsyncJobManager.java @@ -20,7 +20,10 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import javax.inject.Inject; + import junit.framework.Assert; +import junit.framework.TestCase; import org.apache.log4j.Logger; @@ -31,102 +34,99 @@ import com.cloud.exception.PermissionDeniedException; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.host.dao.HostDaoImpl; -import com.cloud.server.ManagementServer; -import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.Transaction; -import com.cloud.utils.testcase.ComponentSetup; -import com.cloud.utils.testcase.ComponentTestCase; -@ComponentSetup(managerName="management-server", setupXml="async-job-component.xml") -public class TestAsyncJobManager extends ComponentTestCase { +public class TestAsyncJobManager extends TestCase { public static final Logger s_logger = Logger.getLogger(TestAsyncJobManager.class.getName()); - + volatile long s_count = 0; - public void asyncCall() { - AsyncJobManager asyncMgr = ComponentLocator.getLocator(ManagementServer.Name).getManager(AsyncJobManager.class); + @Inject AsyncJobManager asyncMgr; + public void asyncCall() { // long jobId = mgr.rebootVirtualMachineAsync(1, 1); long jobId = 0L; - s_logger.info("Async-call job id: " + jobId); - - while(true) { - AsyncJobResult result; - try { - result = asyncMgr.queryAsyncJobResult(jobId); - - if(result.getJobStatus() != AsyncJobResult.STATUS_IN_PROGRESS) { - s_logger.info("Async-call completed, result: " + result.toString()); - break; - } - s_logger.info("Async-call is in progress, progress: " + result.toString()); - - } catch (PermissionDeniedException e1) { - } - - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - } - } - } - - public void sequence() { - final HostDao hostDao = new HostDaoImpl(); - long seq = hostDao.getNextSequence(1); - s_logger.info("******* seq : " + seq + " ********"); - - HashMap hashMap = new HashMap(); - final Map map = Collections.synchronizedMap(hashMap); - - s_count = 0; - final long maxCount = 1000000; // test one million times - - Thread t1 = new Thread(new Runnable() { - public void run() { - while(s_count < maxCount) { - s_count++; - long seq = hostDao.getNextSequence(1); - Assert.assertTrue(map.put(seq, seq) == null); - } - } - }); - - Thread t2 = new Thread(new Runnable() { - public void run() { - while(s_count < maxCount) { - s_count++; - long seq = hostDao.getNextSequence(1); - Assert.assertTrue(map.put(seq, seq) == null); - } - } - }); - - t1.start(); - t2.start(); - - try { - t1.join(); - t2.join(); - } catch (InterruptedException e) { - } - } + s_logger.info("Async-call job id: " + jobId); - /* + while(true) { + AsyncJobResult result; + try { + result = asyncMgr.queryAsyncJobResult(jobId); + + if(result.getJobStatus() != AsyncJobResult.STATUS_IN_PROGRESS) { + s_logger.info("Async-call completed, result: " + result.toString()); + break; + } + s_logger.info("Async-call is in progress, progress: " + result.toString()); + + } catch (PermissionDeniedException e1) { + } + + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + } + } + } + + public void sequence() { + final HostDao hostDao = new HostDaoImpl(); + long seq = hostDao.getNextSequence(1); + s_logger.info("******* seq : " + seq 
+ " ********"); + + HashMap hashMap = new HashMap(); + final Map map = Collections.synchronizedMap(hashMap); + + s_count = 0; + final long maxCount = 1000000; // test one million times + + Thread t1 = new Thread(new Runnable() { + @Override + public void run() { + while(s_count < maxCount) { + s_count++; + long seq = hostDao.getNextSequence(1); + Assert.assertTrue(map.put(seq, seq) == null); + } + } + }); + + Thread t2 = new Thread(new Runnable() { + @Override + public void run() { + while(s_count < maxCount) { + s_count++; + long seq = hostDao.getNextSequence(1); + Assert.assertTrue(map.put(seq, seq) == null); + } + } + }); + + t1.start(); + t2.start(); + + try { + t1.join(); + t2.join(); + } catch (InterruptedException e) { + } + } + + /* public void ipAssignment() { final IPAddressDao ipAddressDao = new IPAddressDaoImpl(); - + final ConcurrentHashMap map = new ConcurrentHashMap(); //final Map map = Collections.synchronizedMap(hashMap); - + s_count = 0; final long maxCount = 1000000; // test one million times - + Thread t1 = new Thread(new Runnable() { public void run() { while(s_count < maxCount) { s_count++; - + Transaction txn = Transaction.open("Alex1"); try { IPAddressVO addr = ipAddressDao.assignIpAddress(1, 0, 1, false); @@ -141,12 +141,12 @@ public class TestAsyncJobManager extends ComponentTestCase { } } }); - + Thread t2 = new Thread(new Runnable() { public void run() { while(s_count < maxCount) { s_count++; - + Transaction txn = Transaction.open("Alex2"); try { IPAddressVO addr = ipAddressDao.assignIpAddress(1, 0, 1, false); @@ -157,96 +157,96 @@ public class TestAsyncJobManager extends ComponentTestCase { } } }); - + t1.start(); t2.start(); - + try { t1.join(); t2.join(); } catch (InterruptedException e) { } } - */ - - private long getRandomLockId() { - return 1L; - - /* - * will use in the future test cases + */ + + private long getRandomLockId() { + return 1L; + + /* + * will use in the future test cases int i = new Random().nextInt(); if(i % 2 == 0) return 1L; return 2L; - */ - } - - public void tstLocking() { - - int testThreads = 20; - Thread[] threads = new Thread[testThreads]; - - for(int i = 0; i < testThreads; i++) { - final int current = i; - threads[i] = new Thread(new Runnable() { - public void run() { - - final HostDao hostDao = new HostDaoImpl(); - while(true) { - Transaction txn = Transaction.currentTxn(); - try { - HostVO host = hostDao.acquireInLockTable(getRandomLockId(), 10); - if(host != null) { - s_logger.info("Thread " + (current + 1) + " acquired lock"); - - try { Thread.sleep(getRandomMilliseconds(1000, 5000)); } catch (InterruptedException e) {} - - s_logger.info("Thread " + (current + 1) + " released lock"); - hostDao.releaseFromLockTable(host.getId()); - - try { Thread.sleep(getRandomMilliseconds(1000, 5000)); } catch (InterruptedException e) {} - } else { - s_logger.info("Thread " + (current + 1) + " is not able to acquire lock"); - } - } finally { - txn.close(); - } - } - } - }); - threads[i].start(); - } - - try { - for(int i = 0; i < testThreads; i++) - threads[i].join(); - } catch(InterruptedException e) { - } - } - - public void testDomain() { - getRandomMilliseconds(1, 100); - DomainDao domainDao = new DomainDaoImpl(); - - DomainVO domain1 = new DomainVO("d1", 2L, 1L, null); - domainDao.create(domain1); - - DomainVO domain2 = new DomainVO("d2", 2L, 1L, null); - domainDao.create(domain2); - - DomainVO domain3 = new DomainVO("d3", 2L, 1L, null); - domainDao.create(domain3); + */ + } - DomainVO domain11 = new DomainVO("d11", 2L, domain1.getId(), 
null); - domainDao.create(domain11); - - domainDao.remove(domain11.getId()); - - DomainVO domain12 = new DomainVO("d12", 2L, domain1.getId(), null); - domainDao.create(domain12); - - domainDao.remove(domain3.getId()); - DomainVO domain4 = new DomainVO("d4", 2L, 1L, null); - domainDao.create(domain4); - } + public void tstLocking() { + + int testThreads = 20; + Thread[] threads = new Thread[testThreads]; + + for(int i = 0; i < testThreads; i++) { + final int current = i; + threads[i] = new Thread(new Runnable() { + @Override + public void run() { + + final HostDao hostDao = new HostDaoImpl(); + while(true) { + Transaction txn = Transaction.currentTxn(); + try { + HostVO host = hostDao.acquireInLockTable(getRandomLockId(), 10); + if(host != null) { + s_logger.info("Thread " + (current + 1) + " acquired lock"); + + try { Thread.sleep(1000); } catch (InterruptedException e) {} + + s_logger.info("Thread " + (current + 1) + " released lock"); + hostDao.releaseFromLockTable(host.getId()); + + try { Thread.sleep(1000); } catch (InterruptedException e) {} + } else { + s_logger.info("Thread " + (current + 1) + " is not able to acquire lock"); + } + } finally { + txn.close(); + } + } + } + }); + threads[i].start(); + } + + try { + for(int i = 0; i < testThreads; i++) + threads[i].join(); + } catch(InterruptedException e) { + } + } + + public void testDomain() { + DomainDao domainDao = new DomainDaoImpl(); + + DomainVO domain1 = new DomainVO("d1", 2L, 1L, null); + domainDao.create(domain1); + + DomainVO domain2 = new DomainVO("d2", 2L, 1L, null); + domainDao.create(domain2); + + DomainVO domain3 = new DomainVO("d3", 2L, 1L, null); + domainDao.create(domain3); + + DomainVO domain11 = new DomainVO("d11", 2L, domain1.getId(), null); + domainDao.create(domain11); + + domainDao.remove(domain11.getId()); + + DomainVO domain12 = new DomainVO("d12", 2L, domain1.getId(), null); + domainDao.create(domain12); + + domainDao.remove(domain3.getId()); + DomainVO domain4 = new DomainVO("d4", 2L, 1L, null); + domainDao.create(domain4); + } } diff --git a/server/test/com/cloud/async/TestSyncQueueManager.java b/server/test/com/cloud/async/TestSyncQueueManager.java index 2bbf7bcc8bd..2322aecd332 100644 --- a/server/test/com/cloud/async/TestSyncQueueManager.java +++ b/server/test/com/cloud/async/TestSyncQueueManager.java @@ -18,194 +18,187 @@ package com.cloud.async; import java.util.List; +import javax.inject.Inject; + +import junit.framework.TestCase; + import org.apache.log4j.Logger; import org.junit.Assert; -import com.cloud.utils.component.ComponentLocator; -import com.cloud.utils.testcase.ComponentSetup; -import com.cloud.utils.testcase.ComponentTestCase; -@ComponentSetup(managerName="management-server", setupXml="sync-queue-component.xml") -public class TestSyncQueueManager extends ComponentTestCase { +public class TestSyncQueueManager extends TestCase { public static final Logger s_logger = Logger.getLogger(TestSyncQueueManager.class.getName()); - + private volatile int count = 0; private volatile long expectingCurrent = 1; + @Inject SyncQueueManager mgr; - public void leftOverItems() { - SyncQueueManager mgr = ComponentLocator.getCurrentLocator().getManager( - SyncQueueManager.class); + public void leftOverItems() { - List<SyncQueueItemVO> l = mgr.getActiveQueueItems(1L, false); - if(l != null && l.size() > 0) { - for(SyncQueueItemVO item : l) { - s_logger.info("Left over item: " + item.toString()); - mgr.purgeItem(item.getId()); - } - } - } + List<SyncQueueItemVO> l = mgr.getActiveQueueItems(1L, false); + if(l != null && l.size() > 0) { +
for(SyncQueueItemVO item : l) { + s_logger.info("Left over item: " + item.toString()); + mgr.purgeItem(item.getId()); + } + } + } - public void dequeueFromOneQueue() { - final SyncQueueManager mgr = ComponentLocator.getCurrentLocator().getManager( - SyncQueueManager.class); - - final int totalRuns = 5000; - final SyncQueueVO queue = mgr.queue("vm_instance", 1L, "Async-job", 1, 1); - for(int i = 1; i < totalRuns; i++) - mgr.queue("vm_instance", 1L, "Async-job", i+1, 1); - - count = 0; - expectingCurrent = 1; - Thread thread1 = new Thread(new Runnable() { - public void run() { - while(count < totalRuns) { - SyncQueueItemVO item = mgr.dequeueFromOne(queue.getId(), 1L); - if(item != null) { - s_logger.info("Thread 1 process item: " + item.toString()); - - Assert.assertEquals(expectingCurrent, item.getContentId().longValue()); - expectingCurrent++; - count++; - - mgr.purgeItem(item.getId()); - } - try { - Thread.sleep(getRandomMilliseconds(1, 10)); - } catch (InterruptedException e) { - } - } - } - } - ); - - Thread thread2 = new Thread(new Runnable() { - public void run() { - while(count < totalRuns) { - SyncQueueItemVO item = mgr.dequeueFromOne(queue.getId(), 1L); - if(item != null) { - s_logger.info("Thread 2 process item: " + item.toString()); - - Assert.assertEquals(expectingCurrent, item.getContentId().longValue()); - expectingCurrent++; - count++; - mgr.purgeItem(item.getId()); - } - - try { - Thread.sleep(getRandomMilliseconds(1, 10)); - } catch (InterruptedException e) { - } - } - } - } - ); - - thread1.start(); - thread2.start(); - try { - thread1.join(); - } catch (InterruptedException e) { - } - try { - thread2.join(); - } catch (InterruptedException e) { - } - - Assert.assertEquals(totalRuns, count); - } - - public void dequeueFromAnyQueue() { - final SyncQueueManager mgr = ComponentLocator.getCurrentLocator().getManager( - SyncQueueManager.class); + public void dequeueFromOneQueue() { + final int totalRuns = 5000; + final SyncQueueVO queue = mgr.queue("vm_instance", 1L, "Async-job", 1, 1); + for(int i = 1; i < totalRuns; i++) + mgr.queue("vm_instance", 1L, "Async-job", i+1, 1); - // simulate 30 queues - final int queues = 30; - final int totalRuns = 100; - final int itemsPerRun = 20; - for(int q = 1; q <= queues; q++) - for(int i = 0; i < totalRuns; i++) - mgr.queue("vm_instance", q, "Async-job", i+1, 1); - - count = 0; - Thread thread1 = new Thread(new Runnable() { - public void run() { - while(count < totalRuns*queues) { - List l = mgr.dequeueFromAny(1L, itemsPerRun); - if(l != null && l.size() > 0) { - s_logger.info("Thread 1 get " + l.size() + " dequeued items"); - - for(SyncQueueItemVO item : l) { - s_logger.info("Thread 1 process item: " + item.toString()); - count++; - - mgr.purgeItem(item.getId()); - } - } - try { - Thread.sleep(getRandomMilliseconds(1, 10)); - } catch (InterruptedException e) { - } - } - } - } - ); - - Thread thread2 = new Thread(new Runnable() { - public void run() { - while(count < totalRuns*queues) { - List l = mgr.dequeueFromAny(1L, itemsPerRun); - if(l != null && l.size() > 0) { - s_logger.info("Thread 2 get " + l.size() + " dequeued items"); - - for(SyncQueueItemVO item : l) { - s_logger.info("Thread 2 process item: " + item.toString()); - count++; - mgr.purgeItem(item.getId()); - } - } - - try { - Thread.sleep(getRandomMilliseconds(1, 10)); - } catch (InterruptedException e) { - } - } - } - } - ); - - thread1.start(); - thread2.start(); - try { - thread1.join(); - } catch (InterruptedException e) { - } - try { - thread2.join(); - } catch 
(InterruptedException e) { - } - Assert.assertEquals(queues*totalRuns, count); - } - - public void testPopulateQueueData() { - final int queues = 30000; - final int totalRuns = 100; - - final SyncQueueManager mgr = ComponentLocator.getCurrentLocator().getManager( - SyncQueueManager.class); - for(int q = 1; q <= queues; q++) - for(int i = 0; i < totalRuns; i++) - mgr.queue("vm_instance", q, "Async-job", i+1, 1); - } - + count = 0; + expectingCurrent = 1; + Thread thread1 = new Thread(new Runnable() { + @Override + public void run() { + while(count < totalRuns) { + SyncQueueItemVO item = mgr.dequeueFromOne(queue.getId(), 1L); + if(item != null) { + s_logger.info("Thread 1 process item: " + item.toString()); + + Assert.assertEquals(expectingCurrent, item.getContentId().longValue()); + expectingCurrent++; + count++; + + mgr.purgeItem(item.getId()); + } + try { + Thread.sleep(100); + } catch (InterruptedException e) { + } + } + } + } + ); + + Thread thread2 = new Thread(new Runnable() { + @Override + public void run() { + while(count < totalRuns) { + SyncQueueItemVO item = mgr.dequeueFromOne(queue.getId(), 1L); + if(item != null) { + s_logger.info("Thread 2 process item: " + item.toString()); + + Assert.assertEquals(expectingCurrent, item.getContentId().longValue()); + expectingCurrent++; + count++; + mgr.purgeItem(item.getId()); + } + + try { + Thread.sleep(100); + } catch (InterruptedException e) { + } + } + } + } + ); + + thread1.start(); + thread2.start(); + try { + thread1.join(); + } catch (InterruptedException e) { + } + try { + thread2.join(); + } catch (InterruptedException e) { + } + + Assert.assertEquals(totalRuns, count); + } + + public void dequeueFromAnyQueue() { + // simulate 30 queues + final int queues = 30; + final int totalRuns = 100; + final int itemsPerRun = 20; + for(int q = 1; q <= queues; q++) + for(int i = 0; i < totalRuns; i++) + mgr.queue("vm_instance", q, "Async-job", i+1, 1); + + count = 0; + Thread thread1 = new Thread(new Runnable() { + @Override + public void run() { + while(count < totalRuns*queues) { + List<SyncQueueItemVO> l = mgr.dequeueFromAny(1L, itemsPerRun); + if(l != null && l.size() > 0) { + s_logger.info("Thread 1 get " + l.size() + " dequeued items"); + + for(SyncQueueItemVO item : l) { + s_logger.info("Thread 1 process item: " + item.toString()); + count++; + + mgr.purgeItem(item.getId()); + } + } + try { + Thread.sleep(100); + } catch (InterruptedException e) { + } + } + } + } + ); + + Thread thread2 = new Thread(new Runnable() { + @Override + public void run() { + while(count < totalRuns*queues) { + List<SyncQueueItemVO> l = mgr.dequeueFromAny(1L, itemsPerRun); + if(l != null && l.size() > 0) { + s_logger.info("Thread 2 get " + l.size() + " dequeued items"); + + for(SyncQueueItemVO item : l) { + s_logger.info("Thread 2 process item: " + item.toString()); + count++; + mgr.purgeItem(item.getId()); + } + } + + try { + Thread.sleep(100); + } catch (InterruptedException e) { + } + } + } + } + ); + + thread1.start(); + thread2.start(); + try { + thread1.join(); + } catch (InterruptedException e) { + } + try { + thread2.join(); + } catch (InterruptedException e) { + } + Assert.assertEquals(queues*totalRuns, count); + } + + public void testPopulateQueueData() { + final int queues = 30000; + final int totalRuns = 100; + + for(int q = 1; q <= queues; q++) + for(int i = 0; i < totalRuns; i++) + mgr.queue("vm_instance", q, "Async-job", i+1, 1); + } + public void testSyncQueue() { - final SyncQueueManager mgr = ComponentLocator.getCurrentLocator().getManager( - SyncQueueManager.class);
mgr.queue("vm_instance", 1, "Async-job", 1, 1); mgr.queue("vm_instance", 1, "Async-job", 2, 1); mgr.queue("vm_instance", 1, "Async-job", 3, 1); mgr.dequeueFromAny(100L, 1); - + List l = mgr.getBlockedQueueItems(100000, false); for(SyncQueueItemVO item : l) { System.out.println("Blocked item. " + item.getContentType() + "-" + item.getContentId()); diff --git a/server/test/com/cloud/cluster/CheckPointManagerTest.java b/server/test/com/cloud/cluster/CheckPointManagerTest.java deleted file mode 100755 index 74b069882b3..00000000000 --- a/server/test/com/cloud/cluster/CheckPointManagerTest.java +++ /dev/null @@ -1,390 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.cluster; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import javax.ejb.Local; -import javax.naming.ConfigurationException; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; - -import com.cloud.agent.Listener; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.Command; -import com.cloud.cluster.dao.StackMaidDao; -import com.cloud.cluster.dao.StackMaidDaoImpl; -import com.cloud.configuration.Config; -import com.cloud.configuration.DefaultInterceptorLibrary; -import com.cloud.configuration.dao.ConfigurationDaoImpl; -import com.cloud.exception.AgentUnavailableException; -import com.cloud.exception.OperationTimedoutException; -import com.cloud.host.Status.Event; -import com.cloud.serializer.SerializerHelper; -import com.cloud.utils.component.ComponentLocator; -import com.cloud.utils.component.MockComponentLocator; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.exception.CloudRuntimeException; - -public class CheckPointManagerTest extends TestCase { - private final static Logger s_logger = Logger.getLogger(CheckPointManagerTest.class); - - @Override - @Before - public void setUp() { - MockComponentLocator locator = new MockComponentLocator("management-server"); - locator.addDao("StackMaidDao", StackMaidDaoImpl.class); - locator.addDao("ConfigurationDao", ConfigurationDaoImpl.class); - locator.addManager("ClusterManager", MockClusterManager.class); - locator.makeActive(new DefaultInterceptorLibrary()); - MockMaid.map.clear(); - s_logger.info("Cleaning up the database"); - Connection conn = Transaction.getStandaloneConnection(); - try { - conn.setAutoCommit(true); - PreparedStatement stmt = conn.prepareStatement("DELETE FROM stack_maid"); - stmt.executeUpdate(); - stmt.close(); - conn.close(); - } catch (SQLException e) { - throw new CloudRuntimeException("Unable 
to setup database", e); - } - } - - @Override - @After - public void tearDown() throws Exception { - } - - public void testCompleteCase() throws Exception { - ComponentLocator locator = ComponentLocator.getCurrentLocator(); - - CheckPointManagerImpl taskMgr = ComponentLocator.inject(CheckPointManagerImpl.class); - assertTrue(taskMgr.configure("TaskManager", new HashMap())); - assertTrue(taskMgr.start()); - - MockMaid delegate = new MockMaid(); - delegate.setValue("first"); - long taskId = taskMgr.pushCheckPoint(delegate); - - StackMaidDao maidDao = locator.getDao(StackMaidDao.class); - CheckPointVO task = maidDao.findById(taskId); - - assertEquals(task.getDelegate(), MockMaid.class.getName()); - MockMaid retrieved = (MockMaid)SerializerHelper.fromSerializedString(task.getContext()); - assertEquals(retrieved.getValue(), delegate.getValue()); - - delegate.setValue("second"); - taskMgr.updateCheckPointState(taskId, delegate); - - task = maidDao.findById(taskId); - assertEquals(task.getDelegate(), MockMaid.class.getName()); - retrieved = (MockMaid)SerializerHelper.fromSerializedString(task.getContext()); - assertEquals(retrieved.getValue(), delegate.getValue()); - - taskMgr.popCheckPoint(taskId); - assertNull(maidDao.findById(taskId)); - } - - public void testSimulatedReboot() throws Exception { - ComponentLocator locator = ComponentLocator.getCurrentLocator(); - - CheckPointManagerImpl taskMgr = ComponentLocator.inject(CheckPointManagerImpl.class); - assertTrue(taskMgr.configure("TaskManager", new HashMap())); - assertTrue(taskMgr.start()); - - MockMaid maid = new MockMaid(); - maid.setValue("first"); - long taskId = taskMgr.pushCheckPoint(maid); - - StackMaidDao maidDao = locator.getDao(StackMaidDao.class); - CheckPointVO task = maidDao.findById(taskId); - - assertEquals(task.getDelegate(), MockMaid.class.getName()); - MockMaid retrieved = (MockMaid)SerializerHelper.fromSerializedString(task.getContext()); - assertEquals(retrieved.getValue(), maid.getValue()); - - taskMgr.stop(); - - assertNotNull(MockMaid.map.get(maid.getSeq())); - - taskMgr = ComponentLocator.inject(CheckPointManagerImpl.class); - HashMap params = new HashMap(); - params.put(Config.TaskCleanupRetryInterval.key(), "1"); - taskMgr.configure("TaskManager", params); - taskMgr.start(); - - int i = 0; - while (MockMaid.map.get(maid.getSeq()) != null && i++ < 5) { - Thread.sleep(1000); - } - - assertNull(MockMaid.map.get(maid.getSeq())); - } - - public void testTakeover() throws Exception { - ComponentLocator locator = ComponentLocator.getCurrentLocator(); - - CheckPointManagerImpl taskMgr = ComponentLocator.inject(CheckPointManagerImpl.class); - assertTrue(taskMgr.configure("TaskManager", new HashMap())); - assertTrue(taskMgr.start()); - - MockMaid delegate = new MockMaid(); - delegate.setValue("first"); - long taskId = taskMgr.pushCheckPoint(delegate); - - StackMaidDao maidDao = locator.getDao(StackMaidDao.class); - CheckPointVO task = maidDao.findById(taskId); - - assertEquals(task.getDelegate(), MockMaid.class.getName()); - MockMaid retrieved = (MockMaid)SerializerHelper.fromSerializedString(task.getContext()); - assertEquals(retrieved.getValue(), delegate.getValue()); - - Connection conn = Transaction.getStandaloneConnection(); - try { - conn.setAutoCommit(true); - PreparedStatement stmt = conn.prepareStatement("update stack_maid set msid=? 
where msid=?"); - stmt.setLong(1, 1234); - stmt.setLong(2, ManagementServerNode.getManagementServerId()); - stmt.executeUpdate(); - stmt.close(); - } finally { - conn.close(); - } - - MockClusterManager clusterMgr = (MockClusterManager)locator.getManager(ClusterManager.class); - clusterMgr.triggerTakeover(1234); - - int i = 0; - while (MockMaid.map.get(delegate.getSeq()) != null && i++ < 500) { - Thread.sleep(1000); - } - - assertNull(MockMaid.map.get(delegate.getSeq())); - } - - public static class MockMaid implements CleanupMaid { - private static int s_seq = 1; - public static Map map = new ConcurrentHashMap(); - - int seq; - boolean canBeCleanup; - String value; - - protected MockMaid() { - canBeCleanup = true; - seq = s_seq++; - map.put(seq, this); - } - - public int getSeq() { - return seq; - } - - public String getValue() { - return value; - } - - public void setCanBeCleanup(boolean canBeCleanup) { - this.canBeCleanup = canBeCleanup; - } - - @Override - public int cleanup(CheckPointManager checkPointMgr) { - s_logger.debug("Cleanup called for " + seq); - map.remove(seq); - return canBeCleanup ? 0 : -1; - } - - public void setValue(String value) { - this.value = value; - } - - @Override - public String getCleanupProcedure() { - return "No cleanup necessary"; - } - } - - @Local(value=ClusterManager.class) - public static class MockClusterManager implements ClusterManager { - String _name; - ClusterManagerListener _listener; - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - _name = name; - return true; - } - - @Override - public boolean start() { - return true; - } - - @Override - public boolean stop() { - return true; - } - - @Override - public String getName() { - return _name; - } - - @Override - public void OnReceiveClusterServicePdu(ClusterServicePdu pdu) { - throw new CloudRuntimeException("Not implemented"); - } - - @Override - public Answer[] execute(String strPeer, long agentId, Command[] cmds, boolean stopOnError) { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - public Answer[] sendToAgent(Long hostId, Command[] cmds, boolean stopOnError) throws AgentUnavailableException, OperationTimedoutException { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - public boolean executeAgentUserRequest(long agentId, Event event) throws AgentUnavailableException { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - public Boolean propagateAgentEvent(long agentId, Event event) throws AgentUnavailableException { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - public int getHeartbeatThreshold() { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - public long getManagementNodeId() { - return ManagementServerNode.getManagementServerId(); - } - - @Override - public boolean isManagementNodeAlive(long msid) { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - public boolean pingManagementNode(long msid) { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - public long getCurrentRunId() { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - public String getSelfPeerName() { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - public String getSelfNodeIP() { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - public String getPeerName(long 
agentHostId) { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - public void registerListener(ClusterManagerListener listener) { - _listener = listener; - } - - @Override - public void unregisterListener(ClusterManagerListener listener) { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - public ManagementServerHostVO getPeer(String peerName) { - throw new UnsupportedOperationException("Not implemented"); - } - - @Override - public void broadcast(long agentId, Command[] cmds) { - throw new UnsupportedOperationException("Not implemented"); - } - - public void triggerTakeover(long msId) { - ManagementServerHostVO node = new ManagementServerHostVO(); - node.setMsid(msId); - - List lst = new ArrayList(); - lst.add(node); - - _listener.onManagementNodeLeft(lst, ManagementServerNode.getManagementServerId()); - } - - protected MockClusterManager() { - } - - @Override - public boolean rebalanceAgent(long agentId, Event event, long currentOwnerId, long futureOwnerId) throws AgentUnavailableException, OperationTimedoutException { - return false; - } - - @Override - public boolean isAgentRebalanceEnabled() { - return false; - } - - @Override - public Boolean propagateResourceEvent(long agentId, com.cloud.resource.ResourceState.Event event) throws AgentUnavailableException { - // TODO Auto-generated method stub - return null; - } - - @Override - public boolean executeResourceUserRequest(long hostId, com.cloud.resource.ResourceState.Event event) throws AgentUnavailableException { - // TODO Auto-generated method stub - return false; - } - - /* (non-Javadoc) - * @see com.cloud.cluster.ClusterManager#executeAsync(java.lang.String, long, com.cloud.agent.api.Command[], boolean) - */ - @Override - public void executeAsync(String strPeer, long agentId, Command[] cmds, boolean stopOnError) { - // TODO Auto-generated method stub - - } - } - -} diff --git a/server/test/com/cloud/network/MockNetworkManagerImpl.java b/server/test/com/cloud/network/MockNetworkManagerImpl.java index 26a6e60f714..874e01767a9 100755 --- a/server/test/com/cloud/network/MockNetworkManagerImpl.java +++ b/server/test/com/cloud/network/MockNetworkManagerImpl.java @@ -112,6 +112,12 @@ public class MockNetworkManagerImpl implements NetworkManager, Manager, NetworkS return null; } + @Override + public Network getNetwork(String networkUuid) { + // TODO Auto-generated method stub + return null; + } + @Override public IpAddress getIp(long id) { // TODO Auto-generated method stub diff --git a/server/test/com/cloud/network/security/SecurityGroupManagerImpl2Test.java b/server/test/com/cloud/network/security/SecurityGroupManagerImpl2Test.java index 461bde0b95d..723e4e6ee27 100644 --- a/server/test/com/cloud/network/security/SecurityGroupManagerImpl2Test.java +++ b/server/test/com/cloud/network/security/SecurityGroupManagerImpl2Test.java @@ -19,6 +19,7 @@ package com.cloud.network.security; import java.util.ArrayList; import java.util.List; +import javax.inject.Inject; import javax.naming.ConfigurationException; import junit.framework.TestCase; @@ -46,8 +47,7 @@ import com.cloud.user.MockAccountManagerImpl; import com.cloud.user.MockDomainManagerImpl; import com.cloud.user.dao.AccountDaoImpl; import com.cloud.utils.Profiler; -import com.cloud.utils.component.ComponentLocator; -import com.cloud.utils.component.MockComponentLocator; + import com.cloud.vm.MockUserVmManagerImpl; import com.cloud.vm.MockVirtualMachineManagerImpl; import com.cloud.vm.dao.UserVmDaoImpl; @@ -55,14 +55,13 @@ 
import com.cloud.vm.dao.VMInstanceDaoImpl; public class SecurityGroupManagerImpl2Test extends TestCase { //private final static Logger s_logger = Logger.getLogger(SecurityGroupManagerImpl2Test.class); - SecurityGroupManagerImpl2 _sgMgr = null; - UserVmDaoImpl _vmDao = null; + @Inject SecurityGroupManagerImpl2 _sgMgr = null; + @Inject UserVmDaoImpl _vmDao = null; @Before @Override public void setUp() { - MockComponentLocator locator = new MockComponentLocator("management-server"); - +/* locator.addDao("ConfigurationDao", ConfigurationDaoImpl.class); locator.addDao("SecurityGroupDao", SecurityGroupDaoImpl.class); @@ -87,8 +86,7 @@ public class SecurityGroupManagerImpl2Test extends TestCase { locator.addManager("DomainManager", MockDomainManagerImpl.class); locator.addManager("ProjectManager", MockProjectManagerImpl.class); locator.makeActive(new DefaultInterceptorLibrary()); - _sgMgr = ComponentLocator.inject(SecurityGroupManagerImpl2.class); - _sgMgr._mBean = new SecurityManagerMBeanImpl(_sgMgr); +*/ } @Override diff --git a/server/test/com/cloud/snapshot/SnapshotDaoTest.java b/server/test/com/cloud/snapshot/SnapshotDaoTest.java index c412f49b3d4..2f2803e067e 100644 --- a/server/test/com/cloud/snapshot/SnapshotDaoTest.java +++ b/server/test/com/cloud/snapshot/SnapshotDaoTest.java @@ -18,19 +18,20 @@ package com.cloud.snapshot; import java.util.List; +import javax.inject.Inject; + import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; import com.cloud.storage.dao.SnapshotDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import junit.framework.Assert; import junit.framework.TestCase; public class SnapshotDaoTest extends TestCase { - + @Inject SnapshotDaoImpl dao; + public void testListBy() { - SnapshotDaoImpl dao = ComponentLocator.inject(SnapshotDaoImpl.class); - List snapshots = dao.listByInstanceId(3, Snapshot.Status.BackedUp); for(SnapshotVO snapshot : snapshots) { Assert.assertTrue(snapshot.getStatus() == Snapshot.Status.BackedUp); diff --git a/server/test/com/cloud/storage/dao/StoragePoolDaoTest.java b/server/test/com/cloud/storage/dao/StoragePoolDaoTest.java index ed766f557f6..87dbf168ef9 100644 --- a/server/test/com/cloud/storage/dao/StoragePoolDaoTest.java +++ b/server/test/com/cloud/storage/dao/StoragePoolDaoTest.java @@ -16,15 +16,17 @@ // under the License. 
package com.cloud.storage.dao; +import javax.inject.Inject; + import junit.framework.TestCase; import com.cloud.storage.StoragePoolStatus; -import com.cloud.utils.component.ComponentLocator; + public class StoragePoolDaoTest extends TestCase { + @Inject StoragePoolDaoImpl dao; public void testCountByStatus() { - StoragePoolDaoImpl dao = ComponentLocator.inject(StoragePoolDaoImpl.class); long count = dao.countPoolsByStatus(StoragePoolStatus.Up); System.out.println("Found " + count + " storage pools"); } diff --git a/server/test/com/cloud/upgrade/AdvanceZone217To224UpgradeTest.java b/server/test/com/cloud/upgrade/AdvanceZone217To224UpgradeTest.java index 27b2a7b723c..532a62f3cba 100644 --- a/server/test/com/cloud/upgrade/AdvanceZone217To224UpgradeTest.java +++ b/server/test/com/cloud/upgrade/AdvanceZone217To224UpgradeTest.java @@ -22,6 +22,8 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import javax.inject.Inject; + import junit.framework.TestCase; import org.apache.log4j.Logger; @@ -29,13 +31,15 @@ import org.junit.After; import org.junit.Before; import com.cloud.upgrade.dao.VersionDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DbTestUtils; import com.cloud.utils.db.Transaction; public class AdvanceZone217To224UpgradeTest extends TestCase { private static final Logger s_logger = Logger.getLogger(AdvanceZone217To224UpgradeTest.class); - + @Inject VersionDaoImpl dao; + @Inject DatabaseUpgradeChecker checker; + @Override @Before public void setUp() throws Exception { @@ -54,9 +58,6 @@ public class AdvanceZone217To224UpgradeTest extends TestCase { Connection conn; PreparedStatement pstmt; - VersionDaoImpl dao = ComponentLocator.inject(VersionDaoImpl.class); - DatabaseUpgradeChecker checker = ComponentLocator.inject(DatabaseUpgradeChecker.class); - String version = dao.getCurrentVersion(); assert version.equals("2.1.7") : "Version returned is not 2.1.7 but " + version; diff --git a/server/test/com/cloud/upgrade/AdvanceZone223To224UpgradeTest.java b/server/test/com/cloud/upgrade/AdvanceZone223To224UpgradeTest.java index a0394993404..519ae704c91 100644 --- a/server/test/com/cloud/upgrade/AdvanceZone223To224UpgradeTest.java +++ b/server/test/com/cloud/upgrade/AdvanceZone223To224UpgradeTest.java @@ -18,6 +18,8 @@ package com.cloud.upgrade; import java.sql.SQLException; +import javax.inject.Inject; + import junit.framework.TestCase; import org.apache.log4j.Logger; @@ -25,10 +27,12 @@ import org.junit.After; import org.junit.Before; import com.cloud.upgrade.dao.VersionDaoImpl; -import com.cloud.utils.component.ComponentLocator; + public class AdvanceZone223To224UpgradeTest extends TestCase { private static final Logger s_logger = Logger.getLogger(AdvanceZone223To224UpgradeTest.class); + @Inject VersionDaoImpl dao; + @Inject DatabaseUpgradeChecker checker; @Override @Before @@ -43,8 +47,6 @@ public class AdvanceZone223To224UpgradeTest extends TestCase { public void test223to224Upgrade() throws SQLException { - VersionDaoImpl dao = ComponentLocator.inject(VersionDaoImpl.class); - DatabaseUpgradeChecker checker = ComponentLocator.inject(DatabaseUpgradeChecker.class); String version = dao.getCurrentVersion(); assert version.equals("2.2.3") : "Version returned is not 2.2.3 but " + version; diff --git a/server/test/com/cloud/upgrade/BasicZone218To224UpgradeTest.java b/server/test/com/cloud/upgrade/BasicZone218To224UpgradeTest.java index 521e92a042f..8bd9f0625ef 100644 --- 
a/server/test/com/cloud/upgrade/BasicZone218To224UpgradeTest.java +++ b/server/test/com/cloud/upgrade/BasicZone218To224UpgradeTest.java @@ -22,6 +22,8 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import javax.inject.Inject; + import junit.framework.TestCase; import org.apache.log4j.Logger; @@ -29,12 +31,15 @@ import org.junit.After; import org.junit.Before; import com.cloud.upgrade.dao.VersionDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DbTestUtils; import com.cloud.utils.db.Transaction; public class BasicZone218To224UpgradeTest extends TestCase { private static final Logger s_logger = Logger.getLogger(BasicZone218To224UpgradeTest.class); + + @Inject VersionDaoImpl dao; + @Inject DatabaseUpgradeChecker checker; @Override @Before @@ -54,9 +59,6 @@ public class BasicZone218To224UpgradeTest extends TestCase { Connection conn = Transaction.getStandaloneConnection(); PreparedStatement pstmt; - VersionDaoImpl dao = ComponentLocator.inject(VersionDaoImpl.class); - DatabaseUpgradeChecker checker = ComponentLocator.inject(DatabaseUpgradeChecker.class); - String version = dao.getCurrentVersion(); if (!version.equals("2.1.8")) { diff --git a/server/test/com/cloud/upgrade/HostCapacity218to22Test.java b/server/test/com/cloud/upgrade/HostCapacity218to22Test.java index af6321abf60..76ad12eeb19 100644 --- a/server/test/com/cloud/upgrade/HostCapacity218to22Test.java +++ b/server/test/com/cloud/upgrade/HostCapacity218to22Test.java @@ -18,6 +18,8 @@ package com.cloud.upgrade; import java.sql.SQLException; +import javax.inject.Inject; + import junit.framework.TestCase; import org.apache.log4j.Logger; @@ -25,11 +27,14 @@ import org.junit.After; import org.junit.Before; import com.cloud.upgrade.dao.VersionDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DbTestUtils; public class HostCapacity218to22Test extends TestCase { private static final Logger s_logger = Logger.getLogger(HostCapacity218to22Test.class); + + @Inject VersionDaoImpl dao; + @Inject DatabaseUpgradeChecker checker; @Override @Before @@ -46,9 +51,6 @@ public class HostCapacity218to22Test extends TestCase { s_logger.debug("Finding sample data from 2.1.8"); DbTestUtils.executeScript("fake.sql", false, true); - VersionDaoImpl dao = ComponentLocator.inject(VersionDaoImpl.class); - DatabaseUpgradeChecker checker = ComponentLocator.inject(DatabaseUpgradeChecker.class); - String version = dao.getCurrentVersion(); if (!version.equals("2.1.8")) { diff --git a/server/test/com/cloud/upgrade/InstanceGroup218To224UpgradeTest.java b/server/test/com/cloud/upgrade/InstanceGroup218To224UpgradeTest.java index 601b1e83212..41f334dab6a 100644 --- a/server/test/com/cloud/upgrade/InstanceGroup218To224UpgradeTest.java +++ b/server/test/com/cloud/upgrade/InstanceGroup218To224UpgradeTest.java @@ -23,6 +23,8 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; +import javax.inject.Inject; + import junit.framework.TestCase; import org.apache.log4j.Logger; @@ -30,13 +32,16 @@ import org.junit.After; import org.junit.Before; import com.cloud.upgrade.dao.VersionDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DbTestUtils; import com.cloud.utils.db.Transaction; public class InstanceGroup218To224UpgradeTest extends TestCase { private static final Logger s_logger = Logger.getLogger(InstanceGroup218To224UpgradeTest.class); + @Inject VersionDaoImpl dao; + @Inject 
DatabaseUpgradeChecker checker; + @Override @Before public void setUp() throws Exception { @@ -55,9 +60,6 @@ public class InstanceGroup218To224UpgradeTest extends TestCase { PreparedStatement pstmt; ResultSet rs; - VersionDaoImpl dao = ComponentLocator.inject(VersionDaoImpl.class); - DatabaseUpgradeChecker checker = ComponentLocator.inject(DatabaseUpgradeChecker.class); - String version = dao.getCurrentVersion(); if (!version.equals("2.1.8")) { diff --git a/server/test/com/cloud/upgrade/PortForwarding218To224UpgradeTest.java b/server/test/com/cloud/upgrade/PortForwarding218To224UpgradeTest.java index a430584d7bf..a9cb51fe00c 100644 --- a/server/test/com/cloud/upgrade/PortForwarding218To224UpgradeTest.java +++ b/server/test/com/cloud/upgrade/PortForwarding218To224UpgradeTest.java @@ -22,6 +22,8 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import javax.inject.Inject; + import junit.framework.TestCase; import org.apache.log4j.Logger; @@ -29,12 +31,15 @@ import org.junit.After; import org.junit.Before; import com.cloud.upgrade.dao.VersionDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DbTestUtils; import com.cloud.utils.db.Transaction; public class PortForwarding218To224UpgradeTest extends TestCase { private static final Logger s_logger = Logger.getLogger(PortForwarding218To224UpgradeTest.class); + + @Inject VersionDaoImpl dao; + @Inject DatabaseUpgradeChecker checker; @Override @Before @@ -55,9 +60,6 @@ public class PortForwarding218To224UpgradeTest extends TestCase { PreparedStatement pstmt; ResultSet rs; - VersionDaoImpl dao = ComponentLocator.inject(VersionDaoImpl.class); - DatabaseUpgradeChecker checker = ComponentLocator.inject(DatabaseUpgradeChecker.class); - String version = dao.getCurrentVersion(); if (!version.equals("2.1.8")) { diff --git a/server/test/com/cloud/upgrade/Sanity220To224UpgradeTest.java b/server/test/com/cloud/upgrade/Sanity220To224UpgradeTest.java index ef47aadc83b..d33192fbf9c 100644 --- a/server/test/com/cloud/upgrade/Sanity220To224UpgradeTest.java +++ b/server/test/com/cloud/upgrade/Sanity220To224UpgradeTest.java @@ -21,6 +21,8 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import javax.inject.Inject; + import junit.framework.TestCase; import org.apache.log4j.Logger; @@ -28,13 +30,16 @@ import org.junit.After; import org.junit.Before; import com.cloud.upgrade.dao.VersionDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DbTestUtils; import com.cloud.utils.db.Transaction; public class Sanity220To224UpgradeTest extends TestCase { private static final Logger s_logger = Logger.getLogger(Sanity220To224UpgradeTest.class); + @Inject VersionDaoImpl dao; + @Inject DatabaseUpgradeChecker checker; + @Override @Before public void setUp() throws Exception { @@ -54,9 +59,6 @@ public class Sanity220To224UpgradeTest extends TestCase { PreparedStatement pstmt; ResultSet rs; - VersionDaoImpl dao = ComponentLocator.inject(VersionDaoImpl.class); - DatabaseUpgradeChecker checker = ComponentLocator.inject(DatabaseUpgradeChecker.class); - String version = dao.getCurrentVersion(); if (!version.equals("2.2.1")) { diff --git a/server/test/com/cloud/upgrade/Sanity222To224UpgradeTest.java b/server/test/com/cloud/upgrade/Sanity222To224UpgradeTest.java index aa30df2a5f6..108eca919a6 100644 --- a/server/test/com/cloud/upgrade/Sanity222To224UpgradeTest.java +++ 
b/server/test/com/cloud/upgrade/Sanity222To224UpgradeTest.java @@ -21,6 +21,8 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import javax.inject.Inject; + import junit.framework.TestCase; import org.apache.log4j.Logger; @@ -28,13 +30,16 @@ import org.junit.After; import org.junit.Before; import com.cloud.upgrade.dao.VersionDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DbTestUtils; import com.cloud.utils.db.Transaction; public class Sanity222To224UpgradeTest extends TestCase { private static final Logger s_logger = Logger.getLogger(Sanity222To224UpgradeTest.class); + @Inject VersionDaoImpl dao; + @Inject DatabaseUpgradeChecker checker; + @Override @Before public void setUp() throws Exception { @@ -54,8 +59,6 @@ public class Sanity222To224UpgradeTest extends TestCase { PreparedStatement pstmt; ResultSet rs; - VersionDaoImpl dao = ComponentLocator.inject(VersionDaoImpl.class); - DatabaseUpgradeChecker checker = ComponentLocator.inject(DatabaseUpgradeChecker.class); String version = dao.getCurrentVersion(); diff --git a/server/test/com/cloud/upgrade/Sanity223To225UpgradeTest.java b/server/test/com/cloud/upgrade/Sanity223To225UpgradeTest.java index 32910276948..fd0b219af7e 100644 --- a/server/test/com/cloud/upgrade/Sanity223To225UpgradeTest.java +++ b/server/test/com/cloud/upgrade/Sanity223To225UpgradeTest.java @@ -21,6 +21,8 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import javax.inject.Inject; + import junit.framework.TestCase; import org.apache.log4j.Logger; @@ -28,12 +30,15 @@ import org.junit.After; import org.junit.Before; import com.cloud.upgrade.dao.VersionDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.Transaction; public class Sanity223To225UpgradeTest extends TestCase { private static final Logger s_logger = Logger.getLogger(Sanity223To225UpgradeTest.class); + @Inject VersionDaoImpl dao; + @Inject DatabaseUpgradeChecker checker; + @Override @Before public void setUp() throws Exception { @@ -53,9 +58,6 @@ public class Sanity223To225UpgradeTest extends TestCase { PreparedStatement pstmt; ResultSet rs; - VersionDaoImpl dao = ComponentLocator.inject(VersionDaoImpl.class); - DatabaseUpgradeChecker checker = ComponentLocator.inject(DatabaseUpgradeChecker.class); - String version = dao.getCurrentVersion(); if (!version.equals("2.2.3")) { diff --git a/server/test/com/cloud/upgrade/Sanity224To225UpgradeTest.java b/server/test/com/cloud/upgrade/Sanity224To225UpgradeTest.java index a7b6ba152a4..775a62ee501 100644 --- a/server/test/com/cloud/upgrade/Sanity224To225UpgradeTest.java +++ b/server/test/com/cloud/upgrade/Sanity224To225UpgradeTest.java @@ -21,6 +21,8 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import javax.inject.Inject; + import junit.framework.TestCase; import org.apache.log4j.Logger; @@ -28,13 +30,16 @@ import org.junit.After; import org.junit.Before; import com.cloud.upgrade.dao.VersionDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DbTestUtils; import com.cloud.utils.db.Transaction; public class Sanity224To225UpgradeTest extends TestCase { private static final Logger s_logger = Logger.getLogger(Sanity224To225UpgradeTest.class); + @Inject VersionDaoImpl dao; + @Inject DatabaseUpgradeChecker checker; + @Override @Before public void setUp() throws Exception { @@ -54,9 +59,6 @@ public class 
Sanity224To225UpgradeTest extends TestCase { PreparedStatement pstmt; ResultSet rs; - VersionDaoImpl dao = ComponentLocator.inject(VersionDaoImpl.class); - DatabaseUpgradeChecker checker = ComponentLocator.inject(DatabaseUpgradeChecker.class); - String version = dao.getCurrentVersion(); if (!version.equals("2.2.4")) { diff --git a/server/test/com/cloud/upgrade/Template2214To30UpgradeTest.java b/server/test/com/cloud/upgrade/Template2214To30UpgradeTest.java index e7a01e30859..06835b56774 100644 --- a/server/test/com/cloud/upgrade/Template2214To30UpgradeTest.java +++ b/server/test/com/cloud/upgrade/Template2214To30UpgradeTest.java @@ -23,13 +23,15 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.List; +import javax.inject.Inject; + import junit.framework.TestCase; import org.apache.log4j.Logger; import org.junit.After; import org.junit.Before; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DbTestUtils; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; @@ -37,6 +39,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class Template2214To30UpgradeTest extends TestCase { private static final Logger s_logger = Logger .getLogger(Template2214To30UpgradeTest.class); + @Inject DatabaseUpgradeChecker checker; @Override @Before @@ -56,8 +59,6 @@ public class Template2214To30UpgradeTest extends TestCase { "fake.sql", false, true); - DatabaseUpgradeChecker checker = ComponentLocator - .inject(DatabaseUpgradeChecker.class); checker.upgrade("2.2.14", "3.0.0"); diff --git a/server/test/com/cloud/upgrade/Test2214To30DBUpgrade.java b/server/test/com/cloud/upgrade/Test2214To30DBUpgrade.java index 5f05ac32a1c..ff448033764 100644 --- a/server/test/com/cloud/upgrade/Test2214To30DBUpgrade.java +++ b/server/test/com/cloud/upgrade/Test2214To30DBUpgrade.java @@ -23,21 +23,26 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.List; +import javax.inject.Inject; + import junit.framework.TestCase; import org.apache.log4j.Logger; import org.junit.After; import org.junit.Before; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DbTestUtils; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; + public class Test2214To30DBUpgrade extends TestCase { private static final Logger s_logger = Logger .getLogger(Test2214To30DBUpgrade.class); + @Inject DatabaseUpgradeChecker checker; + @Override @Before public void setUp() throws Exception { @@ -56,9 +61,6 @@ public class Test2214To30DBUpgrade extends TestCase { "fake.sql", false, true); - DatabaseUpgradeChecker checker = ComponentLocator - .inject(DatabaseUpgradeChecker.class); - checker.upgrade("2.2.14", "3.0.0"); Connection conn = Transaction.getStandaloneConnection(); diff --git a/server/test/com/cloud/upgrade/Usage217To224UpgradeTest.java b/server/test/com/cloud/upgrade/Usage217To224UpgradeTest.java index d349247a810..741af5a03f0 100644 --- a/server/test/com/cloud/upgrade/Usage217To224UpgradeTest.java +++ b/server/test/com/cloud/upgrade/Usage217To224UpgradeTest.java @@ -22,6 +22,8 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import javax.inject.Inject; + import junit.framework.TestCase; import org.apache.log4j.Logger; @@ -29,13 +31,16 @@ import org.junit.After; import org.junit.Before; import com.cloud.upgrade.dao.VersionDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import 
com.cloud.utils.db.DbTestUtils; import com.cloud.utils.db.Transaction; public class Usage217To224UpgradeTest extends TestCase { private static final Logger s_logger = Logger.getLogger(Usage217To224UpgradeTest.class); + @Inject VersionDaoImpl dao; + @Inject PremiumDatabaseUpgradeChecker checker; + @Override @Before public void setUp() throws Exception { @@ -56,8 +61,6 @@ public class Usage217To224UpgradeTest extends TestCase { Connection conn; PreparedStatement pstmt; - VersionDaoImpl dao = ComponentLocator.inject(VersionDaoImpl.class); - PremiumDatabaseUpgradeChecker checker = ComponentLocator.inject(PremiumDatabaseUpgradeChecker.class); String version = dao.getCurrentVersion(); assert version.equals("2.1.7") : "Version returned is not 2.1.7 but " + version; diff --git a/server/test/com/cloud/upgrade/UsageEvents218To224UpgradeTest.java b/server/test/com/cloud/upgrade/UsageEvents218To224UpgradeTest.java index 7319afa4469..cde114b5e63 100644 --- a/server/test/com/cloud/upgrade/UsageEvents218To224UpgradeTest.java +++ b/server/test/com/cloud/upgrade/UsageEvents218To224UpgradeTest.java @@ -22,6 +22,8 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import javax.inject.Inject; + import junit.framework.TestCase; import org.apache.log4j.Logger; @@ -29,13 +31,16 @@ import org.junit.After; import org.junit.Before; import com.cloud.upgrade.dao.VersionDaoImpl; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.DbTestUtils; import com.cloud.utils.db.Transaction; public class UsageEvents218To224UpgradeTest extends TestCase { private static final Logger s_logger = Logger.getLogger(UsageEvents218To224UpgradeTest.class); + @Inject VersionDaoImpl dao; + @Inject DatabaseUpgradeChecker checker; + @Override @Before public void setUp() throws Exception { @@ -54,9 +59,6 @@ public class UsageEvents218To224UpgradeTest extends TestCase { Connection conn; PreparedStatement pstmt; - VersionDaoImpl dao = ComponentLocator.inject(VersionDaoImpl.class); - DatabaseUpgradeChecker checker = ComponentLocator.inject(DatabaseUpgradeChecker.class); - String version = dao.getCurrentVersion(); assert version.equals("2.1.8") : "Version returned is not 2.1.8 but " + version; diff --git a/server/test/com/cloud/user/MockAccountManagerImpl.java b/server/test/com/cloud/user/MockAccountManagerImpl.java index ae5d0e5de4b..550304adfff 100644 --- a/server/test/com/cloud/user/MockAccountManagerImpl.java +++ b/server/test/com/cloud/user/MockAccountManagerImpl.java @@ -23,6 +23,7 @@ import javax.ejb.Local; import javax.naming.ConfigurationException; import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import com.cloud.api.query.vo.ControlledViewEntity; @@ -344,4 +345,9 @@ public class MockAccountManagerImpl implements Manager, AccountManager, AccountS return null; } + @Override + public RoleType getRoleType(Account account) { + return null; + } + } diff --git a/server/test/com/cloud/user/MockDomainManagerImpl.java b/server/test/com/cloud/user/MockDomainManagerImpl.java index 6dc4d075b9e..9f49535ce68 100644 --- a/server/test/com/cloud/user/MockDomainManagerImpl.java +++ b/server/test/com/cloud/user/MockDomainManagerImpl.java @@ -46,6 +46,12 @@ public class MockDomainManagerImpl implements Manager, DomainManager { return null; } + @Override + public Domain getDomain(String uuid) { + // TODO Auto-generated method stub + return null; + } + @Override public boolean 
isChildDomain(Long parentId, Long childId) { // TODO Auto-generated method stub diff --git a/server/test/com/cloud/vm/dao/UserVmDaoImplTest.java b/server/test/com/cloud/vm/dao/UserVmDaoImplTest.java index f07abca439c..1a5a9008d92 100644 --- a/server/test/com/cloud/vm/dao/UserVmDaoImplTest.java +++ b/server/test/com/cloud/vm/dao/UserVmDaoImplTest.java @@ -16,17 +16,20 @@ // under the License. package com.cloud.vm.dao; +import javax.inject.Inject; + import junit.framework.TestCase; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.vm.UserVmVO; import com.cloud.vm.VirtualMachine; public class UserVmDaoImplTest extends TestCase { - public void testPersist() { - UserVmDao dao = ComponentLocator.inject(UserVmDaoImpl.class); + @Inject UserVmDao dao; + + public void testPersist() { dao.expunge(1000l); diff --git a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java index 4403d75fbd0..b29af9e1422 100644 --- a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java +++ b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java @@ -66,7 +66,6 @@ import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; import com.cloud.user.Account; import com.cloud.user.User; import com.cloud.utils.Pair; -import com.cloud.utils.component.Adapters; import com.cloud.utils.component.Manager; import com.cloud.vm.*; import com.cloud.vpc.dao.MockVpcVirtualRouterElement; @@ -157,6 +156,12 @@ public class MockNetworkManagerImpl implements NetworkManager, Manager{ return null; } + @Override + public Network getNetwork(String networkUuid) { + // TODO Auto-generated method stub + return null; + } + /* (non-Javadoc) * @see com.cloud.network.NetworkService#getIp(long) */ diff --git a/server/test/com/cloud/vpc/MockVpcManagerImpl.java b/server/test/com/cloud/vpc/MockVpcManagerImpl.java index 25799d19b9e..e7d888e992f 100644 --- a/server/test/com/cloud/vpc/MockVpcManagerImpl.java +++ b/server/test/com/cloud/vpc/MockVpcManagerImpl.java @@ -21,6 +21,7 @@ import java.util.Map; import java.util.Set; import javax.ejb.Local; +import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.acl.ControlledEntity.ACLType; @@ -50,14 +51,14 @@ import com.cloud.offering.NetworkOffering; import com.cloud.user.Account; import com.cloud.user.User; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.component.Manager; import com.cloud.vm.DomainRouterVO; import com.cloud.vpc.dao.MockVpcDaoImpl; @Local(value = { VpcManager.class, VpcService.class }) public class MockVpcManagerImpl implements VpcManager, Manager{ - MockVpcDaoImpl _vpcDao = ComponentLocator.inject(MockVpcDaoImpl.class); + @Inject MockVpcDaoImpl _vpcDao; /* (non-Javadoc) * @see com.cloud.network.vpc.VpcService#getVpcOffering(long) diff --git a/server/test/com/cloud/vpc/VpcApiUnitTest.java b/server/test/com/cloud/vpc/VpcApiUnitTest.java index 5cc325ffac0..9693f443130 100644 --- a/server/test/com/cloud/vpc/VpcApiUnitTest.java +++ b/server/test/com/cloud/vpc/VpcApiUnitTest.java @@ -19,6 +19,8 @@ package com.cloud.vpc; import java.util.ArrayList; import java.util.List; +import javax.inject.Inject; + import junit.framework.TestCase; import org.apache.log4j.Logger; @@ -45,8 +47,7 @@ import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.user.AccountVO; import com.cloud.user.MockAccountManagerImpl; import com.cloud.user.dao.AccountDaoImpl; -import 
com.cloud.utils.component.ComponentLocator; -import com.cloud.utils.component.MockComponentLocator; + import com.cloud.vm.dao.DomainRouterDaoImpl; import com.cloud.vpc.dao.MockNetworkDaoImpl; import com.cloud.vpc.dao.MockNetworkOfferingDaoImpl; @@ -58,45 +59,11 @@ import com.cloud.vpc.dao.MockVpcOfferingServiceMapDaoImpl; public class VpcApiUnitTest extends TestCase{ private static final Logger s_logger = Logger.getLogger(VpcApiUnitTest.class); - MockComponentLocator _locator; - VpcManager _vpcService; + @Inject VpcManager _vpcService; @Override @Before public void setUp() throws Exception { - _locator = new MockComponentLocator(ManagementService.Name); - _locator.addDao("VpcDao", MockVpcDaoImpl.class); - _locator.addDao("VpcOfferingDao", VpcOfferingDaoImpl.class); - _locator.addDao("ConfigurationDao", ConfigurationDaoImpl.class); - _locator.addDao("NetworkDao", MockNetworkDaoImpl.class); - _locator.addDao("IPAddressDao", IPAddressDaoImpl.class); - _locator.addDao("DomainRouterDao", DomainRouterDaoImpl.class); - _locator.addDao("VpcGatewayDao", VpcGatewayDaoImpl.class); - _locator.addDao("PrivateIpDao", PrivateIpDaoImpl.class); - _locator.addDao("StaticRouteDao", StaticRouteDaoImpl.class); - _locator.addDao("NetworkOfferingServiceMapDao", MockNetworkOfferingServiceMapDaoImpl.class); - _locator.addDao("VpcOfferingServiceMapDao", MockVpcOfferingServiceMapDaoImpl.class); - _locator.addDao("PhysicalNetworkDao", PhysicalNetworkDaoImpl.class); - _locator.addDao("ResourceTagDao", ResourceTagsDaoImpl.class); - _locator.addDao("FirewallRulesDao", FirewallRulesDaoImpl.class); - _locator.addDao("VlanDao", VlanDaoImpl.class); - _locator.addDao("AccountDao", AccountDaoImpl.class); - _locator.addDao("ResourceCountDao", ResourceCountDaoImpl.class); - _locator.addDao("NetworkOfferingDao", MockNetworkOfferingDaoImpl.class); - _locator.addDao("NetworkServiceMapDao", MockNetworkServiceMapDaoImpl.class); - _locator.addDao("VpcOfferingDao", MockVpcOfferingDaoImpl.class); - _locator.addDao("Site2SiteVpnDao", Site2SiteVpnGatewayDaoImpl.class); - - _locator.addManager("ConfigService", MockConfigurationManagerImpl.class); - _locator.addManager("vpc manager", VpcManagerImpl.class); - _locator.addManager("account manager", MockAccountManagerImpl.class); - _locator.addManager("network manager", MockNetworkManagerImpl.class); - _locator.addManager("Site2SiteVpnManager", MockSite2SiteVpnManagerImpl.class); - _locator.addManager("ResourceLimitService", MockResourceLimitManagerImpl.class); - - _locator.makeActive(null); - - _vpcService = ComponentLocator.inject(VpcManagerImpl.class); } public void test() { diff --git a/setup/db/create-schema.sql b/setup/db/create-schema.sql index 990e7d8ca4f..864fe52224a 100755 --- a/setup/db/create-schema.sql +++ b/setup/db/create-schema.sql @@ -157,6 +157,43 @@ DROP TABLE IF EXISTS `cloud`.`autoscale_policies`; DROP TABLE IF EXISTS `cloud`.`counter`; DROP TABLE IF EXISTS `cloud`.`conditions`; DROP TABLE IF EXISTS `cloud`.`inline_load_balancer_nic_map`; +DROP TABLE IF EXISTS `cloud`.`cmd_exec_log`; +DROP TABLE IF EXISTS `cloud`.`keystore`; +DROP TABLE IF EXISTS `cloud`.`swift`; +DROP TABLE IF EXISTS `cloud`.`project_account`; +DROP TABLE IF EXISTS `cloud`.`project_invitations`; +DROP TABLE IF EXISTS `cloud`.`elastic_lb_vm_map`; +DROP TABLE IF EXISTS `cloud`.`ntwk_offering_service_map`; +DROP TABLE IF EXISTS `cloud`.`ntwk_service_map`; +DROP TABLE IF EXISTS `cloud`.`external_load_balancer_devices`; +DROP TABLE IF EXISTS `cloud`.`external_firewall_devices`; +DROP TABLE IF EXISTS 
`cloud`.`network_external_lb_device_map`; +DROP TABLE IF EXISTS `cloud`.`network_external_firewall_device_map`; +DROP TABLE IF EXISTS `cloud`.`virtual_router_providers`; +DROP TABLE IF EXISTS `cloud`.`op_user_stats_log`; +DROP TABLE IF EXISTS `cloud`.`netscaler_pod_ref`; +DROP TABLE IF EXISTS `cloud`.`mshost_peer`; +DROP TABLE IF EXISTS `cloud`.`vm_template_details`; +DROP TABLE IF EXISTS `cloud`.`hypervisor_capabilities`; +DROP TABLE IF EXISTS `cloud`.`template_swift_ref`; +DROP TABLE IF EXISTS `cloud`.`account_details`; +DROP TABLE IF EXISTS `cloud`.`vpc`; +DROP TABLE IF EXISTS `cloud`.`vpc_offerings`; +DROP TABLE IF EXISTS `cloud`.`vpc_offering_service_map`; +DROP TABLE IF EXISTS `cloud`.`vpc_gateways`; +DROP TABLE IF EXISTS `cloud`.`router_network_ref`; +DROP TABLE IF EXISTS `cloud`.`private_ip_address`; +DROP TABLE IF EXISTS `cloud`.`static_routes`; +DROP TABLE IF EXISTS `cloud`.`resource_tags`; +DROP TABLE IF EXISTS `cloud`.`primary_data_store_provider`; +DROP TABLE IF EXISTS `cloud`.`image_data_store_provider`; +DROP TABLE IF EXISTS `cloud`.`image_data_store`; +DROP TABLE IF EXISTS `cloud`.`vm_compute_tags`; +DROP TABLE IF EXISTS `cloud`.`vm_root_disk_tags`; +DROP TABLE IF EXISTS `cloud`.`vm_network_map`; +DROP TABLE IF EXISTS `cloud`.`netapp_volume`; +DROP TABLE IF EXISTS `cloud`.`netapp_pool`; +DROP TABLE IF EXISTS `cloud`.`netapp_lun`; CREATE TABLE `cloud`.`version` ( `id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT COMMENT 'id', diff --git a/test/integration/smoke/test_iso.py b/test/integration/smoke/test_iso.py index 22d424f86cc..8228a278cc9 100644 --- a/test/integration/smoke/test_iso.py +++ b/test/integration/smoke/test_iso.py @@ -219,14 +219,14 @@ class TestISO(cloudstackTestCase): # Finding the OsTypeId from Ostype ostypes = list_os_types( cls.api_client, - description=self.services["ostype"] + description=cls.services["ostype"] ) if not isinstance(ostypes, list): raise unittest.SkipTest("OSTypeId for given description not found") - self.services["iso_1"]["ostypeid"] = ostypes[0].id - self.services["iso_2"]["ostypeid"] = ostypes[0].id - self.services["ostypeid"] = ostypes[0].id + cls.services["iso_1"]["ostypeid"] = ostypes[0].id + cls.services["iso_2"]["ostypeid"] = ostypes[0].id + cls.services["ostypeid"] = ostypes[0].id cls.iso_1 = Iso.create( cls.api_client, diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index 7739aea633f..0b281a29c1d 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -129,7 +129,6 @@ known_categories = { 'AutoScale': 'AutoScale', 'Counter': 'AutoScale', 'Condition': 'AutoScale', - 'Api': 'API Discovery', } diff --git a/tools/apidoc/pom.xml b/tools/apidoc/pom.xml index e0b02bc5dc6..bc7411f7013 100644 --- a/tools/apidoc/pom.xml +++ b/tools/apidoc/pom.xml @@ -57,7 +57,7 @@ ${client.config.jars} ./target -f - ${client.config.conf}/commands.properties,${client.config.conf}/commands-ext.properties,${client.config.conf}/virtualrouter_commands.properties,${client.config.conf}/nicira-nvp_commands.properties,${client.config.conf}/api-discovery_commands.properties + ${client.config.conf}/commands.properties,${client.config.conf}/commands-ext.properties,${client.config.conf}/virtualrouter_commands.properties,${client.config.conf}/nicira-nvp_commands.properties diff --git a/tools/devcloud-kvm/README.md b/tools/devcloud-kvm/README.md new file mode 100644 index 00000000000..3261fbe4b8e --- /dev/null +++ b/tools/devcloud-kvm/README.md @@ -0,0 +1,21 @@ +Licensed to the Apache Software Foundation (ASF) under one +or more contributor 
license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. + +=========================================================== + +This directory hosts configs for setting up the devcloud-kvm +environment. diff --git a/tools/devcloud-kvm/devcloud-kvm.cfg b/tools/devcloud-kvm/devcloud-kvm.cfg new file mode 100644 index 00000000000..47a128fea14 --- /dev/null +++ b/tools/devcloud-kvm/devcloud-kvm.cfg @@ -0,0 +1,97 @@ +{ + "zones": [ + { + "name": "DevCloudKVM0", + "physical_networks": [ + { + "broadcastdomainrange": "Zone", + "name": "test-network", + "traffictypes": [ + { + "typ": "Guest" + }, + { + "typ": "Management" + } + ], + "providers": [ + { + "broadcastdomainrange": "ZONE", + "name": "VirtualRouter" + }, + { + "broadcastdomainrange": "Pod", + "name": "SecurityGroupProvider" + } + ] + } + ], + "dns2": "4.4.4.4", + "dns1": "8.8.8.8", + "securitygroupenabled": "true", + "localstorageenabled": "true", + "networktype": "Basic", + "pods": [ + { + "endip": "192.168.100.250", + "name": "test00", + "startip": "192.168.100.200", + "guestIpRanges": [ + { + "startip": "192.168.100.100", + "endip": "192.168.100.199", + "netmask": "255.255.255.0", + "gateway": "192.168.100.1" + } + ], + "netmask": "255.255.255.0", + "clusters": [ + { + "clustername": "test000", + "hypervisor": "KVM", + "hosts": [ + { + "username": "root", + "url": "http://192.168.100.10/", + "password": "password" + } + ], + "clustertype": "CloudManaged" + } + ], + "gateway": "192.168.100.1" + } + ], + "internaldns1": "192.168.100.10", + "secondaryStorages": [ + { + "url": "nfs://192.168.100.10:/nfs/secondary" + } + ] + } + ], + "logger": [ + { + "name": "TestClient", + "file": "/tmp/testclient.log" + }, + { + "name": "TestCase", + "file": "/tmp/testcase.log" + } + ], + "mgtSvr": [ + { + "mgtSvrIp": "127.0.0.1", + "port": 8096 + } + ], + "dbSvr": + { + "dbSvr": "127.0.0.1", + "port": 3306, + "user": "cloud", + "passwd": "cloud", + "db": "cloud" + } +} diff --git a/tools/devcloud-kvm/devcloud-kvm.sql b/tools/devcloud-kvm/devcloud-kvm.sql new file mode 100644 index 00000000000..97478834bf3 --- /dev/null +++ b/tools/devcloud-kvm/devcloud-kvm.sql @@ -0,0 +1,40 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. 
See the License for the +-- specific language governing permissions and limitations +-- under the License. + + +INSERT INTO `cloud`.`disk_offering` (id, name, uuid, display_text, created, use_local_storage, type, disk_size) VALUES (17, 'tinyOffering', UUID(), 'tinyOffering', NOW(), 1, 'Service', 0); +INSERT INTO `cloud`.`service_offering` (id, cpu, speed, ram_size) VALUES (17, 1, 100, 100); +INSERT INTO `cloud`.`disk_offering` (id, name, uuid, display_text, created, type, disk_size) VALUES (18, 'tinyDiskOffering', UUID(), 'tinyDiskOffering', NOW(), 'Disk', 1073741824); +INSERT INTO `cloud`.`configuration` (instance, name,value) VALUE('DEFAULT','router.ram.size', '100'); +INSERT INTO `cloud`.`configuration` (instance, name,value) VALUE('DEFAULT','router.cpu.mhz','100'); +INSERT INTO `cloud`.`configuration` (instance, name,value) VALUE('DEFAULT','console.ram.size','100'); +INSERT INTO `cloud`.`configuration` (instance, name,value) VALUE('DEFAULT','console.cpu.mhz', '100'); +INSERT INTO `cloud`.`configuration` (instance, name,value) VALUE('DEFAULT','ssvm.ram.size','100'); +INSERT INTO `cloud`.`configuration` (instance, name,value) VALUE('DEFAULT','ssvm.cpu.mhz','100'); +INSERT INTO `cloud`.`configuration` (instance, name, value) VALUE('DEFAULT', 'system.vm.use.local.storage', 'true'); +INSERT INTO `cloud`.`configuration` (instance, name, value) VALUE('DEFAULT', 'expunge.workers', '3'); +INSERT INTO `cloud`.`configuration` (instance, name, value) VALUE('DEFAULT', 'expunge.delay', '60'); +INSERT INTO `cloud`.`configuration` (instance, name, value) VALUE('DEFAULT', 'expunge.interval', '60'); +INSERT INTO `cloud`.`configuration` (instance, name, value) VALUE('DEFAULT', 'enable.ec2.api', 'true'); +INSERT INTO `cloud`.`configuration` (instance, name, value) VALUE('DEFAULT', 'enable.s3.api', 'true'); +INSERT INTO `cloud`.`configuration` (instance, name, value) VALUE('DEFAULT', 'host', '192.168.100.10'); +INSERT INTO `cloud`.`configuration` (instance, name, value) VALUE('DEFAULT', 'management.network.cidr', '192.168.100.0/24'); +INSERT INTO `cloud`.`configuration` (instance, name, value) VALUE('DEFAULT', 'secstorage.allowed.internal.sites', '192.168.0.0/8'); +UPDATE `cloud`.`configuration` SET value='10' where name = 'storage.overprovisioning.factor'; +UPDATE `cloud`.`configuration` SET value='10' where name = 'cpu.overprovisioning.factor'; +UPDATE `cloud`.`configuration` SET value='10' where name = 'mem.overprovisioning.factor'; +UPDATE `cloud`.`vm_template` SET unique_name="tiny Linux",name="tiny Linux",url="http://marcus.mlsorensen.com/cloudstack-extras/ttylinux_pv.qcow2",checksum="81dcf4b4ca05a3b637a040e851568f29",display_text="tiny Linux",format='QCOW2',hypervisor_type='KVM' where id=5; diff --git a/tools/devcloud-kvm/pom.xml b/tools/devcloud-kvm/pom.xml new file mode 100644 index 00000000000..c9af192bee3 --- /dev/null +++ b/tools/devcloud-kvm/pom.xml @@ -0,0 +1,138 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>cloud-devcloud-kvm</artifactId>
+  <name>Apache CloudStack Developer Tools</name>
+  <packaging>pom</packaging>
+  <parent>
+    <groupId>org.apache.cloudstack</groupId>
+    <artifactId>cloudstack</artifactId>
+    <version>4.1.0-SNAPSHOT</version>
+    <relativePath>../../pom.xml</relativePath>
+  </parent>
+  <dependencies>
+    <dependency>
+      <groupId>mysql</groupId>
+      <artifactId>mysql-connector-java</artifactId>
+      <version>5.1.21</version>
+      <scope>runtime</scope>
+    </dependency>
+  </dependencies>
+  <build>
+    <defaultGoal>install</defaultGoal>
+  </build>
+  <profiles>
+    <profile>
+      <id>deploydb</id>
+      <activation>
+        <property>
+          <name>deploydb</name>
+        </property>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>properties-maven-plugin</artifactId>
+            <version>1.0-alpha-2</version>
+            <executions>
+              <execution>
+                <phase>initialize</phase>
+                <goals>
+                  <goal>read-project-properties</goal>
+                </goals>
+                <configuration>
+                  <files>
+                    <file>${project.parent.basedir}/utils/conf/db.properties</file>
+                    <file>${project.parent.basedir}/utils/conf/db.properties.override</file>
+                  </files>
+                  <quiet>true</quiet>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>sql-maven-plugin</artifactId>
+            <version>1.5</version>
+            <dependencies>
+              <dependency>
+                <groupId>mysql</groupId>
+                <artifactId>mysql-connector-java</artifactId>
+                <version>${cs.mysql.version}</version>
+              </dependency>
+            </dependencies>
+            <configuration>
+              <driver>org.gjt.mm.mysql.Driver</driver>
+              <url>jdbc:mysql://${db.cloud.host}:${db.cloud.port}/cloud</url>
+              <username>${db.cloud.username}</username>
+              <password>${db.cloud.password}</password>
+              <skip>${maven.test.skip}</skip>
+              <autocommit>true</autocommit>
+            </configuration>
+            <executions>
+              <execution>
+                <id>create-schema</id>
+                <phase>process-test-resources</phase>
+                <goals>
+                  <goal>execute</goal>
+                </goals>
+                <configuration>
+                  <srcFiles>
+                    <srcFile>${basedir}/devcloud-kvm.sql</srcFile>
+                  </srcFiles>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <profile>
+      <id>deploysvr</id>
+      <activation>
+        <property>
+          <name>deploysvr</name>
+        </property>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>exec-maven-plugin</artifactId>
+            <version>1.2.1</version>
+            <executions>
+              <execution>
+                <phase>package</phase>
+                <goals>
+                  <goal>exec</goal>
+                </goals>
+              </execution>
+            </executions>
+            <configuration>
+              <executable>python</executable>
+              <arguments>
+                <argument>../marvin/marvin/deployDataCenter.py</argument>
+                <argument>-i</argument>
+                <argument>devcloud-kvm.cfg</argument>
+              </arguments>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+</project>
diff --git a/usage/src/com/cloud/usage/UsageAlertManagerImpl.java b/usage/src/com/cloud/usage/UsageAlertManagerImpl.java index 2b698c83708..f6e0374875a 100644 --- a/usage/src/com/cloud/usage/UsageAlertManagerImpl.java +++ b/usage/src/com/cloud/usage/UsageAlertManagerImpl.java @@ -40,7 +40,7 @@ import com.cloud.alert.AlertVO; import com.cloud.alert.dao.AlertDao; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.utils.NumbersUtil; -import com.cloud.utils.component.ComponentLocator; + import com.sun.mail.smtp.SMTPMessage; import com.sun.mail.smtp.SMTPSSLTransport; import com.sun.mail.smtp.SMTPTransport; diff --git a/usage/src/com/cloud/usage/UsageManagerImpl.java b/usage/src/com/cloud/usage/UsageManagerImpl.java index bbf2cd65ba8..4944a14b14e 100644 --- a/usage/src/com/cloud/usage/UsageManagerImpl.java +++ b/usage/src/com/cloud/usage/UsageManagerImpl.java @@ -30,6 +30,7 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import javax.ejb.Local; +import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; @@ -66,8 +67,8 @@ import com.cloud.user.AccountVO; import com.cloud.user.UserStatisticsVO; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserStatisticsDao; -import com.cloud.utils.component.ComponentLocator; -import com.cloud.utils.component.Inject; + + import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; @@ -89,23 +90,23 @@ public class UsageManagerImpl implements UsageManager, Runnable { private static final int THREE_DAYS_IN_MINUTES = 60 * 24 * 3; private static final int USAGE_AGGREGATION_RANGE_MIN = 10; - private final ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); - private final AccountDao m_accountDao = _locator.getDao(AccountDao.class); - private final UserStatisticsDao m_userStatsDao = _locator.getDao(UserStatisticsDao.class); - private final UsageDao m_usageDao = _locator.getDao(UsageDao.class); - private final UsageVMInstanceDao m_usageInstanceDao = _locator.getDao(UsageVMInstanceDao.class); - private final UsageIPAddressDao m_usageIPAddressDao = _locator.getDao(UsageIPAddressDao.class); - private final UsageNetworkDao m_usageNetworkDao = _locator.getDao(UsageNetworkDao.class); - private final UsageVolumeDao m_usageVolumeDao = _locator.getDao(UsageVolumeDao.class); - private final UsageStorageDao m_usageStorageDao = _locator.getDao(UsageStorageDao.class); - private final UsageLoadBalancerPolicyDao m_usageLoadBalancerPolicyDao = _locator.getDao(UsageLoadBalancerPolicyDao.class); - private final UsagePortForwardingRuleDao m_usagePortForwardingRuleDao = _locator.getDao(UsagePortForwardingRuleDao.class); - private final UsageNetworkOfferingDao m_usageNetworkOfferingDao = _locator.getDao(UsageNetworkOfferingDao.class); - private final UsageVPNUserDao m_usageVPNUserDao = _locator.getDao(UsageVPNUserDao.class); - private final UsageSecurityGroupDao
m_usageSecurityGroupDao = _locator.getDao(UsageSecurityGroupDao.class); - private final UsageJobDao m_usageJobDao = _locator.getDao(UsageJobDao.class); + @Inject private AccountDao m_accountDao; + @Inject private UserStatisticsDao m_userStatsDao; + @Inject private UsageDao m_usageDao; + @Inject private UsageVMInstanceDao m_usageInstanceDao; + @Inject private UsageIPAddressDao m_usageIPAddressDao; + @Inject private UsageNetworkDao m_usageNetworkDao; + @Inject private UsageVolumeDao m_usageVolumeDao; + @Inject private UsageStorageDao m_usageStorageDao; + @Inject private UsageLoadBalancerPolicyDao m_usageLoadBalancerPolicyDao; + @Inject private UsagePortForwardingRuleDao m_usagePortForwardingRuleDao; + @Inject private UsageNetworkOfferingDao m_usageNetworkOfferingDao; + @Inject private UsageVPNUserDao m_usageVPNUserDao; + @Inject private UsageSecurityGroupDao m_usageSecurityGroupDao; + @Inject private UsageJobDao m_usageJobDao; @Inject protected AlertManager _alertMgr; @Inject protected UsageEventDao _usageEventDao; + @Inject ConfigurationDao _configDao; private String m_version = null; private String m_name = null; @@ -152,14 +153,7 @@ public class UsageManagerImpl implements UsageManager, Runnable { m_name = name; - ComponentLocator locator = ComponentLocator.getCurrentLocator(); - ConfigurationDao configDao = locator.getDao(ConfigurationDao.class); - if (configDao == null) { - s_logger.error("Unable to get the configuration dao."); - return false; - } - - Map configs = configDao.getConfiguration(params); + Map configs = _configDao.getConfiguration(params); if (params != null) { mergeConfigs(configs, params); diff --git a/usage/src/com/cloud/usage/UsageServer.java b/usage/src/com/cloud/usage/UsageServer.java index 4cdca7986c2..eaf91328929 100644 --- a/usage/src/com/cloud/usage/UsageServer.java +++ b/usage/src/com/cloud/usage/UsageServer.java @@ -16,14 +16,17 @@ // under the License. 
package com.cloud.usage; +import javax.inject.Inject; + import org.apache.log4j.Logger; -import com.cloud.utils.component.ComponentLocator; + public class UsageServer { private static final Logger s_logger = Logger.getLogger(UsageServer.class.getName()); public static final String Name = "usage-server"; + @Inject UsageManager mgr; /** * @param args */ @@ -38,8 +41,6 @@ public class UsageServer { } public void start() { - final ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); - UsageManager mgr = _locator.getManager(UsageManager.class); if (mgr != null) { if (s_logger.isInfoEnabled()) { s_logger.info("UsageServer ready..."); diff --git a/usage/src/com/cloud/usage/parser/IPAddressUsageParser.java b/usage/src/com/cloud/usage/parser/IPAddressUsageParser.java index 08cb02190e6..a5a40c0fa04 100644 --- a/usage/src/com/cloud/usage/parser/IPAddressUsageParser.java +++ b/usage/src/com/cloud/usage/parser/IPAddressUsageParser.java @@ -22,7 +22,11 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.annotation.PostConstruct; +import javax.inject.Inject; + import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.usage.UsageIPAddressVO; import com.cloud.usage.UsageServer; @@ -32,16 +36,24 @@ import com.cloud.usage.dao.UsageDao; import com.cloud.usage.dao.UsageIPAddressDao; import com.cloud.user.AccountVO; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; +@Component public class IPAddressUsageParser { public static final Logger s_logger = Logger.getLogger(IPAddressUsageParser.class.getName()); - private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); - private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); - private static UsageIPAddressDao m_usageIPAddressDao = _locator.getDao(UsageIPAddressDao.class); + private static UsageDao m_usageDao; + private static UsageIPAddressDao m_usageIPAddressDao; + + @Inject private UsageDao _usageDao; + @Inject private UsageIPAddressDao _usageIPAddressDao; + @PostConstruct + void init() { + m_usageDao = _usageDao; + m_usageIPAddressDao = _usageIPAddressDao; + } + public static boolean parse(AccountVO account, Date startDate, Date endDate) { if (s_logger.isDebugEnabled()) { s_logger.debug("Parsing IP Address usage for account: " + account.getId()); diff --git a/usage/src/com/cloud/usage/parser/LoadBalancerUsageParser.java b/usage/src/com/cloud/usage/parser/LoadBalancerUsageParser.java index c1423c6cc8d..edea320aa08 100644 --- a/usage/src/com/cloud/usage/parser/LoadBalancerUsageParser.java +++ b/usage/src/com/cloud/usage/parser/LoadBalancerUsageParser.java @@ -22,7 +22,11 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.annotation.PostConstruct; +import javax.inject.Inject; + import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.usage.UsageLoadBalancerPolicyVO; import com.cloud.usage.UsageServer; @@ -32,14 +36,22 @@ import com.cloud.usage.dao.UsageDao; import com.cloud.usage.dao.UsageLoadBalancerPolicyDao; import com.cloud.user.AccountVO; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; +@Component public class LoadBalancerUsageParser { public static final Logger s_logger = Logger.getLogger(LoadBalancerUsageParser.class.getName()); - private static ComponentLocator _locator = 
ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); - private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); - private static UsageLoadBalancerPolicyDao m_usageLoadBalancerPolicyDao = _locator.getDao(UsageLoadBalancerPolicyDao.class); + private static UsageDao m_usageDao; + private static UsageLoadBalancerPolicyDao m_usageLoadBalancerPolicyDao; + + @Inject private UsageDao _usageDao; + @Inject private UsageLoadBalancerPolicyDao _usageLoadBalancerPolicyDao; + + @PostConstruct + void init() { + m_usageDao = _usageDao; + m_usageLoadBalancerPolicyDao = _usageLoadBalancerPolicyDao; + } public static boolean parse(AccountVO account, Date startDate, Date endDate) { if (s_logger.isDebugEnabled()) { diff --git a/usage/src/com/cloud/usage/parser/NetworkOfferingUsageParser.java b/usage/src/com/cloud/usage/parser/NetworkOfferingUsageParser.java index fc7fc2a54e7..f6ddf9f1bbb 100644 --- a/usage/src/com/cloud/usage/parser/NetworkOfferingUsageParser.java +++ b/usage/src/com/cloud/usage/parser/NetworkOfferingUsageParser.java @@ -22,6 +22,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.annotation.PostConstruct; +import javax.inject.Inject; + import org.apache.log4j.Logger; import com.cloud.usage.UsageNetworkOfferingVO; @@ -32,14 +35,22 @@ import com.cloud.usage.dao.UsageDao; import com.cloud.usage.dao.UsageNetworkOfferingDao; import com.cloud.user.AccountVO; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; + public class NetworkOfferingUsageParser { public static final Logger s_logger = Logger.getLogger(NetworkOfferingUsageParser.class.getName()); - private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); - private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); - private static UsageNetworkOfferingDao m_usageNetworkOfferingDao = _locator.getDao(UsageNetworkOfferingDao.class); + private static UsageDao m_usageDao; + private static UsageNetworkOfferingDao m_usageNetworkOfferingDao; + + @Inject private UsageDao _usageDao; + @Inject private UsageNetworkOfferingDao _usageNetworkOfferingDao; + + @PostConstruct + void init() { + m_usageDao = _usageDao; + m_usageNetworkOfferingDao = _usageNetworkOfferingDao; + } public static boolean parse(AccountVO account, Date startDate, Date endDate) { if (s_logger.isDebugEnabled()) { diff --git a/usage/src/com/cloud/usage/parser/NetworkUsageParser.java b/usage/src/com/cloud/usage/parser/NetworkUsageParser.java index acdbc484dcd..fb673d73c5f 100644 --- a/usage/src/com/cloud/usage/parser/NetworkUsageParser.java +++ b/usage/src/com/cloud/usage/parser/NetworkUsageParser.java @@ -21,26 +21,35 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.annotation.PostConstruct; +import javax.inject.Inject; + import org.apache.log4j.Logger; import com.cloud.usage.UsageNetworkVO; -import com.cloud.usage.UsageServer; import com.cloud.usage.UsageTypes; import com.cloud.usage.UsageVO; import com.cloud.usage.dao.UsageDao; import com.cloud.usage.dao.UsageNetworkDao; import com.cloud.user.AccountVO; -import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; + import com.cloud.utils.db.SearchCriteria; public class NetworkUsageParser { public static final Logger s_logger = Logger.getLogger(NetworkUsageParser.class.getName()); - private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); - private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); - private static UsageNetworkDao m_usageNetworkDao = _locator.getDao(UsageNetworkDao.class); + private static UsageDao m_usageDao; + private static UsageNetworkDao m_usageNetworkDao; + @Inject private UsageDao _usageDao; + @Inject private UsageNetworkDao _usageNetworkDao; + + @PostConstruct + void init() { + m_usageDao = _usageDao; + m_usageNetworkDao = _usageNetworkDao; + } + public static boolean parse(AccountVO account, Date startDate, Date endDate) { if (s_logger.isDebugEnabled()) { s_logger.debug("Parsing all Network usage events for account: " + account.getId()); diff --git a/usage/src/com/cloud/usage/parser/PortForwardingUsageParser.java b/usage/src/com/cloud/usage/parser/PortForwardingUsageParser.java index 469fb655f9e..16921804aa8 100644 --- a/usage/src/com/cloud/usage/parser/PortForwardingUsageParser.java +++ b/usage/src/com/cloud/usage/parser/PortForwardingUsageParser.java @@ -22,6 +22,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.annotation.PostConstruct; +import javax.inject.Inject; + import org.apache.log4j.Logger; import com.cloud.usage.UsagePortForwardingRuleVO; @@ -32,14 +35,22 @@ import com.cloud.usage.dao.UsageDao; import com.cloud.usage.dao.UsagePortForwardingRuleDao; import com.cloud.user.AccountVO; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; + public class PortForwardingUsageParser { public static final Logger s_logger = Logger.getLogger(PortForwardingUsageParser.class.getName()); - private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); - private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); - private static UsagePortForwardingRuleDao m_usagePFRuleDao = _locator.getDao(UsagePortForwardingRuleDao.class); + private static UsageDao m_usageDao; + private static UsagePortForwardingRuleDao m_usagePFRuleDao; + + @Inject private UsageDao _usageDao; + @Inject private UsagePortForwardingRuleDao _usagePFRuleDao; + + @PostConstruct + void init() { + m_usageDao = _usageDao; + m_usagePFRuleDao = _usagePFRuleDao; + } public static boolean parse(AccountVO account, Date startDate, Date endDate) { if (s_logger.isDebugEnabled()) { diff --git a/usage/src/com/cloud/usage/parser/SecurityGroupUsageParser.java b/usage/src/com/cloud/usage/parser/SecurityGroupUsageParser.java index 28323851ed9..ed7acf348e1 100644 --- a/usage/src/com/cloud/usage/parser/SecurityGroupUsageParser.java +++ b/usage/src/com/cloud/usage/parser/SecurityGroupUsageParser.java @@ -22,6 +22,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.annotation.PostConstruct; +import javax.inject.Inject; + import org.apache.log4j.Logger; import com.cloud.usage.UsageSecurityGroupVO; @@ -32,14 +35,22 @@ import com.cloud.usage.dao.UsageDao; import com.cloud.usage.dao.UsageSecurityGroupDao; import com.cloud.user.AccountVO; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; + public class SecurityGroupUsageParser { public static final Logger s_logger = Logger.getLogger(SecurityGroupUsageParser.class.getName()); - private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); - private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); - private static UsageSecurityGroupDao m_usageSecurityGroupDao =
_locator.getDao(UsageSecurityGroupDao.class); + private static UsageDao m_usageDao; + private static UsageSecurityGroupDao m_usageSecurityGroupDao; + + @Inject private UsageDao _usageDao; + @Inject private UsageSecurityGroupDao _usageSecurityGroupDao; + + @PostConstruct + void init() { + m_usageDao = _usageDao; + m_usageSecurityGroupDao = _usageSecurityGroupDao; + } public static boolean parse(AccountVO account, Date startDate, Date endDate) { if (s_logger.isDebugEnabled()) { diff --git a/usage/src/com/cloud/usage/parser/StorageUsageParser.java b/usage/src/com/cloud/usage/parser/StorageUsageParser.java index 4d48e39b750..7542063fca3 100644 --- a/usage/src/com/cloud/usage/parser/StorageUsageParser.java +++ b/usage/src/com/cloud/usage/parser/StorageUsageParser.java @@ -22,6 +22,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.annotation.PostConstruct; +import javax.inject.Inject; + import org.apache.log4j.Logger; import com.cloud.usage.StorageTypes; @@ -33,14 +36,22 @@ import com.cloud.usage.dao.UsageDao; import com.cloud.usage.dao.UsageStorageDao; import com.cloud.user.AccountVO; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; + public class StorageUsageParser { public static final Logger s_logger = Logger.getLogger(StorageUsageParser.class.getName()); - private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); - private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); - private static UsageStorageDao m_usageStorageDao = _locator.getDao(UsageStorageDao.class); + private static UsageDao m_usageDao; + private static UsageStorageDao m_usageStorageDao; + + @Inject private UsageDao _usageDao; + @Inject private UsageStorageDao _usageStorageDao; + + @PostConstruct + void init() { + m_usageDao = _usageDao; + m_usageStorageDao = _usageStorageDao; + } public static boolean parse(AccountVO account, Date startDate, Date endDate) { if (s_logger.isDebugEnabled()) { diff --git a/usage/src/com/cloud/usage/parser/VMInstanceUsageParser.java b/usage/src/com/cloud/usage/parser/VMInstanceUsageParser.java index 681e8ec31ba..8d2e465aa89 100644 --- a/usage/src/com/cloud/usage/parser/VMInstanceUsageParser.java +++ b/usage/src/com/cloud/usage/parser/VMInstanceUsageParser.java @@ -22,6 +22,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.annotation.PostConstruct; +import javax.inject.Inject; + import org.apache.log4j.Logger; import com.cloud.usage.UsageServer; @@ -32,14 +35,22 @@ import com.cloud.usage.dao.UsageDao; import com.cloud.usage.dao.UsageVMInstanceDao; import com.cloud.user.AccountVO; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; + public class VMInstanceUsageParser { public static final Logger s_logger = Logger.getLogger(VMInstanceUsageParser.class.getName()); - private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); - private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); - private static UsageVMInstanceDao m_usageInstanceDao = _locator.getDao(UsageVMInstanceDao.class); + private static UsageDao m_usageDao; + private static UsageVMInstanceDao m_usageInstanceDao; + + @Inject private UsageDao _usageDao; + @Inject private UsageVMInstanceDao _usageInstanceDao; + + @PostConstruct + void init() { + m_usageDao = _usageDao; + m_usageInstanceDao = _usageInstanceDao; + }
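Note on the pattern above: each usage parser keeps static m_* fields because its parse() entry point is static, while the container only injects instance fields; the @PostConstruct init() runs once after injection and copies the injected references into the statics (which is also why the @Inject fields themselves must not be static, as corrected for VMInstanceUsageParser above). A minimal, self-contained sketch of the same bridge, using a hypothetical ExampleDao in place of the real CloudStack DAOs:

    import javax.annotation.PostConstruct;
    import javax.inject.Inject;
    import org.springframework.stereotype.Component;

    interface ExampleDao { /* stand-in for a real DAO interface */ }

    @Component
    public class ExampleUsageParser {
        // Static view, readable from the static parse() entry point.
        private static ExampleDao s_dao;

        // Instance field; the Spring container populates this one.
        @Inject
        private ExampleDao _dao;

        @PostConstruct
        void init() {
            // Runs once after injection: bridge instance state to static state.
            s_dao = _dao;
        }

        public static boolean parse() {
            // Static code can now reach the injected bean.
            return s_dao != null;
        }
    }

The sketch assumes a single container instance; if several contexts created the bean, the last one initialized would win the static slot.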
public static boolean parse(AccountVO account, Date startDate, Date endDate) { if (s_logger.isDebugEnabled()) { diff --git a/usage/src/com/cloud/usage/parser/VPNUserUsageParser.java b/usage/src/com/cloud/usage/parser/VPNUserUsageParser.java index 089bf9072c0..c9a863b99d6 100644 --- a/usage/src/com/cloud/usage/parser/VPNUserUsageParser.java +++ b/usage/src/com/cloud/usage/parser/VPNUserUsageParser.java @@ -22,6 +22,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.annotation.PostConstruct; +import javax.inject.Inject; + import org.apache.log4j.Logger; import com.cloud.usage.UsageVPNUserVO; @@ -32,14 +35,22 @@ import com.cloud.usage.dao.UsageDao; import com.cloud.usage.dao.UsageVPNUserDao; import com.cloud.user.AccountVO; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; + public class VPNUserUsageParser { public static final Logger s_logger = Logger.getLogger(VPNUserUsageParser.class.getName()); - private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); - private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); - private static UsageVPNUserDao m_usageVPNUserDao = _locator.getDao(UsageVPNUserDao.class); + private static UsageDao m_usageDao; + private static UsageVPNUserDao m_usageVPNUserDao; + + @Inject private UsageDao _usageDao; + @Inject private UsageVPNUserDao _usageVPNUserDao; + + @PostConstruct + void init() { + m_usageDao = _usageDao; + m_usageVPNUserDao = _usageVPNUserDao; + } public static boolean parse(AccountVO account, Date startDate, Date endDate) { if (s_logger.isDebugEnabled()) { diff --git a/usage/src/com/cloud/usage/parser/VolumeUsageParser.java b/usage/src/com/cloud/usage/parser/VolumeUsageParser.java index db58f41a6ef..e797f1c5f2f 100644 --- a/usage/src/com/cloud/usage/parser/VolumeUsageParser.java +++ b/usage/src/com/cloud/usage/parser/VolumeUsageParser.java @@ -22,6 +22,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.annotation.PostConstruct; +import javax.inject.Inject; + import org.apache.log4j.Logger; import com.cloud.usage.UsageServer; @@ -32,14 +35,22 @@ import com.cloud.usage.dao.UsageDao; import com.cloud.usage.dao.UsageVolumeDao; import com.cloud.user.AccountVO; import com.cloud.utils.Pair; -import com.cloud.utils.component.ComponentLocator; + public class VolumeUsageParser { public static final Logger s_logger = Logger.getLogger(VolumeUsageParser.class.getName()); - private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); - private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); - private static UsageVolumeDao m_usageVolumeDao = _locator.getDao(UsageVolumeDao.class); + private static UsageDao m_usageDao; + private static UsageVolumeDao m_usageVolumeDao; + + @Inject private UsageDao _usageDao; + @Inject private UsageVolumeDao _usageVolumeDao; + + @PostConstruct + void init() { + m_usageDao = _usageDao; + m_usageVolumeDao = _usageVolumeDao; + } public static boolean parse(AccountVO account, Date startDate, Date endDate) { if (s_logger.isDebugEnabled()) { diff --git a/utils/src/com/cloud/utils/IdentityProxy.java b/utils/src/com/cloud/utils/IdentityProxy.java deleted file mode 100644 index 7e385fbf05a..00000000000 --- a/utils/src/com/cloud/utils/IdentityProxy.java +++ /dev/null @@ -1,60 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more 
contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.utils; - -public class IdentityProxy { - private String _tableName; - private Long _value; - private String _idFieldName; - - public IdentityProxy() { - } - - public IdentityProxy(String tableName) { - _tableName = tableName; - } - - public IdentityProxy(String tableName, Long id, String fieldName) { - _tableName = tableName; - _value = id; - _idFieldName = fieldName; - } - - public String getTableName() { - return _tableName; - } - - public void setTableName(String tableName) { - _tableName = tableName; - } - - public Long getValue() { - return _value; - } - - public void setValue(Long value) { - _value = value; - } - - public void setidFieldName(String value) { - _idFieldName = value; - } - - public String getidFieldName() { - return _idFieldName; - } -} diff --git a/utils/src/com/cloud/utils/PropertiesUtil.java b/utils/src/com/cloud/utils/PropertiesUtil.java index 3909ca876b6..90f8af8b33f 100755 --- a/utils/src/com/cloud/utils/PropertiesUtil.java +++ b/utils/src/com/cloud/utils/PropertiesUtil.java @@ -17,6 +17,8 @@ package com.cloud.utils; import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.net.URL; @@ -28,6 +30,7 @@ import java.util.Set; import org.apache.log4j.Logger; public class PropertiesUtil { + private static final Logger s_logger = Logger.getLogger(PropertiesUtil.class); /** * Searches the class path and local paths to find the config file. * @param path path to find. if it starts with / then it's absolute path.
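The hunk below adds PropertiesUtil.processConfigFile(). A hedged usage sketch (the file name and entry are illustrative, in the style of CloudStack's commands.properties, where a value may carry a trailing ';'-separated field):

    // Entry in commands.properties (illustrative):
    //   deployVirtualMachine=com.cloud.api.commands.DeployVMCmd;15
    Map<String, String> cmdProps =
            PropertiesUtil.processConfigFile(new String[] { "commands.properties" });
    String bits = cmdProps.get("deployVirtualMachine");   // "15": the text after the last ';'
    // A plain key=value entry comes back whole: lastIndexOf(";") is -1,
    // so substring(-1 + 1) == substring(0) returns the full value.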
@@ -116,4 +119,41 @@ public class PropertiesUtil { } return null; } + + // Returns key=value pairs by parsing a commands.properties/config file + // with syntax: key=cmd;value (cmd is stripped) or plain key=value + public static Map<String, String> processConfigFile(String[] configFiles) { + Map<String, String> configMap = new HashMap<String, String>(); + Properties preProcessedCommands = new Properties(); + for (String configFile : configFiles) { + File commandsFile = findConfigFile(configFile); + if (commandsFile != null) { + try { + preProcessedCommands.load(new FileInputStream(commandsFile)); + } catch (FileNotFoundException fnfex) { + // in case of a file within a jar in classpath, try to open stream using url + InputStream stream = PropertiesUtil.openStreamFromURL(configFile); + if (stream != null) { + try { + preProcessedCommands.load(stream); + } catch (IOException e) { + s_logger.error("IO Exception loading properties file from classpath stream", e); + } + } else { + s_logger.error("Unable to find properties file", fnfex); + } + } catch (IOException ioe) { + s_logger.error("IO Exception loading properties file", ioe); + } + } + } + + for (Object key : preProcessedCommands.keySet()) { + String preProcessedCommand = preProcessedCommands.getProperty((String) key); + int splitIndex = preProcessedCommand.lastIndexOf(";"); + String value = preProcessedCommand.substring(splitIndex+1); + configMap.put((String)key, value); + } + return configMap; + } } diff --git a/utils/src/com/cloud/utils/UriUtils.java b/utils/src/com/cloud/utils/UriUtils.java index 4a56988759f..a8b5ccb0934 100644 --- a/utils/src/com/cloud/utils/UriUtils.java +++ b/utils/src/com/cloud/utils/UriUtils.java @@ -32,7 +32,7 @@ public class UriUtils { throw new CloudRuntimeException("Unable to form nfs URI: " + host + " - " + path); } } - + public static String formIscsiUri(String host, String iqn, Integer lun) { try { String path = iqn; @@ -48,34 +48,34 @@ public class UriUtils { public static String formFileUri(String path) { File file = new File(path); - + return file.toURI().toString(); } - + // a simple URI component helper (Note: it does not deal with URI paramemeter area) public static String encodeURIComponent(String url) { - int schemeTail = url.indexOf("://"); - - int pathStart = 0; - if(schemeTail > 0) - pathStart = url.indexOf('/', schemeTail + 3); - else - pathStart = url.indexOf('/'); - - if(pathStart > 0) { - String[] tokens = url.substring(pathStart + 1).split("/"); - if(tokens != null) { - StringBuffer sb = new StringBuffer(); - sb.append(url.substring(0, pathStart)); - for(String token : tokens) { - sb.append("/").append(URLEncoder.encode(token)); - } - - return sb.toString(); - } - } - - // no need to do URL component encoding - return url; + int schemeTail = url.indexOf("://"); + + int pathStart = 0; + if(schemeTail > 0) + pathStart = url.indexOf('/', schemeTail + 3); + else + pathStart = url.indexOf('/'); + + if(pathStart > 0) { + String[] tokens = url.substring(pathStart + 1).split("/"); + if(tokens != null) { + StringBuffer sb = new StringBuffer(); + sb.append(url.substring(0, pathStart)); + for(String token : tokens) { + sb.append("/").append(URLEncoder.encode(token)); + } + + return sb.toString(); + } + } + + // no need to do URL component encoding + return url; } } diff --git a/utils/src/com/cloud/utils/component/AdapterBase.java b/utils/src/com/cloud/utils/component/AdapterBase.java index e7be829bdbf..8fd374aebee 100644 --- a/utils/src/com/cloud/utils/component/AdapterBase.java +++ b/utils/src/com/cloud/utils/component/AdapterBase.java @@ -16,6
 // under the License.
 package com.cloud.utils.component;
 
+import java.util.List;
 import java.util.Map;
 
 import javax.naming.ConfigurationException;
@@ -34,6 +35,10 @@ public class AdapterBase implements Adapter {
     public String getName() {
         return _name;
     }
+
+    public void setName(String name) {
+        _name = name;
+    }
 
     @Override
     public boolean start() {
@@ -45,4 +50,12 @@
         return true;
     }
 
+    public static <T extends Adapter> T getAdapterByName(List<T> adapters, String name) {
+        for(T adapter : adapters) {
+            if(adapter.getName().equals(name))
+                return adapter;
+        }
+        return null;
+    }
+
 }
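With the Adapters wrapper deleted just below, callers keep a plain List of adapters (now injected by Spring) and look them up through the static helper added above. A minimal sketch, with a hypothetical adapter type:

import java.util.Arrays;
import java.util.List;

import com.cloud.utils.component.AdapterBase;

public class GetAdapterByNameDemo {
    // Hypothetical adapter, for illustration only.
    static class FirstFitPlanner extends AdapterBase {
    }

    public static void main(String[] args) {
        FirstFitPlanner planner = new FirstFitPlanner();
        planner.setName("FirstFit"); // uses the setter added above
        List<FirstFitPlanner> planners = Arrays.asList(planner);
        // Replaces the old Adapters.get(name) lookup:
        FirstFitPlanner found = AdapterBase.getAdapterByName(planners, "FirstFit");
        System.out.println(found != null); // true
    }
}

diff --git a/utils/src/com/cloud/utils/component/Adapters.java b/utils/src/com/cloud/utils/component/Adapters.java
deleted file mode 100755
index 2a2203ff555..00000000000
--- a/utils/src/com/cloud/utils/component/Adapters.java
+++ /dev/null
@@ -1,93 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.utils.component;
-
-import java.util.Collection;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
-import com.cloud.utils.EnumerationImpl;
-import com.cloud.utils.component.LegacyComponentLocator.ComponentInfo;
-
-/**
- * the iterator even during dynamic reloading.
- *
- **/
-public class Adapters<T extends Adapter> implements Iterable<T> {
-    protected Map<String, T> _map;
-    protected List<ComponentInfo<Adapter>> _infos;
-
-    protected String _name;
-
-    public Adapters(String name, List<ComponentInfo<Adapter>> adapters) {
-        _name = name;
-        set(adapters);
-    }
-
-    /**
-     * Get the adapter list name.
-     *
-     * @return the name of the list of adapters.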
-     */
-    public String getName() {
-        return _name;
-    }
-
-    public Enumeration<T> enumeration() {
-        return new EnumerationImpl<T>(_map.values().iterator());
-    }
-
-    @Override
-    public Iterator<T> iterator() {
-        return new EnumerationImpl<T>(_map.values().iterator());
-    }
-
-    protected Collection<T> get() {
-        return _map.values();
-    }
-
-    protected void set(List<ComponentInfo<Adapter>> adapters) {
-        HashMap<String, T> map = new LinkedHashMap<String, T>(adapters.size());
-        for (ComponentInfo<Adapter> adapter : adapters) {
-            @SuppressWarnings("unchecked")
-            T t = (T)adapter.instance;
-            map.put(adapter.getName(), t);
-        }
-        this._map = map;
-        this._infos = adapters;
-    }
-
-    public T get(String name) {
-        return _map.get(name);
-    }
-
-    public boolean isSet() {
-        return _map.size() != 0;
-    }
-
-    public static <T extends Adapter> T getAdapterByName(List<T> adapters, String name) {
-        for(T adapter : adapters) {
-            if(adapter.getName().equals(name))
-                return adapter;
-        }
-        return null;
-    }
-}
diff --git a/utils/src/com/cloud/utils/component/ComponentContext.java b/utils/src/com/cloud/utils/component/ComponentContext.java
index ab049ee1a83..be293b66282 100644
--- a/utils/src/com/cloud/utils/component/ComponentContext.java
+++ b/utils/src/com/cloud/utils/component/ComponentContext.java
@@ -54,12 +54,12 @@ public class ComponentContext implements ApplicationContextAware {
         return s_appContext;
     }
 
-    public static <T> T getCompanent(String name) {
+    public static <T> T getComponent(String name) {
         assert(s_appContext != null);
         return (T)s_appContext.getBean(name);
     }
 
-    public static <T> T getCompanent(Class<T> beanType) {
+    public static <T> T getComponent(Class<T> beanType) {
         assert(s_appContext != null);
         try {
             return (T)s_appContext.getBean(beanType);
diff --git a/utils/src/com/cloud/utils/component/ComponentInject.java b/utils/src/com/cloud/utils/component/ComponentInject.java
deleted file mode 100644
index c88cd3f4c3c..00000000000
--- a/utils/src/com/cloud/utils/component/ComponentInject.java
+++ /dev/null
@@ -1,27 +0,0 @@
-package com.cloud.utils.component;
-
-import javax.inject.Inject;
-
-import org.springframework.beans.factory.config.AutowireCapableBeanFactory;
-import org.springframework.stereotype.Component;
-
-@Component
-public class ComponentInject {
-
-    private static AutowireCapableBeanFactory beanFactory;
-    @SuppressWarnings("unused")
-    @Inject
-    private void setbeanFactory(AutowireCapableBeanFactory bf) {
-        ComponentInject.beanFactory = bf;
-    }
-
-    public static <T> T inject(Class<T> clazz) {
-        return beanFactory.createBean(clazz);
-    }
-
-    public static <T> T inject(T obj) {
-        beanFactory.autowireBean(obj);
-        beanFactory.initializeBean(obj, null);
-        return obj;
-    }
-}
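After the rename above, bean lookups go through ComponentContext rather than the locator. A minimal sketch, assuming a Spring ApplicationContext has already been wired up at startup; the bean type and name are hypothetical:

import com.cloud.utils.component.ComponentContext;

public class ComponentLookupExample {
    // Hypothetical bean interface, for illustration only.
    public interface NetworkOrchestrator {
    }

    public void lookup() {
        // Replaces locator.getManager(...) / locator.getDao(...) style lookups.
        NetworkOrchestrator byType = ComponentContext.getComponent(NetworkOrchestrator.class);
        NetworkOrchestrator byName = ComponentContext.getComponent("networkOrchestrator");
    }
}

diff --git a/utils/src/com/cloud/utils/component/ComponentLibrary.java b/utils/src/com/cloud/utils/component/ComponentLibrary.java
deleted file mode 100755
index 52c470389c0..00000000000
--- a/utils/src/com/cloud/utils/component/ComponentLibrary.java
+++ /dev/null
@@ -1,56 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// the License.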
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.utils.component; - -import java.util.List; -import java.util.Map; - -import com.cloud.utils.component.LegacyComponentLocator.ComponentInfo; -import com.cloud.utils.db.GenericDao; - -/** - * ComponentLibrary specifies the implementation classes that a server needs - * attribute of the server element within components.xml. ComponentLocator - * first loads the implementations specified here, then, it loads the - * implementations from components.xml. If an interface is specified in both - * within the components.xml overrides the one within ComponentLibrary. - * - */ -public interface ComponentLibrary { - /** - * @return all of the daos - */ - Map>> getDaos(); - - /** - * @return all of the Managers - */ - Map> getManagers(); - - /** - * @return all of the adapters - */ - Map>> getAdapters(); - - Map, Class> getFactories(); - - /** - * @return all the services - * - */ - Map> getPluggableServices(); -} diff --git a/utils/src/com/cloud/utils/component/ComponentLibraryBase.java b/utils/src/com/cloud/utils/component/ComponentLibraryBase.java deleted file mode 100644 index 58649e425d6..00000000000 --- a/utils/src/com/cloud/utils/component/ComponentLibraryBase.java +++ /dev/null @@ -1,99 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.utils.component; - -import java.io.Serializable; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -import com.cloud.utils.Pair; -import com.cloud.utils.component.LegacyComponentLocator.ComponentInfo; -import com.cloud.utils.db.GenericDao; - -public abstract class ComponentLibraryBase implements ComponentLibrary { - - protected final Map>> _daos = new LinkedHashMap>>(); - - protected ComponentInfo> addDao(String name, Class> clazz) { - return addDao(name, clazz, new ArrayList>(), true); - } - - protected ComponentInfo> addDao(String name, Class> clazz, List> params, boolean singleton) { - ComponentInfo> componentInfo = new ComponentInfo>(name, clazz, params, singleton); - for (String key : componentInfo.getKeys()) { - _daos.put(key, componentInfo); - } - return componentInfo; - } - - protected Map> _managers = new LinkedHashMap>(); - protected Map>> _adapters = new LinkedHashMap>>(); - protected Map> _pluggableServices = new LinkedHashMap>(); - - protected ComponentInfo addManager(String name, Class clazz, List> params, boolean singleton) { - ComponentInfo info = new ComponentInfo(name, clazz, params, singleton); - for (String key : info.getKeys()) { - _managers.put(key, info); - } - return info; - } - - protected ComponentInfo addManager(String name, Class clazz) { - return addManager(name, clazz, new ArrayList>(), true); - } - - protected List> addAdapterChain(Class interphace, List>> adapters) { - ArrayList> lst = new ArrayList>(adapters.size()); - for (Pair> adapter : adapters) { - @SuppressWarnings("unchecked") - Class clazz = (Class)adapter.second(); - lst.add(new ComponentInfo(adapter.first(), clazz)); - } - _adapters.put(interphace.getName(), lst); - return lst; - } - - protected void addAdapter(Class interphace, String name, Class adapterClass) { - List> lst = _adapters.get(interphace.getName()); - if (lst == null) { - addOneAdapter(interphace, name, adapterClass); - } else { - @SuppressWarnings("unchecked") - Class clazz = (Class)adapterClass; - lst.add(new ComponentInfo(name, clazz)); - } - } - - protected ComponentInfo addOneAdapter(Class interphace, String name, Class adapterClass) { - List>> adapters = new ArrayList>>(); - adapters.add(new Pair>(name, adapterClass)); - return addAdapterChain(interphace, adapters).get(0); - } - - - protected ComponentInfo addService(String name, Class serviceInterphace, Class clazz, List> params, boolean singleton) { - ComponentInfo info = new ComponentInfo(name, clazz, params, singleton); - _pluggableServices.put(serviceInterphace.getName(), info); - return info; - } - - protected ComponentInfo addService(String name, Class serviceInterphace, Class clazz) { - return addService(name, serviceInterphace, clazz, new ArrayList>(), true); - } - } diff --git a/utils/src/com/cloud/utils/component/ComponentLocator.java b/utils/src/com/cloud/utils/component/ComponentLocator.java deleted file mode 100644 index d8d6e63b199..00000000000 --- a/utils/src/com/cloud/utils/component/ComponentLocator.java +++ /dev/null @@ -1,58 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.utils.component; - -import java.io.Serializable; - -import org.springframework.stereotype.Component; - -import com.cloud.utils.db.GenericDao; - -@Component -public class ComponentLocator { - public static ComponentLocator getCurrentLocator() { - return ComponentContext.getCompanent(ComponentLocator.class); - } - - public static ComponentLocator getLocator(String server) { - return ComponentContext.getCompanent(ComponentLocator.class); - } - - public static ComponentLocator getLocator(String server, String configFileName, String log4jFilename) { - return ComponentContext.getCompanent(ComponentLocator.class); - } - - public static Object getComponent(String componentName) { - return ComponentContext.getCompanent(componentName); - } - - public > T getDao(Class clazz) { - return ComponentContext.getCompanent(clazz); - } - - public T getManager(Class clazz) { - return ComponentContext.getCompanent(clazz); - } - - public T getPluggableService(Class clazz) { - return ComponentContext.getCompanent(clazz); - } - - public static T inject(Class clazz) { - return ComponentContext.inject(clazz); - } -} diff --git a/utils/src/com/cloud/utils/component/ComponentLocatorMBean.java b/utils/src/com/cloud/utils/component/ComponentLocatorMBean.java deleted file mode 100755 index 125e92ac5d9..00000000000 --- a/utils/src/com/cloud/utils/component/ComponentLocatorMBean.java +++ /dev/null @@ -1,43 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.utils.component; - - -import java.util.Collection; -import java.util.List; -import java.util.Map; - -import com.cloud.utils.mgmt.ManagementBean; - -public interface ComponentLocatorMBean extends ManagementBean { - - /** - * @return the list of adapters accessible by this component locator. - **/ - Map> getAdapterNames(); - - /** - * @return the list of managers accessible by this component locator. - **/ - Collection getManagerNames(); - - /** - * @return the list of DAOs accessible by this component locator. 
- */ - Collection getDaoNames(); - -} diff --git a/utils/src/com/cloud/utils/component/LegacyComponentLocator.java b/utils/src/com/cloud/utils/component/LegacyComponentLocator.java deleted file mode 100755 index 719f5601456..00000000000 --- a/utils/src/com/cloud/utils/component/LegacyComponentLocator.java +++ /dev/null @@ -1,1282 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.utils.component; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.Serializable; -import java.lang.reflect.Constructor; -import java.lang.reflect.Field; -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Enumeration; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.management.InstanceAlreadyExistsException; -import javax.management.MBeanRegistrationException; -import javax.management.MalformedObjectNameException; -import javax.management.NotCompliantMBeanException; -import javax.naming.ConfigurationException; -import javax.xml.parsers.ParserConfigurationException; -import javax.xml.parsers.SAXParser; -import javax.xml.parsers.SAXParserFactory; - -import net.sf.cglib.proxy.Callback; -import net.sf.cglib.proxy.CallbackFilter; -import net.sf.cglib.proxy.Enhancer; -import net.sf.cglib.proxy.Factory; -import net.sf.cglib.proxy.MethodInterceptor; -import net.sf.cglib.proxy.MethodProxy; -import net.sf.cglib.proxy.NoOp; - -import org.apache.log4j.Logger; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.xml.DOMConfigurator; -import org.xml.sax.Attributes; -import org.xml.sax.SAXException; -import org.xml.sax.helpers.DefaultHandler; - -import com.cloud.utils.Pair; -import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.db.DatabaseCallback; -import com.cloud.utils.db.DatabaseCallbackFilter; -import com.cloud.utils.db.GenericDao; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.mgmt.JmxUtil; -import com.cloud.utils.mgmt.ManagementBean; - -/** - * ComponentLocator ties together several different concepts. First, it - * deals with how a system should be put together. It manages different - * types of components: - * - Manager: Singleton implementation of a certain process. - * - Adapter: Different singleton implementations for the same functions. - * - SystemIntegrityChecker: Singletons that are called at the load time. - * - Dao: Data Access Objects. 
- * - * These components can be declared in several ways: - * - ComponentLibrary - A Java class that declares the above components. The - * advantage of declaring components here is they change automatically - * with any refactoring. - * - components specification - An xml file that overrides the - * ComponentLibrary. The advantage of declaring components here is - * they can change by hand on every deployment. - * - * The two are NOT mutually exclusive. ComponentLocator basically locates - * the components specification, which specifies the ComponentLibrary within. - * Components found in the ComponentLibrary are overridden by components - * found in components specification. - * - * Components specification can also be nested. One components specification - * can point to another components specification and, therefore, "inherits" - * those components but still override one or more components. ComponentLocator - * reads the child components specification first and follow the chain up. - * the child's components overrides the ones in the parent. - * - * ComponentLocator looks for the components specification as follows: - * 1. By following the path specified by "cloud-stack-components-specification" - * within the environment.properties file. - * 2. Look for components.xml in the class path. - * - * ComponentLocator also ties in component injection. Components can specify - * an @Inject annotation to components ComponentLocator knows. When - * instantiating components, ComponentLocator attempts to inject these - * components. - * - **/ -@SuppressWarnings("unchecked") -public class LegacyComponentLocator implements ComponentLocatorMBean { - protected static final Logger s_logger = Logger.getLogger(LegacyComponentLocator.class); - - protected static final ThreadLocal s_tl = new ThreadLocal(); - protected static final ConcurrentHashMap, Singleton> s_singletons = new ConcurrentHashMap, Singleton>(111); - protected static final HashMap s_locators = new HashMap(); - protected static final HashMap, InjectInfo> s_factories = new HashMap, InjectInfo>(); - protected static Boolean s_once = false; - protected static Boolean _hasCheckerRun = false; - protected static Callback[] s_callbacks = new Callback[] { NoOp.INSTANCE, new DatabaseCallback()}; - protected static CallbackFilter s_callbackFilter = new DatabaseCallbackFilter(); - protected static final List> s_interceptors = new ArrayList>(); - protected static CleanupThread s_janitor = null; - - protected HashMap> _adapterMap; - protected HashMap> _managerMap; - protected LinkedHashMap> _checkerMap; - protected LinkedHashMap>> _daoMap; - protected String _serverName; - protected Object _component; - protected HashMap, Class> _factories; - protected HashMap> _pluginsMap; - - static { - if (s_janitor == null) { - s_janitor = new CleanupThread(); - Runtime.getRuntime().addShutdownHook(new CleanupThread()); - } - } - - public LegacyComponentLocator(String server) { - _serverName = server; - if (s_janitor == null) { - s_janitor = new CleanupThread(); - Runtime.getRuntime().addShutdownHook(new CleanupThread()); - } - } - - public String getLocatorName() { - return _serverName; - } - - @Override - public String getName() { - return getLocatorName(); - } - - protected Pair>>> parse2(String filename) { - try { - SAXParserFactory spfactory = SAXParserFactory.newInstance(); - SAXParser saxParser = spfactory.newSAXParser(); - _daoMap = new LinkedHashMap>>(); - _managerMap = new LinkedHashMap>(); - _checkerMap = new LinkedHashMap>(); - _adapterMap = new 
HashMap>(); - _factories = new HashMap, Class>(); - _pluginsMap = new LinkedHashMap>(); - File file = PropertiesUtil.findConfigFile(filename); - if (file == null) { - s_logger.info("Unable to find " + filename); - return null; - } - s_logger.info("Config file found at " + file.getAbsolutePath() + ". Configuring " + _serverName); - XmlHandler handler = new XmlHandler(_serverName); - saxParser.parse(file, handler); - - HashMap>> adapters = new HashMap>>(); - if (handler.parent != null) { - String[] tokens = handler.parent.split(":"); - String parentFile = filename; - String parentName = handler.parent; - if (tokens.length > 1) { - parentFile = tokens[0]; - parentName = tokens[1]; - } - LegacyComponentLocator parentLocator = new LegacyComponentLocator(parentName); - adapters.putAll(parentLocator.parse2(parentFile).second()); - _daoMap.putAll(parentLocator._daoMap); - _managerMap.putAll(parentLocator._managerMap); - _factories.putAll(parentLocator._factories); - _pluginsMap.putAll(parentLocator._pluginsMap); - } - - ComponentLibrary library = null; - if (handler.library != null) { - Class clazz = Class.forName(handler.library); - library = (ComponentLibrary)clazz.newInstance(); - _daoMap.putAll(library.getDaos()); - _managerMap.putAll(library.getManagers()); - adapters.putAll(library.getAdapters()); - _factories.putAll(library.getFactories()); - _pluginsMap.putAll(library.getPluggableServices()); - } - - _daoMap.putAll(handler.daos); - _managerMap.putAll(handler.managers); - _checkerMap.putAll(handler.checkers); - adapters.putAll(handler.adapters); - _pluginsMap.putAll(handler.pluggableServices); - - return new Pair>>>(handler, adapters); - } catch (ParserConfigurationException e) { - s_logger.error("Unable to load " + _serverName + " due to errors while parsing " + filename, e); - System.exit(1); - } catch (SAXException e) { - s_logger.error("Unable to load " + _serverName + " due to errors while parsing " + filename, e); - System.exit(1); - } catch (IOException e) { - s_logger.error("Unable to load " + _serverName + " due to errors while reading from " + filename, e); - System.exit(1); - } catch (CloudRuntimeException e) { - s_logger.error("Unable to load configuration for " + _serverName + " from " + filename, e); - System.exit(1); - } catch (Exception e) { - s_logger.error("Unable to load configuration for " + _serverName + " from " + filename, e); - System.exit(1); - } - return null; - } - - protected void parse(String filename) { - Pair>>> result = parse2(filename); - if (result == null) { - s_logger.info("Skipping configuration using " + filename); - return; - } - - instantiatePluggableServices(); - - XmlHandler handler = result.first(); - HashMap>> adapters = result.second(); - try { - runCheckers(); - startDaos(); // daos should not be using managers and adapters. - instantiateAdapters(adapters); - instantiateManagers(); - if (handler.componentClass != null) { - _component = createInstance(handler.componentClass, true, true); - } - configureManagers(); - configureAdapters(); - startManagers(); - startAdapters(); - //TODO do we need to follow the instantiate -> inject -> configure -> start -> stop flow of singletons like managers/adapters? - //TODO do we need to expose pluggableServices to MBean (provide getNames?) 
- } catch (CloudRuntimeException e) { - s_logger.error("Unable to load configuration for " + _serverName + " from " + filename, e); - System.exit(1); - } catch (Exception e) { - s_logger.error("Unable to load configuration for " + _serverName + " from " + filename, e); - System.exit(1); - } - } - - protected void runCheckers() { - Set>> entries = _checkerMap.entrySet(); - for (Map.Entry> entry : entries) { - ComponentInfo info = entry.getValue(); - try { - info.instance = (SystemIntegrityChecker)createInstance(info.clazz, false, info.singleton); - info.instance.check(); - } catch (Exception e) { - s_logger.error("Problems with running checker:" + info.name, e); - System.exit(1); - } - } - } - /** - * Daos should not refer to any other components so it is safe to start them - * here. - */ - protected void startDaos() { - Set>>> entries = _daoMap.entrySet(); - - for (Map.Entry>> entry : entries) { - ComponentInfo> info = entry.getValue(); - try { - info.instance = (GenericDao)createInstance(info.clazz, true, info.singleton); - if (info.singleton) { - s_logger.info("Starting singleton DAO: " + info.name); - Singleton singleton = s_singletons.get(info.clazz); - if (singleton.state == Singleton.State.Instantiated) { - inject(info.clazz, info.instance); - singleton.state = Singleton.State.Injected; - } - if (singleton.state == Singleton.State.Injected) { - if (!info.instance.configure(info.name, info.params)) { - s_logger.error("Unable to configure DAO: " + info.name); - System.exit(1); - } - singleton.state = Singleton.State.Started; - } - } else { - s_logger.info("Starting DAO: " + info.name); - inject(info.clazz, info.instance); - if (!info.instance.configure(info.name, info.params)) { - s_logger.error("Unable to configure DAO: " + info.name); - System.exit(1); - } - } - } catch (ConfigurationException e) { - s_logger.error("Unable to configure DAO: " + info.name, e); - System.exit(1); - } catch (Exception e) { - s_logger.error("Problems while configuring DAO: " + info.name, e); - System.exit(1); - } - if (info.instance instanceof ManagementBean) { - registerMBean((ManagementBean) info.instance); - } - } - } - - private static Object createInstance(Class clazz, boolean inject, boolean singleton, Object... 
args) { - Factory factory = null; - Singleton entity = null; - synchronized(s_factories) { - if (singleton) { - entity = s_singletons.get(clazz); - if (entity != null) { - s_logger.debug("Found singleton instantiation for " + clazz.toString()); - return entity.singleton; - } - } - InjectInfo info = s_factories.get(clazz); - if (info == null) { - Enhancer enhancer = new Enhancer(); - enhancer.setSuperclass(clazz); - enhancer.setCallbackFilter(s_callbackFilter); - enhancer.setCallbacks(s_callbacks); - factory = (Factory)enhancer.create(); - info = new InjectInfo(enhancer, factory); - s_factories.put(clazz, info); - } else { - factory = info.factory; - } - } - - - Class[] argTypes = null; - if (args != null && args.length > 0) { - Constructor[] constructors = clazz.getConstructors(); - for (Constructor constructor : constructors) { - Class[] paramTypes = constructor.getParameterTypes(); - if (paramTypes.length == args.length) { - boolean found = true; - for (int i = 0; i < paramTypes.length; i++) { - if (!paramTypes[i].isAssignableFrom(args[i].getClass()) && !paramTypes[i].isPrimitive()) { - found = false; - break; - } - } - if (found) { - argTypes = paramTypes; - break; - } - } - } - - if (argTypes == null) { - throw new CloudRuntimeException("Unable to find constructor to match parameters given: " + clazz.getName()); - } - - entity = new Singleton(factory.newInstance(argTypes, args, s_callbacks)); - } else { - entity = new Singleton(factory.newInstance(s_callbacks)); - } - - if (inject) { - inject(clazz, entity.singleton); - entity.state = Singleton.State.Injected; - } - - if (singleton) { - synchronized(s_factories) { - s_singletons.put(clazz, entity); - } - } - - return entity.singleton; - } - - - protected ComponentInfo> getDao(String name) { - ComponentInfo> info = _daoMap.get(name); - if (info == null) { - throw new CloudRuntimeException("Unable to find DAO " + name); - } - - return info; - } - - public static synchronized Object getComponent(String componentName) { - synchronized(_hasCheckerRun) { - /* System Integrity checker will run before all components really loaded */ - if (!_hasCheckerRun && !componentName.equalsIgnoreCase(SystemIntegrityChecker.Name)) { - LegacyComponentLocator.getComponent(SystemIntegrityChecker.Name); - _hasCheckerRun = true; - } - } - - LegacyComponentLocator locator = s_locators.get(componentName); - if (locator == null) { - locator = LegacyComponentLocator.getLocator(componentName); - } - return locator._component; - } - - public > T getDao(Class clazz) { - ComponentInfo> info = getDao(clazz.getName()); - return info != null ? 
(T)info.instance : null; - } - - protected void instantiateManagers() { - Set>> entries = _managerMap.entrySet(); - for (Map.Entry> entry : entries) { - ComponentInfo info = entry.getValue(); - if (info.instance == null) { - s_logger.info("Instantiating Manager: " + info.name); - info.instance = (Manager)createInstance(info.clazz, false, info.singleton); - } - } - } - - protected void configureManagers() { - Set>> entries = _managerMap.entrySet(); - for (Map.Entry> entry : entries) { - ComponentInfo info = entry.getValue(); - if (info.singleton) { - Singleton s = s_singletons.get(info.clazz); - if (s.state == Singleton.State.Instantiated) { - s_logger.debug("Injecting singleton Manager: " + info.name); - inject(info.clazz, info.instance); - s.state = Singleton.State.Injected; - } - } else { - s_logger.info("Injecting Manager: " + info.name); - inject(info.clazz, info.instance); - } - } - for (Map.Entry> entry : entries) { - ComponentInfo info = entry.getValue(); - if (info.singleton) { - Singleton s = s_singletons.get(info.clazz); - if (s.state == Singleton.State.Injected) { - s_logger.info("Configuring singleton Manager: " + info.name); - try { - info.instance.configure(info.name, info.params); - } catch (ConfigurationException e) { - s_logger.error("Unable to configure manager: " + info.name, e); - System.exit(1); - } - s.state = Singleton.State.Configured; - } - } else { - s_logger.info("Configuring Manager: " + info.name); - try { - info.instance.configure(info.name, info.params); - } catch (ConfigurationException e) { - s_logger.error("Unable to configure manager: " + info.name, e); - System.exit(1); - } - } - } - } - - protected static void inject(Class clazz, Object entity) { - LegacyComponentLocator locator = LegacyComponentLocator.getCurrentLocator(); - - do { - Field[] fields = clazz.getDeclaredFields(); - for (Field field : fields) { - Inject inject = field.getAnnotation(Inject.class); - if (inject == null) { - com.cloud.utils.component.Inject oldInject = field.getAnnotation(com.cloud.utils.component.Inject.class); - if(inject != null) { - Class fc = field.getType(); - Object instance = null; - if (Manager.class.isAssignableFrom(fc)) { - s_logger.trace("Manager: " + fc.getName()); - instance = locator.getManager(fc); - } else if (GenericDao.class.isAssignableFrom(fc)) { - s_logger.trace("Dao:" + fc.getName()); - instance = locator.getDao((Class>)fc); - } else if (Adapters.class.isAssignableFrom(fc)) { - s_logger.trace("Adapter" + fc.getName()); - instance = locator.getAdapters(oldInject.adapter()); - } else { - s_logger.trace("Other:" + fc.getName()); - instance = locator.getManager(fc); - } - } - - continue; - } - - Class fc = field.getType(); - Object instance = null; - if (Manager.class.isAssignableFrom(fc)) { - s_logger.trace("Manager: " + fc.getName()); - instance = locator.getManager(fc); - } else if (GenericDao.class.isAssignableFrom(fc)) { - s_logger.trace("Dao:" + fc.getName()); - instance = locator.getDao((Class>)fc); - } else { - s_logger.trace("Other:" + fc.getName()); - instance = locator.getManager(fc); - } - - if (instance == null) { - throw new CloudRuntimeException("Unable to inject " + fc.getSimpleName() + " in " + clazz.getSimpleName()); - } - - try { - field.setAccessible(true); - field.set(entity, instance); - } catch (IllegalArgumentException e) { - throw new CloudRuntimeException("hmmm....is it really illegal?", e); - } catch (IllegalAccessException e) { - throw new CloudRuntimeException("what! what ! 
what!", e); - } - } - clazz = clazz.getSuperclass(); - } while (clazz != Object.class && clazz != null); - } - - protected void startManagers() { - Set>> entries = _managerMap.entrySet(); - for (Map.Entry> entry : entries) { - ComponentInfo info = entry.getValue(); - if (info.singleton) { - Singleton s = s_singletons.get(info.clazz); - if (s.state == Singleton.State.Configured) { - s_logger.info("Starting singleton Manager: " + info.name); - if (!info.instance.start()) { - throw new CloudRuntimeException("Incorrect Configuration: " + info.name); - } - if (info.instance instanceof ManagementBean) { - registerMBean((ManagementBean) info.instance); - } - s_logger.info("Started Manager: " + info.name); - s.state = Singleton.State.Started; - } - } else { - s_logger.info("Starting Manager: " + info.name); - if (!info.instance.start()) { - throw new CloudRuntimeException("Incorrect Configuration: " + info.name); - } - if (info.instance instanceof ManagementBean) { - registerMBean((ManagementBean) info.instance); - } - s_logger.info("Started Manager: " + info.name); - } - } - } - - protected void registerMBean(ManagementBean mbean) { - try { - JmxUtil.registerMBean(mbean); - } catch (MalformedObjectNameException e) { - s_logger.warn("Unable to register MBean: " + mbean.getName(), e); - } catch (InstanceAlreadyExistsException e) { - s_logger.warn("Unable to register MBean: " + mbean.getName(), e); - } catch (MBeanRegistrationException e) { - s_logger.warn("Unable to register MBean: " + mbean.getName(), e); - } catch (NotCompliantMBeanException e) { - s_logger.warn("Unable to register MBean: " + mbean.getName(), e); - } - s_logger.info("Registered MBean: " + mbean.getName()); - } - - protected ComponentInfo getManager(String name) { - ComponentInfo mgr = _managerMap.get(name); - return mgr; - } - - public T getManager(Class clazz) { - ComponentInfo info = getManager(clazz.getName()); - if (info == null) { - return null; - } - if (info.instance == null) { - info.instance = (Manager)createInstance(info.clazz, false, info.singleton); - } - return (T)info.instance; - } - - protected void configureAdapters() { - for (Adapters adapters : _adapterMap.values()) { - List> infos = adapters._infos; - for (ComponentInfo info : infos) { - try { - if (info.singleton) { - Singleton singleton = s_singletons.get(info.clazz); - if (singleton.state == Singleton.State.Instantiated) { - s_logger.info("Injecting singleton Adapter: " + info.getName()); - inject(info.clazz, info.instance); - singleton.state = Singleton.State.Injected; - } - if (singleton.state == Singleton.State.Injected) { - s_logger.info("Configuring singleton Adapter: " + info.getName()); - if (!info.instance.configure(info.name, info.params)) { - s_logger.error("Unable to configure adapter: " + info.name); - System.exit(1); - } - singleton.state = Singleton.State.Configured; - } - } else { - s_logger.info("Injecting Adapter: " + info.getName()); - inject(info.clazz, info.instance); - s_logger.info("Configuring singleton Adapter: " + info.getName()); - if (!info.instance.configure(info.name, info.params)) { - s_logger.error("Unable to configure adapter: " + info.name); - System.exit(1); - } - } - } catch (ConfigurationException e) { - s_logger.error("Unable to configure adapter: " + info.name, e); - System.exit(1); - } catch (Exception e) { - s_logger.error("Unable to configure adapter: " + info.name, e); - System.exit(1); - } - } - } - } - - protected void populateAdapters(Map>> map) { - Set>>> entries = map.entrySet(); - for (Map.Entry>> entry : 
entries) { - for (ComponentInfo info : entry.getValue()) { - s_logger.info("Instantiating Adapter: " + info.name); - info.instance = (Adapter)createInstance(info.clazz, false, info.singleton); - } - Adapters adapters = new Adapters(entry.getKey(), entry.getValue()); - _adapterMap.put(entry.getKey(), adapters); - } - } - - protected void instantiateAdapters(Map>> map) { - Set>>> entries = map.entrySet(); - for (Map.Entry>> entry : entries) { - for (ComponentInfo info : entry.getValue()) { - s_logger.info("Instantiating Adapter: " + info.name); - info.instance = (Adapter)createInstance(info.clazz, false, info.singleton); - } - Adapters adapters = new Adapters(entry.getKey(), entry.getValue()); - _adapterMap.put(entry.getKey(), adapters); - } - } - - protected void startAdapters() { - for (Map.Entry> entry : _adapterMap.entrySet()) { - for (ComponentInfo adapter : entry.getValue()._infos) { - if (adapter.singleton) { - Singleton s = s_singletons.get(adapter.clazz); - if (s.state == Singleton.State.Configured) { - s_logger.info("Starting singleton Adapter: " + adapter.getName()); - if (!adapter.instance.start()) { - throw new CloudRuntimeException("Unable to start adapter: " + adapter.getName()); - } - if (adapter.instance instanceof ManagementBean) { - registerMBean((ManagementBean)adapter.instance); - } - s_logger.info("Started Adapter: " + adapter.instance.getName()); - } - s.state = Singleton.State.Started; - } else { - s_logger.info("Starting Adapter: " + adapter.getName()); - if (!adapter.instance.start()) { - throw new CloudRuntimeException("Unable to start adapter: " + adapter.getName()); - } - if (adapter.instance instanceof ManagementBean) { - registerMBean((ManagementBean)adapter.instance); - } - s_logger.info("Started Adapter: " + adapter.instance.getName()); - } - } - } - } - - protected void instantiatePluggableServices() { - Set>> entries = _pluginsMap.entrySet(); - for (Map.Entry> entry : entries) { - ComponentInfo info = entry.getValue(); - if (info.instance == null) { - s_logger.info("Instantiating PluggableService: " + info.name); - info.instance = (PluggableService)createInstance(info.clazz, false, info.singleton); - - if (info.instance instanceof Plugin) { - Plugin plugin = (Plugin)info.instance; - - ComponentLibrary lib = plugin.getComponentLibrary(); - _managerMap.putAll(lib.getManagers()); - _daoMap.putAll(lib.getDaos()); - } - } - } - } - - protected ComponentInfo getPluggableService(String name) { - ComponentInfo mgr = _pluginsMap.get(name); - return mgr; - } - - public T getPluggableService(Class clazz) { - ComponentInfo info = getPluggableService(clazz.getName()); - if (info == null) { - return null; - } - if (info.instance == null) { - info.instance = (PluggableService)createInstance(info.clazz, false, info.singleton); - } - return (T)info.instance; - } - - public List getAllPluggableServices() { - List services = new ArrayList(); - Set>> entries = _pluginsMap.entrySet(); - for (Map.Entry> entry : entries) { - ComponentInfo info = entry.getValue(); - if (info.instance == null) { - s_logger.info("Instantiating PluggableService: " + info.name); - info.instance = (PluggableService)createInstance(info.clazz, false, info.singleton); - } - services.add((T) info.instance); - } - return services; - } - - public static T inject(Class clazz) { - return (T)createInstance(clazz, true, false); - } - - public T createInstance(Class clazz) { - Class impl = (Class)_factories.get(clazz); - if (impl == null) { - throw new CloudRuntimeException("Unable to find a factory for " + 
clazz); - } - return inject(impl); - } - - public static T inject(Class clazz, Object... args) { - return (T)createInstance(clazz, true, false, args); - } - - @Override - public Map> getAdapterNames() { - HashMap> result = new HashMap>(); - for (Map.Entry> entry : _adapterMap.entrySet()) { - Adapters adapters = entry.getValue(); - Enumeration en = adapters.enumeration(); - List lst = new ArrayList(); - while (en.hasMoreElements()) { - Adapter adapter = en.nextElement(); - lst.add(adapter.getName() + "-" + adapter.getClass().getName()); - } - result.put(entry.getKey(), lst); - } - return result; - } - - public Map> getAllAccessibleAdapters() { - Map> parentResults = new HashMap>(); - Map> results = getAdapterNames(); - parentResults.putAll(results); - return parentResults; - } - - @Override - public Collection getManagerNames() { - Collection names = new HashSet(); - for (Map.Entry> entry : _managerMap.entrySet()) { - names.add(entry.getValue().name); - } - return names; - } - - @Override - public Collection getDaoNames() { - Collection names = new HashSet(); - for (Map.Entry>> entry : _daoMap.entrySet()) { - names.add(entry.getValue().name); - } - return names; - } - - public Adapters getAdapters(Class clazz) { - return (Adapters)getAdapters(clazz.getName()); - } - - public Adapters getAdapters(String key) { - Adapters adapters = _adapterMap.get(key); - if (adapters != null) { - return adapters; - } - return new Adapters(key, new ArrayList>()); - } - - protected void resetInterceptors(InterceptorLibrary library) { - library.addInterceptors(s_interceptors); - if (s_interceptors.size() > 0) { - s_callbacks = new Callback[s_interceptors.size() + 2]; - int i = 0; - s_callbacks[i++] = NoOp.INSTANCE; - s_callbacks[i++] = new InterceptorDispatcher(); - for (AnnotationInterceptor interceptor : s_interceptors) { - s_callbacks[i++] = interceptor.getCallback(); - } - s_callbackFilter = new InterceptorFilter(); - } - } - - protected static LegacyComponentLocator getLocatorInternal(String server, boolean setInThreadLocal, String configFileName, String log4jFilename) { - synchronized(s_once) { - if (!s_once) { - File file = PropertiesUtil.findConfigFile(log4jFilename + ".xml"); - if (file != null) { - s_logger.info("log4j configuration found at " + file.getAbsolutePath()); - DOMConfigurator.configureAndWatch(file.getAbsolutePath()); - } else { - file = PropertiesUtil.findConfigFile(log4jFilename + ".properties"); - if (file != null) { - s_logger.info("log4j configuration found at " + file.getAbsolutePath()); - PropertyConfigurator.configureAndWatch(file.getAbsolutePath()); - } - } - s_once = true; - } - } - - LegacyComponentLocator locator; - synchronized (s_locators) { - locator = s_locators.get(server); - if (locator == null) { - locator = new LegacyComponentLocator(server); - s_locators.put(server, locator); - if (setInThreadLocal) { - s_tl.set(locator); - } - locator.parse(configFileName); - } else { - if (setInThreadLocal) { - s_tl.set(locator); - } - } - } - - return locator; - } - - public static LegacyComponentLocator getLocator(String server, String configFileName, String log4jFilename) { - return getLocatorInternal(server, true, configFileName, log4jFilename); - } - - public static LegacyComponentLocator getLocator(String server) { - String configFile = null; - try { - final File propsFile = PropertiesUtil.findConfigFile("environment.properties"); - if (propsFile == null) { - s_logger.debug("environment.properties could not be opened"); - } else { - final FileInputStream finputstream = new 
FileInputStream(propsFile); - final Properties props = new Properties(); - props.load(finputstream); - finputstream.close(); - configFile = props.getProperty("cloud-stack-components-specification"); - } - } catch (IOException e) { - s_logger.debug("environment.properties could not be loaded:" + e.toString()); - } - - if (configFile == null || PropertiesUtil.findConfigFile(configFile) == null) { - configFile = "components.xml"; - if (PropertiesUtil.findConfigFile(configFile) == null){ - s_logger.debug("Can not find components.xml"); - } - } - return getLocatorInternal(server, true, configFile, "log4j-cloud"); - } - - public static LegacyComponentLocator getCurrentLocator() { - return s_tl.get(); - } - - public static class ComponentInfo { - Class clazz; - HashMap params = new HashMap(); - String name; - List keys = new ArrayList(); - T instance; - boolean singleton = true; - - protected ComponentInfo() { - } - - public List getKeys() { - return keys; - } - - public String getName() { - return name; - } - - public ComponentInfo(String name, Class clazz) { - this(name, clazz, new ArrayList>(0)); - } - - public ComponentInfo(String name, Class clazz, T instance) { - this(name, clazz); - this.instance = instance; - } - - public ComponentInfo(String name, Class clazz, List> params) { - this(name, clazz, params, true); - } - - public ComponentInfo(String name, Class clazz, List> params, boolean singleton) { - this.name = name; - this.clazz = clazz; - this.singleton = singleton; - for (Pair param : params) { - this.params.put(param.first(), param.second()); - } - fillInfo(); - } - - protected void fillInfo() { - String clazzName = clazz.getName(); - - Local local = clazz.getAnnotation(Local.class); - if (local == null) { - throw new CloudRuntimeException("Unable to find Local annotation for class " + clazzName); - } - - // Verify that all interfaces specified in the Local annotation is implemented by the class. - Class[] classes = local.value(); - for (int i = 0; i < classes.length; i++) { - if (!classes[i].isInterface()) { - throw new CloudRuntimeException(classes[i].getName() + " is not an interface"); - } - if (classes[i].isAssignableFrom(clazz)) { - keys.add(classes[i].getName()); - s_logger.info("Found component: " + classes[i].getName() + " in " + clazzName + " - " + name); - } else { - throw new CloudRuntimeException(classes[i].getName() + " is not implemented by " + clazzName); - } - } - } - - public void addParameter(String name, String value) { - params.put(name, value); - } - } - - /** - * XmlHandler is used by AdapterManager to handle the SAX parser callbacks. - * It builds a hash map of lists of adapters and a hash map of managers. 
- **/ - protected class XmlHandler extends DefaultHandler { - public HashMap>> adapters; - public HashMap> managers; - public LinkedHashMap> checkers; - public LinkedHashMap>> daos; - public HashMap> pluggableServices; - public String parent; - public String library; - - List> lst; - String paramName; - StringBuilder value; - String serverName; - boolean parse; - ComponentInfo currentInfo; - Class componentClass; - - public XmlHandler(String serverName) { - this.serverName = serverName; - parse = false; - adapters = new HashMap>>(); - managers = new HashMap>(); - checkers = new LinkedHashMap>(); - daos = new LinkedHashMap>>(); - pluggableServices = new HashMap>(); - value = null; - parent = null; - } - - protected void fillInfo(Attributes atts, Class interphace, ComponentInfo info) { - String clazzName = getAttribute(atts, "class"); - if (clazzName == null) { - throw new CloudRuntimeException("Missing class attribute for " + interphace.getName()); - } - info.name = getAttribute(atts, "name"); - if (info.name == null) { - throw new CloudRuntimeException("Missing name attribute for " + interphace.getName()); - } - s_logger.debug("Looking for class " + clazzName); - try { - info.clazz = Class.forName(clazzName); - } catch (ClassNotFoundException e) { - throw new CloudRuntimeException("Unable to find class: " + clazzName); - } catch (Throwable e) { - throw new CloudRuntimeException("Caught throwable: ", e); - } - - if (!interphace.isAssignableFrom(info.clazz)) { - throw new CloudRuntimeException("Class " + info.clazz.toString() + " does not implment " + interphace); - } - String singleton = getAttribute(atts, "singleton"); - if (singleton != null) { - info.singleton = Boolean.parseBoolean(singleton); - } - - info.fillInfo(); - } - - @Override - public void startElement(String namespaceURI, String localName, String qName, Attributes atts) - throws SAXException { - if (qName.equals("interceptor") && s_interceptors.size() == 0) { - synchronized(s_interceptors){ - if (s_interceptors.size() == 0) { - String libraryName = getAttribute(atts, "library"); - try { - Class libraryClazz = Class.forName(libraryName); - InterceptorLibrary library = (InterceptorLibrary)libraryClazz.newInstance(); - resetInterceptors(library); - } catch (ClassNotFoundException e) { - throw new CloudRuntimeException("Unable to find " + libraryName, e); - } catch (InstantiationException e) { - throw new CloudRuntimeException("Unable to instantiate " + libraryName, e); - } catch (IllegalAccessException e) { - throw new CloudRuntimeException("Illegal access " + libraryName, e); - } - } - } - } - if (!parse) { - if (qName.equals(_serverName)) { - parse = true; - parent = getAttribute(atts, "extends"); - String implementationClass = getAttribute(atts, "class"); - if (implementationClass != null) { - try { - componentClass = Class.forName(implementationClass); - } catch (ClassNotFoundException e) { - throw new CloudRuntimeException("Unable to find " + implementationClass, e); - } - } - - library = getAttribute(atts, "library"); - } - } else if (qName.equals("adapters")) { - lst = new ArrayList>(); - String key = getAttribute(atts, "key"); - if (key == null) { - throw new CloudRuntimeException("Missing key attribute for adapters"); - } - adapters.put(key, lst); - } else if (qName.equals("adapter")) { - ComponentInfo info = new ComponentInfo(); - fillInfo(atts, Adapter.class, info); - lst.add(info); - currentInfo = info; - } else if (qName.equals("manager")) { - ComponentInfo info = new ComponentInfo(); - fillInfo(atts, Manager.class, 
info); - s_logger.info("Adding Manager: " + info.name); - for (String key : info.keys) { - s_logger.info("Linking " + key + " to " + info.name); - managers.put(key, info); - } - currentInfo = info; - } else if (qName.equals("param")) { - paramName = getAttribute(atts, "name"); - value = new StringBuilder(); - } else if (qName.equals("dao")) { - ComponentInfo> info = new ComponentInfo>(); - fillInfo(atts, GenericDao.class, info); - for (String key : info.keys) { - daos.put(key, info); - } - currentInfo = info; - } else if (qName.equals("checker")) { - ComponentInfo info = new ComponentInfo(); - fillInfo(atts, SystemIntegrityChecker.class, info); - checkers.put(info.name, info); - s_logger.info("Adding system integrity checker: " + info.name); - currentInfo = info; - } else if (qName.equals("pluggableservice") || qName.equals("plugin")) { - ComponentInfo info = new ComponentInfo(); - fillInfo(atts, PluggableService.class, info); - s_logger.info("Adding PluggableService: " + info.name); - String key = getAttribute(atts, "key"); - if (key == null) { - throw new CloudRuntimeException("Missing key attribute for pluggableservice: "+info.name); - } - s_logger.info("Linking " + key + " to " + info.name); - pluggableServices.put(key, info); - currentInfo = info; - } else { - // ignore - } - } - - protected String getAttribute(Attributes atts, String name) { - for (int att = 0; att < atts.getLength(); att++) { - String attName = atts.getQName(att); - if (attName.equals(name)) { - return atts.getValue(att); - } - } - return null; - } - - @Override - public void endElement(String namespaceURI, String localName, String qName) throws SAXException { - if (!parse) { - return; - } - - if (qName.equals(_serverName)) { - parse = false; - } else if (qName.equals("adapters")) { - } else if (qName.equals("adapter")) { - } else if (qName.equals("manager")) { - } else if (qName.equals("dao")) { - } else if (qName.equals("pluggableservice")) { - } else if (qName.equals("param")) { - currentInfo.params.put(paramName, value.toString()); - paramName = null; - value = null; - } else { - // ignore - } - } - - @Override - public void characters(char[] ch, int start, int length) throws SAXException { - if (parse && value != null) { - value.append(ch, start, length); - } - } - } - - protected static class InjectInfo { - public Factory factory; - public Enhancer enhancer; - - public InjectInfo(Enhancer enhancer, Factory factory) { - this.factory = factory; - this.enhancer = enhancer; - } - } - - protected static class CleanupThread extends Thread { - @Override - public void run() { - synchronized (CleanupThread.class) { - for (LegacyComponentLocator locator : s_locators.values()) { - Iterator> itAdapters = locator._adapterMap.values().iterator(); - while (itAdapters.hasNext()) { - Adapters adapters = itAdapters.next(); - itAdapters.remove(); - for (ComponentInfo adapter : adapters._infos) { - if (adapter.singleton) { - Singleton singleton = s_singletons.get(adapter.clazz); - if (singleton.state == Singleton.State.Started) { - s_logger.info("Asking " + adapter.getName() + " to shutdown."); - adapter.instance.stop(); - singleton.state = Singleton.State.Stopped; - } else { - s_logger.debug("Skippng " + adapter.getName() + " because it has already stopped"); - } - } else { - s_logger.info("Asking " + adapter.getName() + " to shutdown."); - adapter.instance.stop(); - } - } - } - } - - for (LegacyComponentLocator locator : s_locators.values()) { - Iterator> itManagers = locator._managerMap.values().iterator(); - while 
(itManagers.hasNext()) {
-                    ComponentInfo<Manager> manager = itManagers.next();
-                    itManagers.remove();
-                    if (manager.singleton == true) {
-                        Singleton singleton = s_singletons.get(manager.clazz);
-                        if (singleton != null && singleton.state == Singleton.State.Started) {
-                            s_logger.info("Asking Manager " + manager.getName() + " to shutdown.");
-                            manager.instance.stop();
-                            singleton.state = Singleton.State.Stopped;
-                        } else {
-                            s_logger.info("Skipping Manager " + manager.getName() + " because it is not in a state to shutdown.");
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    static class Singleton {
-        public enum State {
-            Instantiated,
-            Injected,
-            Configured,
-            Started,
-            Stopped
-        }
-
-        public Object singleton;
-        public State state;
-
-        public Singleton(Object singleton) {
-            this.singleton = singleton;
-            this.state = State.Instantiated;
-        }
-    }
-
-    protected class InterceptorDispatcher implements MethodInterceptor {
-
-        @Override
-        public Object intercept(Object object, Method method, Object[] args, MethodProxy methodProxy) throws Throwable {
-            ArrayList<Pair<AnnotationInterceptor<?>, Object>> interceptors = new ArrayList<Pair<AnnotationInterceptor<?>, Object>>();
-            for (AnnotationInterceptor<?> interceptor : s_interceptors) {
-                if (interceptor.needToIntercept(method)) {
-                    Object obj = interceptor.interceptStart(method);
-                    interceptors.add(new Pair<AnnotationInterceptor<?>, Object>((AnnotationInterceptor<?>)interceptor, obj));
-                }
-            }
-            boolean success = false;
-            try {
-                Object obj = methodProxy.invokeSuper(object, args);
-                success = true;
-                return obj;
-            } finally {
-                for (Pair<AnnotationInterceptor<?>, Object> interceptor : interceptors) {
-                    if (success) {
-                        interceptor.first().interceptComplete(method, interceptor.second());
-                    } else {
-                        interceptor.first().interceptException(method, interceptor.second());
-                    }
-                }
-            }
-        }
-    }
-
-    protected static class InterceptorFilter implements CallbackFilter {
-        @Override
-        public int accept(Method method) {
-            int index = 0;
-            for (int i = 2; i < s_callbacks.length; i++) {
-                AnnotationInterceptor<?> interceptor = (AnnotationInterceptor<?>)s_callbacks[i];
-                if (interceptor.needToIntercept(method)) {
-                    if (index == 0) {
-                        index = i;
-                    } else {
-                        return 1;
-                    }
-                }
-            }
-
-            return index;
-        }
-    }
-}
diff --git a/utils/src/com/cloud/utils/component/PluggableService.java b/utils/src/com/cloud/utils/component/PluggableService.java
index d2199394a69..f6f72a904d0 100644
--- a/utils/src/com/cloud/utils/component/PluggableService.java
+++ b/utils/src/com/cloud/utils/component/PluggableService.java
@@ -16,9 +16,11 @@
 // under the License.
 package com.cloud.utils.component;
 
+import java.util.Map;
+
 // This interface defines methods for pluggable code within the Cloud Stack.
 public interface PluggableService {
     // The config command properties filenames that list allowed API commands
     // and role masks supported by this pluggable service
-    String[] getPropertiesFiles();
+    Map<String, String> getProperties();
 }
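Since getProperties() now returns the parsed command/role map directly, an implementation can delegate to the new PropertiesUtil.processConfigFile helper. A minimal sketch; the service class and file name are hypothetical:

import java.util.Map;

import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.component.PluggableService;

// Hypothetical pluggable service, for illustration only.
public class ExampleNetworkService implements PluggableService {
    @Override
    public Map<String, String> getProperties() {
        // API command names mapped to role masks, parsed from a properties file
        return PropertiesUtil.processConfigFile(new String[] { "examplenetwork_commands.properties" });
    }
}

diff --git a/utils/src/com/cloud/utils/component/Plugin.java b/utils/src/com/cloud/utils/component/Plugin.java
deleted file mode 100755
index ffd704c7558..00000000000
--- a/utils/src/com/cloud/utils/component/Plugin.java
+++ /dev/null
@@ -1,64 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// the License.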
diff --git a/utils/src/com/cloud/utils/component/Plugin.java b/utils/src/com/cloud/utils/component/Plugin.java
deleted file mode 100755
index ffd704c7558..00000000000
--- a/utils/src/com/cloud/utils/component/Plugin.java
+++ /dev/null
@@ -1,64 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// the License.  You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloud.utils.component;
-
-import java.util.List;
-
-import com.cloud.utils.Pair;
-
-
-/**
- * CloudStack uses Adapters to implement different capabilities.
- * There are different Adapters such as NetworkGuru, NetworkElement,
- * HypervisorGuru, DeploymentPlanner, etc.  However, Adapters only
- * defines what CloudStack needs from the implementation.  What about
- * what the Adapter itself needs, such as configurations and administrative
- * operations, and what if one implementation can
- * implement two different Adapters?
- *
- * Plugin is a CloudStack container for Adapters.  It rolls the following
- * capabilities into the one package for CloudStack to load at runtime.
- *   - REST API commands supported by the Plugin.
- *   - Components needed by the Plugin.
- *   - Adapters implemented by the Plugin.
- *   - Database operations
- *
- */
-public interface Plugin extends PluggableService {
-
-    /**
-     * Retrieves the component libraries needed by this Plugin.
-     * ComponentLocator put these components and add them to the startup
-     * and shutdown processes of CloudStack.  This is only needed if the
-     * Plugin uses ComponentLocator to inject what it needs.  If the
-     * Plugin uses other mechanisms, then it can return null here.
-     *
-     * @return a component library that contains the components this Plugin
-     *         contains and needs.
-     */
-    ComponentLibrary getComponentLibrary();
-
-    /**
-     * Retrieves the list of Adapters and the interface they implement.  It
-     * can be an empty list if the Plugin does not implement any.
-     *
-     * @return list of pairs where the first is the interface and the second
-     *         is the adapter.
-     */
-    List<Pair<Class<?>, Class<?>>> getAdapterImplementations();
-}
diff --git a/utils/src/com/cloud/utils/crypt/EncryptionSecretKeyChecker.java b/utils/src/com/cloud/utils/crypt/EncryptionSecretKeyChecker.java
index 78d3200d113..a9c670d6aa9 100755
--- a/utils/src/com/cloud/utils/crypt/EncryptionSecretKeyChecker.java
+++ b/utils/src/com/cloud/utils/crypt/EncryptionSecretKeyChecker.java
@@ -40,108 +40,108 @@ import com.cloud.utils.exception.CloudRuntimeException;
 @Local(value = {SystemIntegrityChecker.class})
 public class EncryptionSecretKeyChecker implements SystemIntegrityChecker {
-
-    private static final Logger s_logger = Logger.getLogger(EncryptionSecretKeyChecker.class);
-
+
+    private static final Logger s_logger = Logger.getLogger(EncryptionSecretKeyChecker.class);
+
     private static final String s_keyFile = "/etc/cloud/management/key";
     private static final String s_envKey = "CLOUD_SECRET_KEY";
     private static StandardPBEStringEncryptor s_encryptor = new StandardPBEStringEncryptor();
     private static boolean s_useEncryption = false;
-
+
     @Override
     public void check() {
-        //Get encryption type from db.properties
-        final File dbPropsFile = PropertiesUtil.findConfigFile("db.properties");
+        //Get encryption type from db.properties
+        final File dbPropsFile = PropertiesUtil.findConfigFile("db.properties");
         final Properties dbProps = new Properties();
         try {
-            dbProps.load(new FileInputStream(dbPropsFile));
+            dbProps.load(new FileInputStream(dbPropsFile));
 
-            final String encryptionType = dbProps.getProperty("db.cloud.encryption.type");
-
-            s_logger.debug("Encryption Type: "+ encryptionType);
+            final String encryptionType = dbProps.getProperty("db.cloud.encryption.type");
 
-            if(encryptionType == null || encryptionType.equals("none")){
-                return;
-            }
-
-            s_encryptor.setAlgorithm("PBEWithMD5AndDES");
-            String secretKey = null;
-
-            SimpleStringPBEConfig stringConfig = new SimpleStringPBEConfig();
-
-            if(encryptionType.equals("file")){
-                try {
-                    BufferedReader in = new BufferedReader(new FileReader(s_keyFile));
-                    secretKey = in.readLine();
-                    //Check for null or empty secret key
-                } catch (FileNotFoundException e) {
-                    throw new CloudRuntimeException("File containing secret key not found: "+s_keyFile, e);
-                } catch (IOException e) {
-                    throw new CloudRuntimeException("Error while reading secret key from: "+s_keyFile, e);
-                }
-
-                if(secretKey == null || secretKey.isEmpty()){
-                    throw new CloudRuntimeException("Secret key is null or empty in file "+s_keyFile);
-                }
-
-            } else if(encryptionType.equals("env")){
-                secretKey = System.getenv(s_envKey);
-                if(secretKey == null || secretKey.isEmpty()){
-                    throw new CloudRuntimeException("Environment variable "+s_envKey+" is not set or empty");
-                }
-            } else if(encryptionType.equals("web")){
-                ServerSocket serverSocket = null;
-                int port = 8097;
-                try {
+            s_logger.debug("Encryption Type: "+ encryptionType);
+
+            if(encryptionType == null || encryptionType.equals("none")){
+                return;
+            }
+
+            s_encryptor.setAlgorithm("PBEWithMD5AndDES");
+            String secretKey = null;
+
+            SimpleStringPBEConfig stringConfig = new SimpleStringPBEConfig();
+
+            if(encryptionType.equals("file")){
+                try {
+                    BufferedReader in = new BufferedReader(new FileReader(s_keyFile));
+                    secretKey = in.readLine();
+                    //Check for null or empty secret key
+                } catch (FileNotFoundException e) {
+                    throw new CloudRuntimeException("File containing secret key not found: "+s_keyFile, e);
+                } catch (IOException e) {
+                    throw new CloudRuntimeException("Error while reading secret key from: "+s_keyFile, e);
+                }
+
+                if(secretKey == null || secretKey.isEmpty()){
+                    throw new CloudRuntimeException("Secret key is null or empty in file "+s_keyFile);
+                }
+
+            } else if(encryptionType.equals("env")){
+                secretKey = System.getenv(s_envKey);
+                if(secretKey == null || secretKey.isEmpty()){
+                    throw new CloudRuntimeException("Environment variable "+s_envKey+" is not set or empty");
+                }
+            } else if(encryptionType.equals("web")){
+                ServerSocket serverSocket = null;
+                int port = 8097;
+                try {
                     serverSocket = new ServerSocket(port);
                 } catch (IOException ioex) {
-                    throw new CloudRuntimeException("Error initializing secret key reciever", ioex);
+                    throw new CloudRuntimeException("Error initializing secret key receiver", ioex);
                 }
-                s_logger.info("Waiting for admin to send secret key on port "+port);
-                Socket clientSocket = null;
-                try {
-                    clientSocket = serverSocket.accept();
-                } catch (IOException e) {
-                    throw new CloudRuntimeException("Accept failed on "+port);
-                }
-                PrintWriter out = new PrintWriter(clientSocket.getOutputStream(), true);
-                BufferedReader in = new BufferedReader(new InputStreamReader(clientSocket.getInputStream()));
-                String inputLine, outputLine;
-                if ((inputLine = in.readLine()) != null) {
-                    secretKey = inputLine;
-                }
-                out.close();
-                in.close();
-                clientSocket.close();
-                serverSocket.close();
-            } else {
-                throw new CloudRuntimeException("Invalid encryption type: "+encryptionType);
-            }
+                s_logger.info("Waiting for admin to send secret key on port "+port);
+                Socket clientSocket = null;
+                try {
+                    clientSocket = serverSocket.accept();
+                } catch (IOException e) {
+                    throw new CloudRuntimeException("Accept failed on "+port);
+                }
+                PrintWriter out = new PrintWriter(clientSocket.getOutputStream(), true);
+                BufferedReader in = new BufferedReader(new InputStreamReader(clientSocket.getInputStream()));
+                String inputLine;
+                if ((inputLine = in.readLine()) != null) {
+                    secretKey = inputLine;
+                }
+                out.close();
+                in.close();
+                clientSocket.close();
+                serverSocket.close();
+            } else {
+                throw new CloudRuntimeException("Invalid encryption type: "+encryptionType);
+            }
 
-            stringConfig.setPassword(secretKey);
-            s_encryptor.setConfig(stringConfig);
-            s_useEncryption = true;
+            stringConfig.setPassword(secretKey);
+            s_encryptor.setConfig(stringConfig);
+            s_useEncryption = true;
         } catch (FileNotFoundException e) {
-            throw new CloudRuntimeException("File db.properties not found", e);
+            throw new CloudRuntimeException("File db.properties not found", e);
         } catch (IOException e) {
-            throw new CloudRuntimeException("Error while reading db.properties", e);
+            throw new CloudRuntimeException("Error while reading db.properties", e);
         }
     }
-
+
     public static StandardPBEStringEncryptor getEncryptor() {
         return s_encryptor;
     }
-
+
     public static boolean useEncryption(){
-        return s_useEncryption;
+        return s_useEncryption;
     }
-
+
     //Initialize encryptor for migration during secret key change
     public static void initEncryptorForMigration(String secretKey){
-        s_encryptor.setAlgorithm("PBEWithMD5AndDES");
-        SimpleStringPBEConfig stringConfig = new SimpleStringPBEConfig();
-        stringConfig.setPassword(secretKey);
-        s_encryptor.setConfig(stringConfig);
-        s_useEncryption = true;
+        s_encryptor.setAlgorithm("PBEWithMD5AndDES");
+        SimpleStringPBEConfig stringConfig = new SimpleStringPBEConfig();
+        stringConfig.setPassword(secretKey);
+        s_encryptor.setConfig(stringConfig);
+        s_useEncryption = true;
    }
 }
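The checker above only wires a password into the shared `StandardPBEStringEncryptor`; the actual encryption and decryption of configuration values happen elsewhere through `getEncryptor()`. A minimal sketch of that round trip with the jasypt API the class already uses (the key and property value below are made up):

```java
import org.jasypt.encryption.pbe.StandardPBEStringEncryptor;
import org.jasypt.encryption.pbe.config.SimpleStringPBEConfig;

public class EncryptorRoundTrip {
    public static void main(String[] args) {
        StandardPBEStringEncryptor encryptor = new StandardPBEStringEncryptor();
        encryptor.setAlgorithm("PBEWithMD5AndDES"); // same algorithm the checker configures

        SimpleStringPBEConfig config = new SimpleStringPBEConfig();
        config.setPassword("example-secret-key"); // stands in for the key read from file/env/web
        encryptor.setConfig(config);

        String cipherText = encryptor.encrypt("my-db-password");
        System.out.println("encrypted: " + cipherText);
        System.out.println("decrypted: " + encryptor.decrypt(cipherText));
    }
}
```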
diff --git a/utils/src/com/cloud/utils/crypt/EncryptionSecretKeySender.java b/utils/src/com/cloud/utils/crypt/EncryptionSecretKeySender.java
index 390443768e1..2dc865cfec0 100755
--- a/utils/src/com/cloud/utils/crypt/EncryptionSecretKeySender.java
+++ b/utils/src/com/cloud/utils/crypt/EncryptionSecretKeySender.java
@@ -16,8 +16,6 @@
 // under the License.
 package com.cloud.utils.crypt;
 
-import java.io.BufferedReader;
-import java.io.InputStreamReader;
 import java.io.PrintWriter;
 import java.net.InetAddress;
 import java.net.Socket;
@@ -26,39 +24,37 @@ import com.cloud.utils.NumbersUtil;
 
 public class EncryptionSecretKeySender {
-    public static void main(String args[]){
-        try {
+    public static void main(String args[]){
+        try {
 
-            // Create a socket to the host
-            String hostname = "localhost";
-            int port = 8097;
-
-            if(args.length == 2){
-                hostname = args[0];
-                port = NumbersUtil.parseInt(args[1], port);
-            }
-
-
-            InetAddress addr = InetAddress.getByName(hostname);
-            Socket socket = new Socket(addr, port);
-            PrintWriter out = new PrintWriter(socket.getOutputStream(), true);
-            BufferedReader in = new BufferedReader(new InputStreamReader(
-                    socket.getInputStream()));
-            java.io.BufferedReader stdin = new java.io.BufferedReader(new java.io.InputStreamReader(System.in));
-            String validationWord = "cloudnine";
-            String validationInput = "";
-            while(!validationWord.equals(validationInput)){
-                System.out.print("Enter Validation Word:");
-                validationInput = stdin.readLine();
-                System.out.println();
-            }
-            System.out.print("Enter Secret Key:");
-            String input = stdin.readLine();
-            if (input != null) {
-                out.println(input);
-            }
-        } catch (Exception e) {
-            System.out.print("Exception while sending secret key "+e);
-        }
-    }
+            // Create a socket to the host
+            String hostname = "localhost";
+            int port = 8097;
+
+            if(args.length == 2){
+                hostname = args[0];
+                port = NumbersUtil.parseInt(args[1], port);
+            }
+
+
+            InetAddress addr = InetAddress.getByName(hostname);
+            Socket socket = new Socket(addr, port);
+            PrintWriter out = new PrintWriter(socket.getOutputStream(), true);
+            java.io.BufferedReader stdin = new java.io.BufferedReader(new java.io.InputStreamReader(System.in));
+            String validationWord = "cloudnine";
+            String validationInput = "";
+            while(!validationWord.equals(validationInput)){
+                System.out.print("Enter Validation Word:");
+                validationInput = stdin.readLine();
+                System.out.println();
+            }
+            System.out.print("Enter Secret Key:");
+            String input = stdin.readLine();
+            if (input != null) {
+                out.println(input);
+            }
+        } catch (Exception e) {
+            System.out.print("Exception while sending secret key "+e);
+        }
+    }
 }
diff --git a/utils/src/com/cloud/utils/db/GenericDao.java b/utils/src/com/cloud/utils/db/GenericDao.java
index 2fae1afe43d..15d04b76a1c 100755
--- a/utils/src/com/cloud/utils/db/GenericDao.java
+++ b/utils/src/com/cloud/utils/db/GenericDao.java
@@ -56,7 +56,7 @@ public interface GenericDao<T, ID extends Serializable> {
     T findById(ID id, boolean fresh);
 
     // Finds one unique VO using uuid
-    T findByUuid(ID uuid);
+    T findByUuid(String uuid);
 
     /**
      * @return VO object ready to be used for update. It won't have any fields filled in.
diff --git a/utils/src/com/cloud/utils/db/GenericDaoBase.java b/utils/src/com/cloud/utils/db/GenericDaoBase.java
index 92e9e1c4405..880e9de22a8 100755
--- a/utils/src/com/cloud/utils/db/GenericDaoBase.java
+++ b/utils/src/com/cloud/utils/db/GenericDaoBase.java
@@ -915,7 +915,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements GenericDao<T, ID> {
     @Override
     @DB(txn=false)
     @SuppressWarnings("unchecked")
-    public T findByUuid(final ID uuid) {
+    public T findByUuid(final String uuid) {
         SearchCriteria<T> sc = createSearchCriteria();
         sc.addAnd("uuid", SearchCriteria.Op.EQ, uuid);
         return findOneBy(sc);
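The signature change matters for callers: uuid columns are strings, but the old `findByUuid(ID uuid)` forced the value through the DAO's ID type (typically `Long`). A hedged sketch of a call site after the change — `HostVO` and the `hostDao` parameter are illustrative stand-ins for any VO/DAO pair, not code from this patch:

```java
import com.cloud.utils.db.GenericDao;

public class FindByUuidExample {
    // Hypothetical VO with a uuid column; real VOs are JPA-annotated.
    static class HostVO {
        long id;
        long getId() { return id; }
    }

    // After the change the uuid is passed as the String it actually is.
    static void lookup(GenericDao<HostVO, Long> hostDao) {
        HostVO host = hostDao.findByUuid("f81d4fae-7dec-11d0-a765-00a0c91e6bf6");
        if (host != null) {
            System.out.println("found host id " + host.getId());
        }
    }
}
```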
+        // Get the VO object's table name.
+        String tablename = AnnotationHelper.getTableName(voObj);
+        if (tablename != null) {
+            addProxyObject(tablename, id, idFieldName);
+        }
+        return;
     }
 
-    public RuntimeCloudException() {
-        super();
-        setCSErrorCode(CSExceptionErrorCode.getCSErrCode(this.getClass().getName()));
-    }
+    public RuntimeCloudException() {
+        super();
+        setCSErrorCode(CSExceptionErrorCode.getCSErrCode(this.getClass().getName()));
+    }
 
-    public ArrayList getIdProxyList() {
-        return idList;
-    }
+    public ArrayList getIdProxyList() {
+        return idList;
+    }
 
-    public void setCSErrorCode(int cserrcode) {
-        this.csErrorCode = cserrcode;
-    }
+    public void setCSErrorCode(int cserrcode) {
+        this.csErrorCode = cserrcode;
+    }
 
-    public int getCSErrorCode() {
-        return this.csErrorCode;
-    }
+    public int getCSErrorCode() {
+        return this.csErrorCode;
+    }
 }
diff --git a/utils/src/com/cloud/utils/fsm/FiniteState2.java b/utils/src/com/cloud/utils/fsm/FiniteState2.java
index 585521d4bf3..0147ba45786 100755
--- a/utils/src/com/cloud/utils/fsm/FiniteState2.java
+++ b/utils/src/com/cloud/utils/fsm/FiniteState2.java
@@ -22,7 +22,7 @@ import java.util.Set;
 
 public interface FiniteState2<T, ChangeEvent> {
 
-    StateMachine2 getStateMachine();
+    StateMachine2<T, ChangeEvent> getStateMachine();
 
     T getNextState(ChangeEvent e) throws NoTransitionException;
diff --git a/utils/src/com/cloud/utils/log/CglibThrowableRenderer.java b/utils/src/com/cloud/utils/log/CglibThrowableRenderer.java
index 06cd0c36906..83c1dce4a52 100644
--- a/utils/src/com/cloud/utils/log/CglibThrowableRenderer.java
+++ b/utils/src/com/cloud/utils/log/CglibThrowableRenderer.java
@@ -17,7 +17,6 @@
 package com.cloud.utils.log;
 
 import java.io.PrintWriter;
-import java.lang.reflect.Method;
 import java.util.ArrayList;
 
 import org.apache.log4j.spi.ThrowableRenderer;
@@ -35,26 +34,11 @@ import org.apache.log4j.spi.ThrowableRenderer;
  *
  */
 public class CglibThrowableRenderer implements ThrowableRenderer {
-    /**
-     * Throwable.getStackTrace() method.
-     */
-    private Method getStackTraceMethod;
-    /**
-     * StackTraceElement.getClassName() method.
-     */
-    private Method getClassNameMethod;
-
     /**
      * Construct new instance.
      */
     public CglibThrowableRenderer() {
-        try {
-            Class[] noArgs = null;
-            getStackTraceMethod = Throwable.class.getMethod("getStackTrace", noArgs);
-            Class ste = Class.forName("java.lang.StackTraceElement");
-            getClassNameMethod = ste.getMethod("getClassName", noArgs);
-        } catch (Exception ex) {
-        }
+        super();
     }
 
     @Override
@@ -94,24 +78,4 @@ public class CglibThrowableRenderer implements ThrowableRenderer {
             return null;
         }
     }
-
-    /**
-     * Find class given class name.
-     *
-     * @param className class name, may not be null.
-     * @return class, will not be null.
-     * @throws ClassNotFoundException thrown if class can not be found.
-     */
-    private Class findClass(final String className) throws ClassNotFoundException {
-        try {
-            return Thread.currentThread().getContextClassLoader().loadClass(className);
-        } catch (ClassNotFoundException e) {
-            try {
-                return Class.forName(className);
-            } catch (ClassNotFoundException e1) {
-                return getClass().getClassLoader().loadClass(className);
-            }
-        }
-    }
-
 }
diff --git a/utils/src/com/cloud/utils/net/MacAddress.java b/utils/src/com/cloud/utils/net/MacAddress.java
index f81127c6660..15350c8b4d3 100755
--- a/utils/src/com/cloud/utils/net/MacAddress.java
+++ b/utils/src/com/cloud/utils/net/MacAddress.java
@@ -60,16 +60,16 @@ public class MacAddress {
         StringBuilder buff = new StringBuilder();
         Formatter formatter = new Formatter(buff);
         formatter.format("%02x%s%02x%s%02x%s%02x%s%02x%s%02x",
-                _addr >> 40 & 0xff, separator,
-                _addr >> 32 & 0xff, separator,
-                _addr >> 24 & 0xff, separator,
-                _addr >> 16 & 0xff, separator,
-                _addr >> 8 & 0xff, separator,
-                _addr & 0xff);
+                _addr >> 40 & 0xff, separator,
+                _addr >> 32 & 0xff, separator,
+                _addr >> 24 & 0xff, separator,
+                _addr >> 16 & 0xff, separator,
+                _addr >> 8 & 0xff, separator,
+                _addr & 0xff);
         return buff.toString();
-
+
         /*
-
+
         String str = Long.toHexString(_addr);
 
         for (int i = str.length() - 1; i >= 0; i--) {
@@ -79,11 +79,11 @@ public class MacAddress {
             }
         }
         return buff.reverse().toString();
-         */
+        */
     }
 
     @Override
-    public String toString() {
+    public String toString() {
         return toString(":");
     }
@@ -102,7 +102,7 @@ public class MacAddress {
         } else if (osname.startsWith("Solaris") || osname.startsWith("SunOS")) {
             // Solaris code must appear before the generic code
             String hostName = MacAddress.getFirstLineOfCommand(new String[] { "uname",
-                    "-n"});
+                    "-n"});
             if (hostName != null) {
                 p = Runtime.getRuntime().exec(new String[] { "/usr/sbin/arp", hostName}, null);
             }
@@ -163,7 +163,7 @@ public class MacAddress {
                 clockSeqAndNode |= (long) (Math.random() * 0x7FFFFFFF);
             }
         }
-
+
         s_address = new MacAddress(clockSeqAndNode);
     }
@@ -262,9 +262,6 @@ public class MacAddress {
         System.out.println("addr in char is " + addr.toString(":"));
     }
 
-private static final char[] DIGITS = { '0', '1', '2', '3', '4', '5', '6',
-        '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
-
 /**
  * Parses a long from a hex encoded number. This method will skip
  * all characters that are not 0-9 and a-f (the String is lower cased first).
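The removed `DIGITS` table belonged to this parse routine; the surviving comment describes the algorithm. A self-contained sketch of that logic — not the class's actual code, just the technique the comment names:

```java
public class HexParseSketch {
    // Lower-case the input, keep only [0-9a-f], and fold each nibble
    // into the accumulator; separators such as ':' or '-' are skipped.
    static long parseHex(String s) {
        long value = 0;
        for (char c : s.toLowerCase().toCharArray()) {
            int nibble;
            if (c >= '0' && c <= '9') {
                nibble = c - '0';
            } else if (c >= 'a' && c <= 'f') {
                nibble = c - 'a' + 10;
            } else {
                continue; // not a hex digit: skip it
            }
            value = (value << 4) | nibble;
        }
        return value;
    }

    public static void main(String[] args) {
        System.out.printf("%012x%n", parseHex("00:1A:2B:3C:4D:5E")); // prints 001a2b3c4d5e
    }
}
```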
diff --git a/utils/src/com/cloud/utils/net/NetUtils.java b/utils/src/com/cloud/utils/net/NetUtils.java
index c456cdcca8e..005fe239c49 100755
--- a/utils/src/com/cloud/utils/net/NetUtils.java
+++ b/utils/src/com/cloud/utils/net/NetUtils.java
@@ -17,15 +17,12 @@
 package com.cloud.utils.net;
 
 import java.io.BufferedReader;
-import java.io.File;
 import java.io.InputStreamReader;
 import java.lang.reflect.Array;
 import java.net.InetAddress;
 import java.net.InterfaceAddress;
 import java.net.NetworkInterface;
 import java.net.SocketException;
-import java.net.URISyntaxException;
-import java.net.URL;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Formatter;
@@ -39,7 +36,6 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.apache.log4j.Logger;
-import org.apache.log4j.xml.DOMConfigurator;
 
 import com.cloud.utils.IteratorUtil;
 import com.cloud.utils.Pair;
@@ -681,7 +677,7 @@ public class NetUtils {
         if (avoid.size() >= range) {
             return -1;
         }
-
+
         //Reduce the range by the size of the avoid set
         //e.g., cidr = 192.168.10.0, size = /24, avoid = 192.168.10.1, 192.168.10.20, 192.168.10.254
         // range = 2^8 - 1 - 3 = 252
@@ -690,9 +686,9 @@ public class NetUtils {
         long ip = startIp + next;
         for (Long avoidable : avoid) {
             if (ip >= avoidable) {
-                ip++;
+                ip++;
             } else {
-                break;
+                break;
             }
         }
@@ -796,7 +792,7 @@ public class NetUtils {
         long shift = 32 - cidrBLong[1];
         return ((cidrALong[0] >> shift) == (cidrBLong[0] >> shift));
     }
-
+
     public static Long[] cidrToLong(String cidr) {
         if (cidr == null || cidr.isEmpty()) {
             return null;
         }
@@ -960,26 +956,6 @@ public class NetUtils {
         return Integer.toString(portRange[0]) + ":" + Integer.toString(portRange[1]);
     }
 
-    // test only
-    private static void configLog4j() {
-        URL configUrl = System.class.getResource("/conf/log4j-cloud.xml");
-        if (configUrl != null) {
-            System.out.println("Configure log4j using log4j-cloud.xml");
-
-            try {
-                File file = new File(configUrl.toURI());
-
-                System.out.println("Log4j configuration from : " + file.getAbsolutePath());
-                DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000);
-            } catch (URISyntaxException e) {
-                System.out.println("Unable to convert log4j configuration Url to URI");
-            }
-            // DOMConfigurator.configure(configUrl);
-        } else {
-            System.out.println("Configure log4j with default properties");
-        }
-    }
-
     public static boolean verifyDomainNameLabel(String hostName, boolean isHostName) {
         // must be between 1 and 63 characters long and may contain only the ASCII letters 'a' through 'z' (in a
@@ -1068,7 +1044,7 @@ public class NetUtils {
 
         return true;
     }
-
+
     public static boolean isNetworksOverlap(String cidrA, String cidrB) {
         Long[] cidrALong = cidrToLong(cidrA);
         Long[] cidrBLong = cidrToLong(cidrB);
@@ -1122,7 +1098,7 @@ public class NetUtils {
         }
         return true;
     }
-
+
     public static boolean validateIcmpType(long icmpType) {
         //Source - http://www.erg.abdn.ac.uk/~gorry/course/inet-pages/icmp-code.html
         if(!(icmpType >=0 && icmpType <=255)) {
@@ -1131,15 +1107,15 @@ public class NetUtils {
         }
         return true;
     }
-
+
     public static boolean validateIcmpCode(long icmpCode) {
-
+
         //Source - http://www.erg.abdn.ac.uk/~gorry/course/inet-pages/icmp-code.html
         if(!(icmpCode >=0 && icmpCode <=15)) {
             s_logger.warn("Icmp code should be within 0-15 range");
             return false;
         }
-
+
         return true;
     }
 }
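The random-IP hunk above relies on the avoid set iterating in ascending order: draw from the range reduced by the avoid count, then shift the candidate up past every avoided address at or below it. A self-contained sketch of that skip-forward trick (the names are mine, not NetUtils's):

```java
import java.util.Random;
import java.util.SortedSet;
import java.util.TreeSet;

public class RandomIpSketch {
    // Returns a value in [startIp, startIp + range) that is not in `avoid`,
    // or -1 when the avoid set exhausts the range.
    static long randomFromRange(long startIp, int range, SortedSet<Long> avoid, Random rnd) {
        if (avoid.size() >= range) {
            return -1;
        }
        int next = rnd.nextInt(range - avoid.size()); // draw from the reduced range
        long ip = startIp + next;
        for (Long avoidable : avoid) {               // ascending iteration
            if (ip >= avoidable) {
                ip++;    // candidate collides with or passes an avoided ip: shift up
            } else {
                break;   // all remaining avoided ips are larger
            }
        }
        return ip;
    }

    public static void main(String[] args) {
        SortedSet<Long> avoid = new TreeSet<Long>();
        avoid.add(5L);
        System.out.println(randomFromRange(0L, 10, avoid, new Random()));
    }
}
```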
diff --git a/utils/src/com/cloud/utils/net/NfsUtils.java b/utils/src/com/cloud/utils/net/NfsUtils.java
index 7318383e9ed..19ff05594a0 100644
--- a/utils/src/com/cloud/utils/net/NfsUtils.java
+++ b/utils/src/com/cloud/utils/net/NfsUtils.java
@@ -21,18 +21,17 @@ import java.net.URI;
 import java.net.URISyntaxException;
 
 public class NfsUtils {
-
+
     public static String url2Mount(String urlStr) throws URISyntaxException {
         URI url;
         url = new URI(urlStr);
-        int port = url.getPort();
         return url.getHost() + ":" + url.getPath();
     }
-
+
     public static String uri2Mount(URI uri) {
         return uri.getHost() + ":" + uri.getPath();
     }
-
+
     public static String url2PathSafeString(String urlStr) {
         String safe = urlStr.replace(File.separatorChar, '-');
         safe = safe.replace("?", "");
@@ -41,13 +40,13 @@ public class NfsUtils {
         safe = safe.replace("/", "");
         return safe;
     }
-
+
     public static String getHostPart(String nfsPath) {
-        String toks[] = nfsPath.split(":");
-        if (toks != null && toks.length == 2) {
-            return toks[0];
-        }
-        return null;
+        String toks[] = nfsPath.split(":");
+        if (toks != null && toks.length == 2) {
+            return toks[0];
+        }
+        return null;
     }
 }
diff --git a/utils/src/com/cloud/utils/nio/HandlerFactory.java b/utils/src/com/cloud/utils/nio/HandlerFactory.java
index 0dcc83fb442..9cf218d99eb 100755
--- a/utils/src/com/cloud/utils/nio/HandlerFactory.java
+++ b/utils/src/com/cloud/utils/nio/HandlerFactory.java
@@ -16,7 +16,6 @@
 // under the License.
 package com.cloud.utils.nio;
 
-import java.util.List;
 
 /**
  * WorkerFactory creates and selects workers.
diff --git a/utils/src/com/cloud/utils/nio/Link.java b/utils/src/com/cloud/utils/nio/Link.java
index 3e3da6c11d5..4b041f5b265 100755
--- a/utils/src/com/cloud/utils/nio/Link.java
+++ b/utils/src/com/cloud/utils/nio/Link.java
@@ -16,18 +16,15 @@
 // under the License.
 package com.cloud.utils.nio;
 
-import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
-import java.nio.channels.Channels;
 import java.nio.channels.ClosedChannelException;
 import java.nio.channels.SelectionKey;
 import java.nio.channels.SocketChannel;
-import java.nio.channels.WritableByteChannel;
 import java.security.KeyStore;
 import java.util.concurrent.ConcurrentLinkedQueue;
 
@@ -35,10 +32,10 @@ import javax.net.ssl.KeyManagerFactory;
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.SSLEngine;
 import javax.net.ssl.SSLEngineResult;
+import javax.net.ssl.SSLEngineResult.HandshakeStatus;
 import javax.net.ssl.SSLSession;
 import javax.net.ssl.TrustManager;
 import javax.net.ssl.TrustManagerFactory;
-import javax.net.ssl.SSLEngineResult.HandshakeStatus;
 
 import org.apache.log4j.Logger;
 
@@ -48,7 +45,7 @@ import com.cloud.utils.PropertiesUtil;
  */
 public class Link {
     private static final Logger s_logger = Logger.getLogger(Link.class);
-
+
     private final InetSocketAddress _addr;
     private final NioConnection _connection;
     private SelectionKey _key;
@@ -58,7 +55,7 @@ public class Link {
     private Object _attach;
     private boolean _readHeader;
     private boolean _gotFollowingPacket;
-
+
     private SSLEngine _sslEngine;
 
     public Link(InetSocketAddress addr, NioConnection connection) {
@@ -71,23 +68,23 @@ public class Link {
         _readHeader = true;
         _gotFollowingPacket = false;
     }
-
+
     public Link (Link link) {
         this(link._addr, link._connection);
     }
-
+
     public Object attachment() {
         return _attach;
     }
-
+
     public void attach(Object attach) {
         _attach = attach;
     }
-
+
     public void setKey(SelectionKey key) {
         _key = key;
     }
-
+
     public void setSSLEngine(SSLEngine sslEngine) {
         _sslEngine = sslEngine;
     }
@@ -105,19 +102,19 @@ public class Link {
         synchronized(buff) {
             buff.clear();
             buff.limit(4);
-
+
             while (buff.hasRemaining()) {
                 if (ch.read(buff) == -1) {
                     throw new IOException("Connection closed with -1 on reading size.");
                 }
             }
-
+
             buff.flip();
-
+
             int length = buff.getInt();
             ByteArrayOutputStream output = new ByteArrayOutputStream(length);
             WritableByteChannel outCh = Channels.newChannel(output);
-
+
             int count = 0;
             while (count < length) {
                 buff.clear();
@@ -129,19 +126,19 @@ public class Link {
                 buff.flip();
                 outCh.write(buff);
             }
-
+
             return output.toByteArray();
         }
     }
-     */
-
+     */
+
     private static void doWrite(SocketChannel ch, ByteBuffer[] buffers, SSLEngine sslEngine) throws IOException {
         SSLSession sslSession = sslEngine.getSession();
         ByteBuffer pkgBuf = ByteBuffer.allocate(sslSession.getPacketBufferSize() + 40);
         SSLEngineResult engResult;
         ByteBuffer headBuf = ByteBuffer.allocate(4);
-
+
         int totalLen = 0;
         for (ByteBuffer buffer : buffers) {
             totalLen += buffer.limit();
@@ -157,7 +154,7 @@ public class Link {
                     engResult.getStatus() != SSLEngineResult.Status.OK) {
                 throw new IOException("SSL: SSLEngine return bad result! " + engResult);
             }
-
+
             processedLen = 0;
             for (ByteBuffer buffer : buffers) {
                 processedLen += buffer.position();
@@ -189,7 +186,7 @@ public class Link {
             }
         }
     }
-
+
     /**
      * write method to write to a socket. This method writes to completion so
      * it doesn't follow the nio standard. We use this to make sure we write
@@ -204,21 +201,21 @@ public class Link {
             doWrite(ch, buffers, sslEngine);
         }
     }
-
+
     /* SSL has a limitation of 16k per record; we may need to split packets. 18000 is 16k plus some extra SSL information */
     protected static final int MAX_SIZE_PER_PACKET = 18000;
     protected static final int HEADER_FLAG_FOLLOWING = 0x10000;
-
+
     public byte[] read(SocketChannel ch) throws IOException {
         if (_readHeader) { // Start of a packet
             if (_readBuffer.position() == 0) {
                 _readBuffer.limit(4);
             }
-
+
             if (ch.read(_readBuffer) == -1) {
                 throw new IOException("Connection closed with -1 on reading size.");
             }
-
+
             if (_readBuffer.hasRemaining()) {
                 s_logger.trace("Need to read the rest of the packet length");
                 return null;
@@ -229,24 +226,24 @@ public class Link {
             if (s_logger.isTraceEnabled()) {
                 s_logger.trace("Packet length is " + readSize);
             }
-
+
             if (readSize > MAX_SIZE_PER_PACKET) {
-                throw new IOException("Wrong packet size: " + readSize);
+                throw new IOException("Wrong packet size: " + readSize);
             }
-
+
             if (!_gotFollowingPacket) {
                 _plaintextBuffer = ByteBuffer.allocate(2000);
             }
-
+
             if ((header & HEADER_FLAG_FOLLOWING) != 0) {
                 _gotFollowingPacket = true;
             } else {
                 _gotFollowingPacket = false;
            }
-
+
            _readBuffer.clear();
            _readHeader = false;
-
+
            if (_readBuffer.capacity() < readSize) {
                if (s_logger.isTraceEnabled()) {
                    s_logger.trace("Resizing the byte buffer from " + _readBuffer.capacity());
@@ -255,18 +252,18 @@ public class Link {
            }
            _readBuffer.limit(readSize);
        }
-
+
        if (ch.read(_readBuffer) == -1) {
            throw new IOException("Connection closed with -1 on read.");
        }
-
+
        if (_readBuffer.hasRemaining()) {   // We're not done yet.
            if (s_logger.isTraceEnabled()) {
                s_logger.trace("Still has " + _readBuffer.remaining());
            }
            return null;
        }
-
+
        _readBuffer.flip();
 
        ByteBuffer appBuf;
@@ -287,7 +284,7 @@ public class Link {
            if (remaining == _readBuffer.remaining()) {
                throw new IOException("SSL: Unable to unwrap received data! still remaining " + remaining + "bytes!");
            }
-
+
            appBuf.flip();
            if (_plaintextBuffer.remaining() < appBuf.limit()) {
                // We need to expand _plaintextBuffer for more data
@@ -301,10 +298,10 @@ public class Link {
                s_logger.trace("Done with packet: " + appBuf.limit());
            }
        }
-
+
        _readBuffer.clear();
        _readHeader = true;
-
+
        if (!_gotFollowingPacket) {
            _plaintextBuffer.flip();
            byte[] result = new byte[_plaintextBuffer.limit()];
@@ -317,15 +314,15 @@ public class Link {
            return null;
        }
    }
-
+
    public void send(byte[] data) throws ClosedChannelException {
        send(data, false);
    }
-
+
    public void send(byte[] data, boolean close) throws ClosedChannelException {
        send(new ByteBuffer[] { ByteBuffer.wrap(data) }, close);
    }
-
+
    public void send(ByteBuffer[] data, boolean close) throws ClosedChannelException {
        ByteBuffer[] item = new ByteBuffer[data.length + 1];
        int remaining = 0;
@@ -333,15 +330,15 @@ public class Link {
            remaining += data[i].remaining();
            item[i + 1] = data[i];
        }
-
+
        item[0] = ByteBuffer.allocate(4);
        item[0].putInt(remaining);
        item[0].flip();
-
+
        if (s_logger.isTraceEnabled()) {
            s_logger.trace("Sending packet of length " + remaining);
        }
-
+
        _writeQueue.add(item);
        if (close) {
            _writeQueue.add(new ByteBuffer[0]);
@@ -353,17 +350,17 @@ public class Link {
            _connection.change(SelectionKey.OP_WRITE, _key, null);
        }
    }
-
+
    public void send(ByteBuffer[] data) throws ClosedChannelException {
        send(data, false);
    }
-
+
    public synchronized void close() {
        if (_key != null) {
            _connection.close(_key);
        }
    }
-
+
    public boolean write(SocketChannel ch) throws IOException {
        ByteBuffer[] data = null;
        while ((data = _writeQueue.poll()) != null) {
@@ -381,26 +378,26 @@ public class Link {
        }
        return false;
    }
-
+
    public InetSocketAddress getSocketAddress() {
        return _addr;
    }
-
+
    public String getIpAddress() {
        return _addr.getAddress().toString();
    }
-
+
    public synchronized void terminated() {
        _key = null;
    }
-
+
    public synchronized void schedule(Task task) throws ClosedChannelException {
        if (_key == null) {
            throw new ClosedChannelException();
        }
        _connection.scheduleTask(task);
    }
-
+
    public static SSLContext initSSLContext(boolean isClient) throws Exception {
        InputStream stream;
        SSLContext sslContext = null;
@@ -408,42 +405,42 @@ public class Link {
        TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509");
        KeyStore ks = KeyStore.getInstance("JKS");
        TrustManager[] tms;
-
+
        if (!isClient) {
-            char[] passphrase = "vmops.com".toCharArray();
-            File confFile= PropertiesUtil.findConfigFile("db.properties");
-            /* This line may throw a NPE, but that's due to fail to find db.properities, meant some bugs in the other places */
-            String confPath = confFile.getParent();
-            String keystorePath = confPath + "/cloud.keystore";
-            if (new File(keystorePath).exists()) {
-                stream = new FileInputStream(keystorePath);
-            } else {
-                s_logger.warn("SSL: Fail to find the generated keystore. Loading fail-safe one to continue.");
-                stream = NioConnection.class.getResourceAsStream("/cloud.keystore");
-            }
-            ks.load(stream, passphrase);
-            stream.close();
-            kmf.init(ks, passphrase);
-            tmf.init(ks);
-            tms = tmf.getTrustManagers();
+            char[] passphrase = "vmops.com".toCharArray();
+            File confFile= PropertiesUtil.findConfigFile("db.properties");
+            /* This may throw an NPE when db.properties cannot be found, which indicates a bug elsewhere */
+            String confPath = confFile.getParent();
+            String keystorePath = confPath + "/cloud.keystore";
+            if (new File(keystorePath).exists()) {
+                stream = new FileInputStream(keystorePath);
+            } else {
+                s_logger.warn("SSL: Failed to find the generated keystore. Loading fail-safe one to continue.");
+                stream = NioConnection.class.getResourceAsStream("/cloud.keystore");
+            }
+            ks.load(stream, passphrase);
+            stream.close();
+            kmf.init(ks, passphrase);
+            tmf.init(ks);
+            tms = tmf.getTrustManagers();
        } else {
-            ks.load(null, null);
-            kmf.init(ks, null);
-            tms = new TrustManager[1];
-            tms[0] = new TrustAllManager();
+            ks.load(null, null);
+            kmf.init(ks, null);
+            tms = new TrustManager[1];
+            tms[0] = new TrustAllManager();
        }
-
+
        sslContext = SSLContext.getInstance("TLS");
        sslContext.init(kmf.getKeyManagers(), tms, null);
        if (s_logger.isTraceEnabled()) {
-            s_logger.trace("SSL: SSLcontext has been initialized");
+            s_logger.trace("SSL: SSLcontext has been initialized");
        }
 
        return sslContext;
    }
 
    public static void doHandshake(SocketChannel ch, SSLEngine sslEngine,
-            boolean isClient) throws IOException {
+            boolean isClient) throws IOException {
        if (s_logger.isTraceEnabled()) {
            s_logger.trace("SSL: begin Handshake, isClient: " + isClient);
        }
@@ -452,13 +449,13 @@ public class Link {
        SSLSession sslSession = sslEngine.getSession();
        HandshakeStatus hsStatus;
        ByteBuffer in_pkgBuf =
-                ByteBuffer.allocate(sslSession.getPacketBufferSize() + 40);
+                ByteBuffer.allocate(sslSession.getPacketBufferSize() + 40);
        ByteBuffer in_appBuf =
-                ByteBuffer.allocate(sslSession.getApplicationBufferSize() + 40);
+                ByteBuffer.allocate(sslSession.getApplicationBufferSize() + 40);
        ByteBuffer out_pkgBuf =
-                ByteBuffer.allocate(sslSession.getPacketBufferSize() + 40);
+                ByteBuffer.allocate(sslSession.getPacketBufferSize() + 40);
        ByteBuffer out_appBuf =
-                ByteBuffer.allocate(sslSession.getApplicationBufferSize() + 40);
+                ByteBuffer.allocate(sslSession.getApplicationBufferSize() + 40);
        int count;
 
        if (isClient) {
@@ -498,7 +495,7 @@ public class Link {
                }
                engResult = sslEngine.unwrap(in_pkgBuf, in_appBuf);
                ByteBuffer tmp_pkgBuf =
-                        ByteBuffer.allocate(sslSession.getPacketBufferSize() + 40);
+                        ByteBuffer.allocate(sslSession.getPacketBufferSize() + 40);
                int loop_count = 0;
                while (engResult.getStatus() == SSLEngineResult.Status.BUFFER_UNDERFLOW) {
                    // The client is too slow? Cut it and let it reconnect
@@ -515,13 +512,13 @@ public class Link {
                        throw new IOException("Connection closed with -1 on reading size.");
                    }
                    tmp_pkgBuf.flip();
-
+
                    in_pkgBuf.mark();
                    in_pkgBuf.position(in_pkgBuf.limit());
                    in_pkgBuf.limit(in_pkgBuf.limit() + tmp_pkgBuf.limit());
                    in_pkgBuf.put(tmp_pkgBuf);
                    in_pkgBuf.reset();
-
+
                    in_appBuf.clear();
                    engResult = sslEngine.unwrap(in_pkgBuf, in_appBuf);
                    loop_count ++;
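Behind the SSL plumbing, the wire format `Link` reads and writes is simple: a 4-byte big-endian header carrying the payload length, with a flag bit (`HEADER_FLAG_FOLLOWING`) marking continuation packets since an SSL record tops out near 16k. A plain-socket sketch of that framing, without the SSL layer (helper names are mine):

```java
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class LengthPrefixFraming {
    static final int HEADER_FLAG_FOLLOWING = 0x10000; // same flag bit Link uses

    static void writeFrame(DataOutputStream out, byte[] payload, boolean more) throws IOException {
        int header = payload.length | (more ? HEADER_FLAG_FOLLOWING : 0);
        out.writeInt(header);          // 4-byte header, like item[0] in Link.send()
        out.write(payload);
        out.flush();
    }

    static byte[] readFrame(DataInputStream in) throws IOException {
        int header = in.readInt();
        int length = header & 0xFFFF;  // low 16 bits carry the payload size
        byte[] payload = new byte[length];
        in.readFully(payload);         // Link loops on the channel until complete
        return payload;
    }
}
```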
diff --git a/utils/src/com/cloud/utils/security/CertificateHelper.java b/utils/src/com/cloud/utils/security/CertificateHelper.java
index 327734ac133..8344d725bc6 100644
--- a/utils/src/com/cloud/utils/security/CertificateHelper.java
+++ b/utils/src/com/cloud/utils/security/CertificateHelper.java
@@ -19,7 +19,6 @@ package com.cloud.utils.security;
 import java.io.BufferedInputStream;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.security.Key;
 import java.security.KeyFactory;
@@ -38,72 +37,72 @@ import org.apache.commons.codec.binary.Base64;
 import com.cloud.utils.Ternary;
 
 public class CertificateHelper {
-    public static byte[] buildAndSaveKeystore(String alias, String cert, String privateKey, String storePassword) throws KeyStoreException, CertificateException,
-            NoSuchAlgorithmException, InvalidKeySpecException, IOException {
-        KeyStore ks = buildKeystore(alias, cert, privateKey, storePassword);
-
-        ByteArrayOutputStream os = new ByteArrayOutputStream();
-        ks.store(os, storePassword != null ? storePassword.toCharArray() : null);
-        os.close();
-        return os.toByteArray();
-    }
+    public static byte[] buildAndSaveKeystore(String alias, String cert, String privateKey, String storePassword) throws KeyStoreException, CertificateException,
+            NoSuchAlgorithmException, InvalidKeySpecException, IOException {
+        KeyStore ks = buildKeystore(alias, cert, privateKey, storePassword);
+
+        ByteArrayOutputStream os = new ByteArrayOutputStream();
+        ks.store(os, storePassword != null ? storePassword.toCharArray() : null);
+        os.close();
+        return os.toByteArray();
+    }
 
-    public static byte[] buildAndSaveKeystore(List<Ternary<String, String, String>> certs, String storePassword) throws KeyStoreException, NoSuchAlgorithmException, CertificateException, IOException, InvalidKeySpecException {
-        KeyStore ks = KeyStore.getInstance("JKS");
-        ks.load(null, storePassword != null ? storePassword.toCharArray() : null);
-
-        //name,cert,key
-        for (Ternary<String, String, String> cert : certs) {
-            if (cert.third() == null) {
-                Certificate c = buildCertificate(cert.second());
-                ks.setCertificateEntry(cert.first(), c);
-            } else {
-                Certificate[] c = new Certificate[certs.size()];
-                int i = certs.size();
-                for (Ternary<String, String, String> ct : certs) {
-                    c[i - 1] = buildCertificate(ct.second());
-                    i--;
-                }
-                ks.setKeyEntry(cert.first(), buildPrivateKey(cert.third()), storePassword != null ? storePassword.toCharArray() : null, c );
-            }
-        }
-
-        ByteArrayOutputStream os = new ByteArrayOutputStream();
-        ks.store(os, storePassword != null ? storePassword.toCharArray() : null);
-        os.close();
-        return os.toByteArray();
-    }
+    public static byte[] buildAndSaveKeystore(List<Ternary<String, String, String>> certs, String storePassword) throws KeyStoreException, NoSuchAlgorithmException, CertificateException, IOException, InvalidKeySpecException {
+        KeyStore ks = KeyStore.getInstance("JKS");
+        ks.load(null, storePassword != null ? storePassword.toCharArray() : null);
+
+        //name,cert,key
+        for (Ternary<String, String, String> cert : certs) {
+            if (cert.third() == null) {
+                Certificate c = buildCertificate(cert.second());
+                ks.setCertificateEntry(cert.first(), c);
+            } else {
+                Certificate[] c = new Certificate[certs.size()];
+                int i = certs.size();
+                for (Ternary<String, String, String> ct : certs) {
+                    c[i - 1] = buildCertificate(ct.second());
+                    i--;
+                }
+                ks.setKeyEntry(cert.first(), buildPrivateKey(cert.third()), storePassword != null ? storePassword.toCharArray() : null, c );
+            }
+        }
+
+        ByteArrayOutputStream os = new ByteArrayOutputStream();
+        ks.store(os, storePassword != null ? storePassword.toCharArray() : null);
+        os.close();
+        return os.toByteArray();
+    }
 
-    public static KeyStore loadKeystore(byte[] ksData, String storePassword) throws KeyStoreException, CertificateException, NoSuchAlgorithmException, IOException {
-        assert(ksData != null);
-        KeyStore ks = KeyStore.getInstance("JKS");
-        ks.load(new ByteArrayInputStream(ksData), storePassword != null ? storePassword.toCharArray() : null);
-
-        return ks;
-    }
+    public static KeyStore loadKeystore(byte[] ksData, String storePassword) throws KeyStoreException, CertificateException, NoSuchAlgorithmException, IOException {
+        assert(ksData != null);
+        KeyStore ks = KeyStore.getInstance("JKS");
+        ks.load(new ByteArrayInputStream(ksData), storePassword != null ? storePassword.toCharArray() : null);
+
+        return ks;
+    }
 
-    public static KeyStore buildKeystore(String alias, String cert, String privateKey, String storePassword) throws KeyStoreException, CertificateException,
-            NoSuchAlgorithmException, InvalidKeySpecException, IOException {
-
-        KeyStore ks = KeyStore.getInstance("JKS");
-        ks.load(null, storePassword != null ? storePassword.toCharArray() : null);
-        Certificate[] certs = new Certificate[1];
-        certs[0] = buildCertificate(cert);
-        ks.setKeyEntry(alias, buildPrivateKey(privateKey), storePassword != null ? storePassword.toCharArray() : null, certs );
-        return ks;
-    }
+    public static KeyStore buildKeystore(String alias, String cert, String privateKey, String storePassword) throws KeyStoreException, CertificateException,
+            NoSuchAlgorithmException, InvalidKeySpecException, IOException {
+
+        KeyStore ks = KeyStore.getInstance("JKS");
+        ks.load(null, storePassword != null ? storePassword.toCharArray() : null);
+        Certificate[] certs = new Certificate[1];
+        certs[0] = buildCertificate(cert);
+        ks.setKeyEntry(alias, buildPrivateKey(privateKey), storePassword != null ? storePassword.toCharArray() : null, certs );
+        return ks;
+    }
 
-    public static Certificate buildCertificate(String content) throws CertificateException {
-        assert(content != null);
-
-        BufferedInputStream bis = new BufferedInputStream(new ByteArrayInputStream(content.getBytes()));
-        CertificateFactory cf = CertificateFactory.getInstance("X.509");
-        return cf.generateCertificate(bis);
-    }
+    public static Certificate buildCertificate(String content) throws CertificateException {
+        assert(content != null);
+
+        BufferedInputStream bis = new BufferedInputStream(new ByteArrayInputStream(content.getBytes()));
+        CertificateFactory cf = CertificateFactory.getInstance("X.509");
+        return cf.generateCertificate(bis);
+    }
 
-    public static Key buildPrivateKey(String base64EncodedKeyContent) throws NoSuchAlgorithmException, InvalidKeySpecException, IOException {
-        KeyFactory kf = KeyFactory.getInstance("RSA");
-        PKCS8EncodedKeySpec keysp = new PKCS8EncodedKeySpec (Base64.decodeBase64(base64EncodedKeyContent));
-        return kf.generatePrivate (keysp);
-    }
+    public static Key buildPrivateKey(String base64EncodedKeyContent) throws NoSuchAlgorithmException, InvalidKeySpecException, IOException {
+        KeyFactory kf = KeyFactory.getInstance("RSA");
+        PKCS8EncodedKeySpec keysp = new PKCS8EncodedKeySpec (Base64.decodeBase64(base64EncodedKeyContent));
+        return kf.generatePrivate (keysp);
+    }
 }
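Usage of the helper is straightforward; a sketch of turning a PEM certificate plus a base64 PKCS#8 key into serialized JKS bytes and back (the alias, password, and the elided PEM/key strings are placeholders):

```java
import java.security.KeyStore;

import com.cloud.utils.security.CertificateHelper;

public class KeystoreExample {
    public static void main(String[] args) throws Exception {
        String certPem = "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----";
        String keyBase64 = "...";   // base64-encoded PKCS#8 private key

        // Build an in-memory JKS keystore and serialize it to bytes.
        byte[] ksBytes = CertificateHelper.buildAndSaveKeystore("example-alias", certPem, keyBase64, "changeit");

        // Load it back the same way consumers of the byte[] do.
        KeyStore ks = CertificateHelper.loadKeystore(ksBytes, "changeit");
        System.out.println("alias present: " + ks.containsAlias("example-alias"));
    }
}
```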
diff --git a/utils/test/com/cloud/utils/component/MockComponentLocator.java b/utils/test/com/cloud/utils/component/MockComponentLocator.java
deleted file mode 100755
index d95d2629d06..00000000000
--- a/utils/test/com/cloud/utils/component/MockComponentLocator.java
+++ /dev/null
@@ -1,121 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// the License.  You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.utils.component;
-
-import java.io.Serializable;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
-import net.sf.cglib.proxy.Callback;
-import net.sf.cglib.proxy.NoOp;
-
-import com.cloud.utils.Pair;
-import com.cloud.utils.db.DatabaseCallback;
-import com.cloud.utils.db.DatabaseCallbackFilter;
-import com.cloud.utils.db.GenericDao;
-
-/**
- * defining mock components.
- */
-public class MockComponentLocator extends LegacyComponentLocator {
-    MockComponentLibrary _library = new MockComponentLibrary();
-
-    public MockComponentLocator(String server) {
-        super(server);
-    }
-
-    public ComponentInfo<GenericDao<?, ?>> addDao(String name, Class<? extends GenericDao<?, ?>> dao) {
-        return _library.addDao(name, dao);
-    }
-
-    public ComponentInfo addManager(String name, Class manager) {
-        return _library.addManager(name, manager);
-    }
-
-    public ComponentInfo addOneAdapter(Class interphace, String name, Class adapterClass) {
-        return _library.addOneAdapter(interphace, name, adapterClass);
-    }
-
-    public List<ComponentInfo<Adapter>> addAdapterChain(Class interphace, List<Pair<String, Class<? extends Adapter>>> adapters) {
-        return _library.addAdapterChain(interphace, adapters);
-    }
-
-    public ComponentInfo addService(String name, Class serviceInterphace, Class service) {
-        return _library.addService(name, serviceInterphace, service);
-    }
-
-    @Override
-    protected Pair<XmlHandler, HashMap<String, List<ComponentInfo<Adapter>>>> parse2(String filename) {
-        Pair<XmlHandler, HashMap<String, List<ComponentInfo<Adapter>>>> result = new Pair<XmlHandler, HashMap<String, List<ComponentInfo<Adapter>>>>(new XmlHandler("fake"), new HashMap<String, List<ComponentInfo<Adapter>>>());
-        _daoMap = new LinkedHashMap<String, ComponentInfo<GenericDao<?, ?>>>();
-        _managerMap = new LinkedHashMap<String, ComponentInfo<Manager>>();
-        _checkerMap = new LinkedHashMap<String, ComponentInfo<SystemIntegrityChecker>>();
-        _adapterMap = new HashMap<String, Adapters<? extends Adapter>>();
-        _pluginsMap = new HashMap<String, ComponentInfo<PluggableService>>();
-        _factories = new HashMap<Class<?>, Class<?>>();
-        _daoMap.putAll(_library.getDaos());
-        _managerMap.putAll(_library.getManagers());
-        result.second().putAll(_library.getAdapters());
-        _factories.putAll(_library.getFactories());
-        _pluginsMap.putAll(_library.getPluggableServices());
-        return result;
-    }
-
-    public void makeActive(InterceptorLibrary interceptors) {
-        s_singletons.clear();
-        s_locators.clear();
-        s_factories.clear();
-        s_callbacks = new Callback[] { NoOp.INSTANCE, new DatabaseCallback()};
-        s_callbackFilter = new DatabaseCallbackFilter();
-        s_interceptors.clear();
-        if (interceptors != null) {
-            resetInterceptors(interceptors);
-        }
-        s_tl.set(this);
-        parse("fake file");
-    }
-
-    protected class MockComponentLibrary extends ComponentLibraryBase implements ComponentLibrary {
-
-        @Override
-        public Map<String, List<ComponentInfo<Adapter>>> getAdapters() {
-            return _adapters;
-        }
-
-        @Override
-        public Map<Class<?>, Class<?>> getFactories() {
-            return new HashMap<Class<?>, Class<?>>();
-        }
-
-        @Override
-        public Map<String, ComponentInfo<GenericDao<?, ?>>> getDaos() {
-            return _daos;
-        }
-
-        @Override
-        public Map<String, ComponentInfo<Manager>> getManagers() {
-            return _managers;
-        }
-
-        @Override
-        public Map<String, ComponentInfo<PluggableService>> getPluggableServices() {
-            return _pluggableServices;
-        }
-    }
-}
diff --git a/utils/test/com/cloud/utils/db/TransactionTest.java b/utils/test/com/cloud/utils/db/TransactionTest.java
index 96d31b40597..b952be2c28b 100644
--- a/utils/test/com/cloud/utils/db/TransactionTest.java
+++ b/utils/test/com/cloud/utils/db/TransactionTest.java
@@ -26,7 +26,7 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import com.cloud.utils.component.ComponentLocator;
+import com.cloud.utils.component.ComponentContext;
 import com.cloud.utils.exception.CloudRuntimeException;
 
 /**
@@ -34,7 +34,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
  * all its testcases to set up a test db table, and then tear down these test db artifacts after all testcases are run.
  *
  * @author Min Chen
- * 
+ *
  */
 public class TransactionTest {
@@ -76,7 +76,7 @@ public class TransactionTest {
      * that the same db connection is reused rather than acquiring a new one each time in typical transaction model.
     */
    public void testUserManagedConnection() {
-        DbTestDao testDao = ComponentLocator.inject(DbTestDao.class);
+        DbTestDao testDao = ComponentContext.inject(DbTestDao.class);
        Transaction txn = Transaction.open("SingleConnectionThread");
        Connection conn = null;
        try {
@@ -115,7 +115,7 @@ public class TransactionTest {
     * This test is simulating ClusterHeartBeat process, where the same transaction and db connection is reused.
     */
    public void testTransactionReuse() {
-        DbTestDao testDao = ComponentLocator.inject(DbTestDao.class);
+        DbTestDao testDao = ComponentContext.inject(DbTestDao.class);
        // acquire a db connection and keep it
        Connection conn = null;
        try {
diff --git a/utils/test/com/cloud/utils/log/CglibThrowableRendererTest.java b/utils/test/com/cloud/utils/log/CglibThrowableRendererTest.java
index 5a9501dcc9c..59926f83e7c 100644
--- a/utils/test/com/cloud/utils/log/CglibThrowableRendererTest.java
+++ b/utils/test/com/cloud/utils/log/CglibThrowableRendererTest.java
@@ -18,14 +18,21 @@ package com.cloud.utils.log;
 
 import junit.framework.TestCase;
 
-import org.apache.log4j.Logger;
+import org.apache.log4j.*;
 
-import com.cloud.utils.component.ComponentLocator;
+import com.cloud.utils.component.ComponentContext;
 import com.cloud.utils.db.DB;
 import com.cloud.utils.exception.CloudRuntimeException;
 
+import org.apache.log4j.spi.RootLogger;
+import org.apache.log4j.spi.ThrowableRenderer;
+
+import java.io.CharArrayWriter;
+import java.io.Writer;
+
 public class CglibThrowableRendererTest extends TestCase {
+    static Logger another = Logger.getLogger("TEST");
+
     private final static Logger s_logger = Logger.getLogger(CglibThrowableRendererTest.class);
 
     public static class Test {
         @DB
@@ -48,13 +55,40 @@ public class CglibThrowableRendererTest extends TestCase {
             }
         }
     }
-
+
+    private Logger getAlternateLogger(Writer writer, ThrowableRenderer renderer) {
+        Hierarchy hierarchy = new Hierarchy(new RootLogger(Level.INFO));
+        if (renderer != null) {
+            hierarchy.setThrowableRenderer(renderer);
+        }
+        Logger alternateRoot = hierarchy.getRootLogger();
+        alternateRoot.addAppender(new WriterAppender(new SimpleLayout(), writer));
+        return alternateRoot;
+    }
+
    public void testException() {
-        Test test = ComponentLocator.inject(Test.class);
+        Writer w = new CharArrayWriter();
+        Logger alt = getAlternateLogger(w, null);
+
+        Test test = ComponentContext.inject(Test.class);
        try {
            test.exception();
        } catch (Exception e) {
-            s_logger.warn("exception caught", e);
+            alt.warn("exception caught", e);
        }
+        // first check that we actually have some call traces containing "<generated>"
+        assertTrue(w.toString().contains("<generated>"));
+
+        w = new CharArrayWriter();
+        alt = getAlternateLogger(w, new CglibThrowableRenderer());
+
+        try {
+            test.exception();
+        } catch (Exception e) {
+            alt.warn("exception caught", e);
+        }
+        // then we check that CglibThrowableRenderer indeed removes those occurrences
+        assertFalse(w.toString().contains("<generated>"));
+    }
 }
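The rewritten test builds a private `Hierarchy` so it can capture renderer output without touching the global log4j configuration; in production the renderer is registered once on the logger repository. One way to do that programmatically with log4j 1.2.16+ — a sketch, not this project's actual bootstrap:

```java
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggerRepository;
import org.apache.log4j.spi.ThrowableRendererSupport;

import com.cloud.utils.log.CglibThrowableRenderer;

public class RendererBootstrap {
    public static void main(String[] args) {
        LoggerRepository repository = LogManager.getLoggerRepository();
        if (repository instanceof ThrowableRendererSupport) {
            // Strip CGLIB's <generated> frames from every logged stack trace.
            ((ThrowableRendererSupport) repository).setThrowableRenderer(new CglibThrowableRenderer());
        }
        Logger.getLogger(RendererBootstrap.class).info("renderer installed");
    }
}
```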
diff --git a/utils/test/com/cloud/utils/testcase/ComponentTestCase.java b/utils/test/com/cloud/utils/testcase/ComponentTestCase.java
deleted file mode 100644
index 6fe7af10200..00000000000
--- a/utils/test/com/cloud/utils/testcase/ComponentTestCase.java
+++ /dev/null
@@ -1,44 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// the License.  You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.utils.testcase;
-
-import java.lang.annotation.Annotation;
-
-import com.cloud.utils.component.LegacyComponentLocator;
-
-public class ComponentTestCase extends Log4jEnabledTestCase {
-    @Override
-    protected void setUp() {
-        super.setUp();
-
-        Annotation[] annotations = getClass().getAnnotations();
-        if(annotations != null) {
-            for(Annotation annotation : annotations) {
-                if(annotation instanceof ComponentSetup) {
-                    LegacyComponentLocator.getLocator(
-                            ((ComponentSetup)annotation).managerName(),
-                            ((ComponentSetup)annotation).setupXml(),
-                            ((ComponentSetup)annotation).log4j()
-                    );
-
-                    break;
-                }
-            }
-        }
-    }
-}
-
diff --git a/utils/test/resources/com/cloud/utils/QualifierTestContext.xml b/utils/test/resources/com/cloud/utils/QualifierTestContext.xml
index c045f985b69..313e9d99d1a 100644
--- a/utils/test/resources/com/cloud/utils/QualifierTestContext.xml
+++ b/utils/test/resources/com/cloud/utils/QualifierTestContext.xml
@@ -1,3 +1,21 @@
+