Revert "Merge remote-tracking branch 'origin/javelin' into javelin"

This reverts commit f237759cd3, reversing
changes made to 96bd1d4172.
This commit is contained in:
Edison Su 2013-01-14 17:58:48 -08:00
parent 16376269c3
commit 110465b504
591 changed files with 26464 additions and 27842 deletions

View File

@ -53,10 +53,7 @@ import com.cloud.utils.ProcessUtil;
import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.backoff.BackoffAlgorithm;
import com.cloud.utils.backoff.impl.ConstantTimeBackoff;
import com.cloud.utils.component.Adapters;
import com.cloud.utils.component.LegacyComponentLocator;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.MacAddress;
import com.cloud.utils.script.Script;
public class AgentShell implements IAgentShell {
@ -146,6 +143,7 @@ public class AgentShell implements IAgentShell {
return _guid;
}
@Override
public Map<String, Object> getCmdLineProperties() {
return _cmdLineProperties;
}
@ -378,8 +376,6 @@ public class AgentShell implements IAgentShell {
public void init(String[] args) throws ConfigurationException {
final LegacyComponentLocator locator = LegacyComponentLocator.getLocator("agent");
final Class<?> c = this.getClass();
_version = c.getPackage().getImplementationVersion();
if (_version == null) {
@ -396,12 +392,9 @@ public class AgentShell implements IAgentShell {
s_logger.debug("Found property: " + property);
}
_storage = locator.getManager(StorageComponent.class);
if (_storage == null) {
s_logger.info("Defaulting to using properties file for storage");
_storage = new PropertiesStorage();
_storage.configure("Storage", new HashMap<String, Object>());
}
s_logger.info("Defaulting to using properties file for storage");
_storage = new PropertiesStorage();
_storage.configure("Storage", new HashMap<String, Object>());
// merge with properties from command line to let resource access
// command line parameters
@ -410,22 +403,9 @@ public class AgentShell implements IAgentShell {
_properties.put(cmdLineProp.getKey(), cmdLineProp.getValue());
}
final Adapters adapters = locator.getAdapters(BackoffAlgorithm.class);
final Enumeration en = adapters.enumeration();
while (en.hasMoreElements()) {
_backoff = (BackoffAlgorithm) en.nextElement();
break;
}
if (en.hasMoreElements()) {
s_logger.info("More than one backoff algorithm specified. Using the first one ");
}
if (_backoff == null) {
s_logger.info("Defaulting to the constant time backoff algorithm");
_backoff = new ConstantTimeBackoff();
_backoff.configure("ConstantTimeBackoff",
new HashMap<String, Object>());
}
s_logger.info("Defaulting to the constant time backoff algorithm");
_backoff = new ConstantTimeBackoff();
_backoff.configure("ConstantTimeBackoff", new HashMap<String, Object>());
}
private void launchAgent() throws ConfigurationException {
@ -469,6 +449,7 @@ public class AgentShell implements IAgentShell {
openPortWithIptables(port);
_consoleProxyMain = new Thread(new Runnable() {
@Override
public void run() {
try {
Class<?> consoleProxyClazz = Class.forName("com.cloud.consoleproxy.ConsoleProxy");
@ -522,7 +503,7 @@ public class AgentShell implements IAgentShell {
} catch (final SecurityException e) {
throw new ConfigurationException(
"Security excetion when loading resource: " + name
+ " due to: " + e.toString());
+ " due to: " + e.toString());
} catch (final NoSuchMethodException e) {
throw new ConfigurationException(
"Method not found excetion when loading resource: "
@ -534,7 +515,7 @@ public class AgentShell implements IAgentShell {
} catch (final InstantiationException e) {
throw new ConfigurationException(
"Instantiation excetion when loading resource: " + name
+ " due to: " + e.toString());
+ " due to: " + e.toString());
} catch (final IllegalAccessException e) {
throw new ConfigurationException(
"Illegal access exception when loading resource: "

View File

@ -23,7 +23,6 @@ import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -41,19 +40,15 @@ import com.cloud.agent.dao.impl.PropertiesStorage;
import com.cloud.agent.transport.Request;
import com.cloud.resource.ServerResource;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.ProcessUtil;
import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.backoff.BackoffAlgorithm;
import com.cloud.utils.backoff.impl.ConstantTimeBackoff;
import com.cloud.utils.component.Adapters;
import com.cloud.utils.component.LegacyComponentLocator;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.MacAddress;
import com.cloud.utils.nio.HandlerFactory;
import com.cloud.utils.nio.Link;
import com.cloud.utils.nio.NioServer;
import com.cloud.utils.nio.Task;
import com.cloud.utils.nio.Task.Type;
/**
* Implementation of agent shell to run the agents on System Center Virtual Machine manager
@ -61,7 +56,7 @@ import com.cloud.utils.nio.Task.Type;
public class VmmAgentShell implements IAgentShell, HandlerFactory {
private static final Logger s_logger = Logger.getLogger(VmmAgentShell.class.getName());
private static final Logger s_logger = Logger.getLogger(VmmAgentShell.class.getName());
private final Properties _properties = new Properties();
private final Map<String, Object> _cmdLineProperties = new HashMap<String, Object>();
private StorageComponent _storage;
@ -76,112 +71,112 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory {
private int _proxyPort;
private int _workers;
private String _guid;
static private NioServer _connection;
static private int _listenerPort=9000;
static private NioServer _connection;
static private int _listenerPort=9000;
private int _nextAgentId = 1;
private volatile boolean _exit = false;
private int _pingRetries;
private Thread _consoleProxyMain = null;
private final Thread _consoleProxyMain = null;
private final List<Agent> _agents = new ArrayList<Agent>();
public VmmAgentShell() {
}
@Override
public Properties getProperties() {
return _properties;
return _properties;
}
@Override
public BackoffAlgorithm getBackoffAlgorithm() {
return _backoff;
return _backoff;
}
@Override
public int getPingRetries() {
return _pingRetries;
return _pingRetries;
}
@Override
public String getZone() {
return _zone;
return _zone;
}
@Override
public String getPod() {
return _pod;
return _pod;
}
@Override
public String getHost() {
return _host;
return _host;
}
@Override
public String getPrivateIp() {
return _privateIp;
return _privateIp;
}
@Override
public int getPort() {
return _port;
return _port;
}
@Override
public int getProxyPort() {
return _proxyPort;
return _proxyPort;
}
@Override
public int getWorkers() {
return _workers;
return _workers;
}
@Override
public String getGuid() {
return _guid;
return _guid;
}
@Override
public void upgradeAgent(String url) {
// TODO Auto-generated method stub
}
@Override
public void upgradeAgent(String url) {
// TODO Auto-generated method stub
@Override
}
@Override
public String getVersion() {
return _version;
return _version;
}
@Override
public Map<String, Object> getCmdLineProperties() {
// TODO Auto-generated method stub
return _cmdLineProperties;
}
public String getProperty(String prefix, String name) {
if(prefix != null)
return _properties.getProperty(prefix + "." + name);
return _properties.getProperty(name);
@Override
public Map<String, Object> getCmdLineProperties() {
// TODO Auto-generated method stub
return _cmdLineProperties;
}
@Override
public String getPersistentProperty(String prefix, String name) {
if(prefix != null)
return _storage.get(prefix + "." + name);
return _storage.get(name);
}
@Override
public void setPersistentProperty(String prefix, String name, String value) {
if(prefix != null)
_storage.persist(prefix + "." + name, value);
else
_storage.persist(name, value);
}
public String getProperty(String prefix, String name) {
if(prefix != null)
return _properties.getProperty(prefix + "." + name);
private void loadProperties() throws ConfigurationException {
return _properties.getProperty(name);
}
@Override
public String getPersistentProperty(String prefix, String name) {
if(prefix != null)
return _storage.get(prefix + "." + name);
return _storage.get(name);
}
@Override
public void setPersistentProperty(String prefix, String name, String value) {
if(prefix != null)
_storage.persist(prefix + "." + name, value);
else
_storage.persist(name, value);
}
private void loadProperties() throws ConfigurationException {
final File file = PropertiesUtil.findConfigFile("agent.properties");
if (file == null) {
throw new ConfigurationException("Unable to find agent.properties.");
@ -197,7 +192,7 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory {
throw new CloudRuntimeException("IOException in reading " + file.getAbsolutePath(), ex);
}
}
protected boolean parseCommand(final String[] args) throws ConfigurationException {
String host = null;
String workers = null;
@ -211,7 +206,7 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory {
System.out.println("Invalid Parameter: " + args[i]);
continue;
}
// save command line properties
_cmdLineProperties.put(tokens[0], tokens[1]);
@ -222,14 +217,14 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory {
} else if (tokens[0].equalsIgnoreCase("host")) {
host = tokens[1];
} else if(tokens[0].equalsIgnoreCase("zone")) {
zone = tokens[1];
zone = tokens[1];
} else if(tokens[0].equalsIgnoreCase("pod")) {
pod = tokens[1];
pod = tokens[1];
} else if(tokens[0].equalsIgnoreCase("guid")) {
guid = tokens[1];
} else if(tokens[0].equalsIgnoreCase("eth1ip")) {
_privateIp = tokens[1];
}
guid = tokens[1];
} else if(tokens[0].equalsIgnoreCase("eth1ip")) {
_privateIp = tokens[1];
}
}
if (port == null) {
@ -237,7 +232,7 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory {
}
_port = NumbersUtil.parseInt(port, 8250);
_proxyPort = NumbersUtil.parseInt(getProperty(null, "consoleproxy.httpListenPort"), 443);
if (workers == null) {
@ -254,42 +249,42 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory {
host = "localhost";
}
_host = host;
if(zone != null)
_zone = zone;
_zone = zone;
else
_zone = getProperty(null, "zone");
_zone = getProperty(null, "zone");
if (_zone == null || (_zone.startsWith("@") && _zone.endsWith("@"))) {
_zone = "default";
_zone = "default";
}
if(pod != null)
_pod = pod;
_pod = pod;
else
_pod = getProperty(null, "pod");
_pod = getProperty(null, "pod");
if (_pod == null || (_pod.startsWith("@") && _pod.endsWith("@"))) {
_pod = "default";
_pod = "default";
}
if (_host == null || (_host.startsWith("@") && _host.endsWith("@"))) {
throw new ConfigurationException("Host is not configured correctly: " + _host);
}
final String retries = getProperty(null, "ping.retries");
_pingRetries = NumbersUtil.parseInt(retries, 5);
String value = getProperty(null, "developer");
boolean developer = Boolean.parseBoolean(value);
if(guid != null)
_guid = guid;
_guid = guid;
else
_guid = getProperty(null, "guid");
_guid = getProperty(null, "guid");
if (_guid == null) {
if (!developer) {
throw new ConfigurationException("Unable to find the guid");
}
_guid = MacAddress.getMacAddress().toString(":");
if (!developer) {
throw new ConfigurationException("Unable to find the guid");
}
_guid = MacAddress.getMacAddress().toString(":");
}
return true;
@ -303,63 +298,46 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory {
}
s_logger.trace("Launching agent based on type=" + typeInfo);
}
private void launchAgent() throws ConfigurationException {
String resourceClassNames = getProperty(null, "resource");
s_logger.trace("resource=" + resourceClassNames);
if(resourceClassNames != null) {
launchAgentFromClassInfo(resourceClassNames);
return;
launchAgentFromClassInfo(resourceClassNames);
return;
}
launchAgentFromTypeInfo();
}
private void init(String[] args) throws ConfigurationException{
final LegacyComponentLocator locator = LegacyComponentLocator.getLocator("agent");
final Class<?> c = this.getClass();
_version = c.getPackage().getImplementationVersion();
if (_version == null) {
throw new CloudRuntimeException("Unable to find the implementation version of this agent");
}
s_logger.info("Implementation Version is " + _version);
parseCommand(args);
_storage = locator.getManager(StorageComponent.class);
if (_storage == null) {
s_logger.info("Defaulting to using properties file for storage");
_storage = new PropertiesStorage();
_storage.configure("Storage", new HashMap<String, Object>());
}
s_logger.info("Defaulting to using properties file for storage");
_storage = new PropertiesStorage();
_storage.configure("Storage", new HashMap<String, Object>());
// merge with properties from command line to let resource access command line parameters
for(Map.Entry<String, Object> cmdLineProp : getCmdLineProperties().entrySet()) {
_properties.put(cmdLineProp.getKey(), cmdLineProp.getValue());
}
final Adapters adapters = locator.getAdapters(BackoffAlgorithm.class);
final Enumeration en = adapters.enumeration();
while (en.hasMoreElements()) {
_backoff = (BackoffAlgorithm)en.nextElement();
break;
}
if (en.hasMoreElements()) {
s_logger.info("More than one backoff algorithm specified. Using the first one ");
_properties.put(cmdLineProp.getKey(), cmdLineProp.getValue());
}
if (_backoff == null) {
s_logger.info("Defaulting to the constant time backoff algorithm");
_backoff = new ConstantTimeBackoff();
_backoff.configure("ConstantTimeBackoff", new HashMap<String, Object>());
}
s_logger.info("Defaulting to the constant time backoff algorithm");
_backoff = new ConstantTimeBackoff();
_backoff.configure("ConstantTimeBackoff", new HashMap<String, Object>());
}
private void launchAgentFromClassInfo(String resourceClassNames) throws ConfigurationException {
String[] names = resourceClassNames.split("\\|");
for(String name: names) {
String[] names = resourceClassNames.split("\\|");
for(String name: names) {
Class<?> impl;
try {
impl = Class.forName(name);
@ -368,41 +346,41 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory {
ServerResource resource = (ServerResource)constructor.newInstance();
launchAgent(getNextAgentId(), resource);
} catch (final ClassNotFoundException e) {
throw new ConfigurationException("Resource class not found: " + name);
throw new ConfigurationException("Resource class not found: " + name);
} catch (final SecurityException e) {
throw new ConfigurationException("Security excetion when loading resource: " + name);
throw new ConfigurationException("Security excetion when loading resource: " + name);
} catch (final NoSuchMethodException e) {
throw new ConfigurationException("Method not found excetion when loading resource: " + name);
throw new ConfigurationException("Method not found excetion when loading resource: " + name);
} catch (final IllegalArgumentException e) {
throw new ConfigurationException("Illegal argument excetion when loading resource: " + name);
throw new ConfigurationException("Illegal argument excetion when loading resource: " + name);
} catch (final InstantiationException e) {
throw new ConfigurationException("Instantiation excetion when loading resource: " + name);
throw new ConfigurationException("Instantiation excetion when loading resource: " + name);
} catch (final IllegalAccessException e) {
throw new ConfigurationException("Illegal access exception when loading resource: " + name);
throw new ConfigurationException("Illegal access exception when loading resource: " + name);
} catch (final InvocationTargetException e) {
throw new ConfigurationException("Invocation target exception when loading resource: " + name);
throw new ConfigurationException("Invocation target exception when loading resource: " + name);
}
}
}
}
private void launchAgent(int localAgentId, ServerResource resource) throws ConfigurationException {
// we don't track agent after it is launched for now
Agent agent = new Agent(this, localAgentId, resource);
_agents.add(agent);
agent.start();
// we don't track agent after it is launched for now
Agent agent = new Agent(this, localAgentId, resource);
_agents.add(agent);
agent.start();
}
public synchronized int getNextAgentId() {
return _nextAgentId++;
return _nextAgentId++;
}
private void run(String[] args) {
try {
private void run(String[] args) {
try {
System.setProperty("java.net.preferIPv4Stack","true");
loadProperties();
init(args);
loadProperties();
init(args);
String instance = getProperty(null, "instance");
if (instance == null) {
instance = "";
@ -413,22 +391,22 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory {
// TODO need to do this check. For Agentshell running on windows needs different approach
//final String run = "agent." + instance + "pid";
//s_logger.debug("Checking to see if " + run + "exists.");
//ProcessUtil.pidCheck(run);
//ProcessUtil.pidCheck(run);
// TODO: For Hyper-V agent.properties need to be revamped to support multiple agents
// corresponding to multiple clusters but running on a SCVMM host
// read the persistent storage and launch the agents
//launchAgent();
//launchAgent();
// FIXME get rid of this approach of agent listening for boot strap commands from the management server
// now listen for bootstrap request from the management server and launch agents
_connection = new NioServer("VmmAgentShell", _listenerPort, 1, this);
_connection.start();
s_logger.info("SCVMM agent is listening on port " +_listenerPort + " for bootstrap command from management server");
while(_connection.isRunning());
// now listen for bootstrap request from the management server and launch agents
_connection = new NioServer("VmmAgentShell", _listenerPort, 1, this);
_connection.start();
s_logger.info("SCVMM agent is listening on port " +_listenerPort + " for bootstrap command from management server");
while(_connection.isRunning());
} catch(final ConfigurationException e) {
s_logger.error("Unable to start agent: " + e.getMessage());
System.out.println("Unable to start agent: " + e.getMessage());
@ -438,89 +416,89 @@ public class VmmAgentShell implements IAgentShell, HandlerFactory {
System.out.println("Unable to start agent: " + e.getMessage());
System.exit(ExitStatus.Error.value());
}
}
}
@Override
public Task create(com.cloud.utils.nio.Task.Type type, Link link,
byte[] data) {
return new AgentBootStrapHandler(type, link, data);
}
@Override
public Task create(com.cloud.utils.nio.Task.Type type, Link link,
byte[] data) {
return new AgentBootStrapHandler(type, link, data);
}
public void stop() {
_exit = true;
if(_consoleProxyMain != null) {
_consoleProxyMain.interrupt();
}
}
public static void main(String[] args) {
VmmAgentShell shell = new VmmAgentShell();
Runtime.getRuntime().addShutdownHook(new ShutdownThread(shell));
shell.run(args);
}
public void stop() {
_exit = true;
if(_consoleProxyMain != null) {
_consoleProxyMain.interrupt();
}
}
// class to handle the bootstrap command from the management server
private class AgentBootStrapHandler extends Task {
public static void main(String[] args) {
public AgentBootStrapHandler(Task.Type type, Link link, byte[] data) {
super(type, link, data);
}
VmmAgentShell shell = new VmmAgentShell();
Runtime.getRuntime().addShutdownHook(new ShutdownThread(shell));
shell.run(args);
}
@Override
protected void doTask(Task task) throws Exception {
final Type type = task.getType();
s_logger.info("recieved task of type "+ type.toString() +" to handle in BootStrapTakHandler");
if (type == Task.Type.DATA)
{
final byte[] data = task.getData();
final Request request = Request.parse(data);
final Command cmd = request.getCommand();
if (cmd instanceof StartupVMMAgentCommand) {
// class to handle the bootstrap command from the management server
private class AgentBootStrapHandler extends Task {
StartupVMMAgentCommand vmmCmd = (StartupVMMAgentCommand) cmd;
public AgentBootStrapHandler(Task.Type type, Link link, byte[] data) {
super(type, link, data);
}
_zone = Long.toString(vmmCmd.getDataCenter());
_cmdLineProperties.put("zone", _zone);
@Override
protected void doTask(Task task) throws Exception {
final Type type = task.getType();
s_logger.info("recieved task of type "+ type.toString() +" to handle in BootStrapTakHandler");
if (type == Task.Type.DATA)
{
final byte[] data = task.getData();
final Request request = Request.parse(data);
final Command cmd = request.getCommand();
_pod = Long.toString(vmmCmd.getPod());
_cmdLineProperties.put("pod", _pod);
if (cmd instanceof StartupVMMAgentCommand) {
_cluster = vmmCmd.getClusterName();
_cmdLineProperties.put("cluster", _cluster);
StartupVMMAgentCommand vmmCmd = (StartupVMMAgentCommand) cmd;
_guid = vmmCmd.getGuid();
_cmdLineProperties.put("guid", _guid);
_zone = Long.toString(vmmCmd.getDataCenter());
_cmdLineProperties.put("zone", _zone);
_host = vmmCmd.getManagementServerIP();
_port = NumbersUtil.parseInt(vmmCmd.getport(), 8250);
_pod = Long.toString(vmmCmd.getPod());
_cmdLineProperties.put("pod", _pod);
s_logger.info("Recieved boot strap command from management server with parameters " +
" Zone:"+ _zone + " "+
" Cluster:"+ _cluster + " "+
" pod:"+_pod + " "+
" host:"+ _host +" "+
" port:"+_port);
_cluster = vmmCmd.getClusterName();
_cmdLineProperties.put("cluster", _cluster);
launchAgentFromClassInfo("com.cloud.hypervisor.hyperv.resource.HypervResource");
// TODO: persist the info in agent.properties for agent restarts
}
}
}
}
_guid = vmmCmd.getGuid();
_cmdLineProperties.put("guid", _guid);
_host = vmmCmd.getManagementServerIP();
_port = NumbersUtil.parseInt(vmmCmd.getport(), 8250);
s_logger.info("Recieved boot strap command from management server with parameters " +
" Zone:"+ _zone + " "+
" Cluster:"+ _cluster + " "+
" pod:"+_pod + " "+
" host:"+ _host +" "+
" port:"+_port);
launchAgentFromClassInfo("com.cloud.hypervisor.hyperv.resource.HypervResource");
// TODO: persist the info in agent.properties for agent restarts
}
}
}
}
private static class ShutdownThread extends Thread {
VmmAgentShell _shell;
VmmAgentShell _shell;
public ShutdownThread(VmmAgentShell shell) {
this._shell = shell;
}
@Override
public void run() {
_shell.stop();
}
}
}

View File

@ -1,76 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.agent.configuration;
import java.util.List;
import java.util.Map;
import com.cloud.utils.component.Adapter;
import com.cloud.utils.component.ComponentLibraryBase;
import com.cloud.utils.component.LegacyComponentLocator.ComponentInfo;
import com.cloud.utils.component.Manager;
import com.cloud.utils.component.PluggableService;
import com.cloud.utils.db.GenericDao;
public class AgentComponentLibraryBase extends ComponentLibraryBase {
@Override
public Map<String, ComponentInfo<GenericDao<?, ?>>> getDaos() {
return null;
}
@Override
public Map<String, ComponentInfo<Manager>> getManagers() {
if (_managers.size() == 0) {
populateManagers();
}
return _managers;
}
@Override
public Map<String, List<ComponentInfo<Adapter>>> getAdapters() {
if (_adapters.size() == 0) {
populateAdapters();
}
return _adapters;
}
@Override
public Map<Class<?>, Class<?>> getFactories() {
return null;
}
protected void populateManagers() {
// addManager("StackMaidManager", StackMaidManagerImpl.class);
}
protected void populateAdapters() {
}
protected void populateServices() {
}
@Override
public Map<String, ComponentInfo<PluggableService>> getPluggableServices() {
if (_pluggableServices.size() == 0) {
populateServices();
}
return _pluggableServices;
}
}

View File

@ -19,24 +19,23 @@ package com.cloud.agent;
import java.io.File;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.log4j.Logger;
import com.cloud.agent.AgentShell;
import com.cloud.utils.testcase.Log4jEnabledTestCase;
public class TestAgentShell extends Log4jEnabledTestCase {
public class TestAgentShell extends TestCase {
protected final static Logger s_logger = Logger.getLogger(TestAgentShell.class);
public void testWget() {
File file = null;
try {
file = File.createTempFile("wget", ".html");
AgentShell.wget("http://www.google.com/", file);
if (s_logger.isDebugEnabled()) {
s_logger.debug("file saved to " + file.getAbsolutePath());
}
} catch (final IOException e) {
s_logger.warn("Exception while downloading agent update package, ", e);
}

View File

@ -31,7 +31,7 @@ import com.cloud.exception.ResourceAllocationException;
import com.cloud.network.Network;
import com.cloud.user.UserContext;
@APICommand(description="Creates a private network", responseObject=NetworkResponse.class)
//@APICommand(description="Creates a private network", responseObject=NetworkResponse.class)
public class CreatePrivateNetworkCmd extends BaseAsyncCreateCmd {
public static final Logger s_logger = Logger.getLogger(CreatePrivateNetworkCmd.class.getName());
@ -153,6 +153,7 @@ public class CreatePrivateNetworkCmd extends BaseAsyncCreateCmd {
if (result != null) {
this.setEntityId(result.getId());
this.setEntityUuid(result.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create a Private network");
}
@ -190,8 +191,4 @@ public class CreatePrivateNetworkCmd extends BaseAsyncCreateCmd {
}
@Override
public String getEntityTable() {
return "networks";
}
}

View File

@ -25,7 +25,7 @@ import com.cloud.event.EventTypes;
import com.cloud.user.Account;
import com.cloud.user.UserContext;
@APICommand(description="Destroys console proxy", responseObject=SuccessResponse.class)
//@APICommand(description="Destroys console proxy", responseObject=SuccessResponse.class)
public class DestroyConsoleProxyCmd extends BaseAsyncCmd {
public static final Logger s_logger = Logger.getLogger(DestroyConsoleProxyCmd.class.getName());

View File

@ -27,7 +27,7 @@ import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.SnapshotScheduleResponse;
import com.cloud.storage.snapshot.SnapshotSchedule;
@APICommand(description="Lists recurring snapshot schedule", responseObject=SnapshotScheduleResponse.class)
//@APICommand(description="Lists recurring snapshot schedule", responseObject=SnapshotScheduleResponse.class)
public class ListRecurringSnapshotScheduleCmd extends BaseListCmd {
private static final String s_name = "listrecurringsnapshotscheduleresponse";

View File

@ -16,10 +16,8 @@
// under the License.
package com.cloud.exception;
import com.cloud.utils.IdentityProxy;
import java.util.ArrayList;
import com.cloud.utils.exception.CSExceptionErrorCode;
import com.cloud.utils.AnnotationHelper;
/**
* by the API response serializer. Any exceptions that are thrown by
@ -56,6 +54,7 @@ public class CloudException extends Exception {
return;
}
public ArrayList<String> getIdProxyList() {
return idList;
}

View File

@ -60,6 +60,8 @@ public interface NetworkService {
Network getNetwork(long networkId);
Network getNetwork(String networkUuid);
IpAddress getIp(long id);
NetworkProfile convertNetworkToNetworkProfile(long networkId);

View File

@ -20,6 +20,7 @@ import java.util.List;
import java.util.Map;
import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.acl.SecurityChecker.AccessType;
import org.apache.cloudstack.api.command.admin.user.DeleteUserCmd;
@ -193,6 +194,8 @@ public interface AccountService {
UserAccount getUserByApiKey(String apiKey);
RoleType getRoleType(Account account);
void checkAccess(Account account, Domain domain) throws PermissionDeniedException;
void checkAccess(Account account, AccessType accessType, boolean sameOwner, ControlledEntity... entities) throws PermissionDeniedException;

View File

@ -30,6 +30,8 @@ public interface DomainService {
Domain getDomain(long id);
Domain getDomain(String uuid);
/**
* Return whether a domain is a child domain of a given domain.
*

View File

@ -16,13 +16,10 @@
// under the License.
package com.cloud.user;
import com.cloud.server.ManagementService;
import com.cloud.utils.component.ComponentLocator;
public class UserContext {
private static ThreadLocal<UserContext> s_currentContext = new ThreadLocal<UserContext>();
private long userId;
private String sessionId;
private Account account;
@ -82,7 +79,7 @@ public class UserContext {
// however, there are many places that run background jobs assume the system context.
//
// If there is a security concern, all entry points from user (including the front end that takes HTTP
// request in and
// request in and
// the core async-job manager that runs commands from user) have explicitly setup the UserContext.
//
return UserContextInitializer.getInstance().getAdminContext();

View File

@ -16,17 +16,12 @@
// under the License.
package org.apache.cloudstack.acl;
import java.util.Properties;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.user.Account;
import com.cloud.user.User;
import org.apache.cloudstack.acl.RoleType;
import com.cloud.utils.component.Adapter;
/**
* APIAccessChecker checks the ownership and access control to API requests
*/
public interface APIAccessChecker extends Adapter {
// Interface for checking access to an API for an user
boolean canAccessAPI(User user, String apiCommandName) throws PermissionDeniedException;
// APIChecker checks the ownership and access control to API requests
public interface APIChecker extends Adapter {
// Interface for checking access for a role using apiname
boolean checkAccess(RoleType roleType, String apiCommandName) throws PermissionDeniedException;
}

View File

@ -4,9 +4,9 @@
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// the License. You may obtain a copy of the License at
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
@ -14,17 +14,24 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.utils.component;
package org.apache.cloudstack.acl;
import static java.lang.annotation.ElementType.FIELD;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
// Enum for default roles in CloudStack
public enum RoleType {
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
Admin(1),
ResourceAdmin(2),
DomainAdmin(4),
User(8),
Unknown(0);
@Target(FIELD)
@Retention(RUNTIME)
public @interface Inject {
Class<? extends Adapter> adapter() default Adapter.class;
private int mask;
private RoleType(int mask) {
this.mask = mask;
}
public int getValue() {
return mask;
}
}

View File

@ -158,6 +158,7 @@ public class ApiConstants {
public static final String RECEIVED_BYTES = "receivedbytes";
public static final String REQUIRES_HVM = "requireshvm";
public static final String RESOURCE_TYPE = "resourcetype";
public static final String RESPONSE = "response";
public static final String QUERY_FILTER = "queryfilter";
public static final String SCHEDULE = "schedule";
public static final String SCOPE = "scope";

View File

@ -25,6 +25,8 @@ public abstract class BaseAsyncCreateCmd extends BaseAsyncCmd {
@Parameter(name = "id", type = CommandType.LONG)
private Long id;
private String uuid;
public abstract void create() throws ResourceAllocationException;
public Long getEntityId() {
@ -35,14 +37,19 @@ public abstract class BaseAsyncCreateCmd extends BaseAsyncCmd {
this.id = id;
}
public abstract String getEntityTable();
public String getEntityUuid() {
return uuid;
}
public String getResponse(long jobId, long objectId, String objectEntityTable) {
public void setEntityUuid(String uuid) {
this.uuid = uuid;
}
public String getResponse(long jobId, String objectUuid) {
CreateCmdResponse response = new CreateCmdResponse();
AsyncJob job = _entityMgr.findById(AsyncJob.class, jobId);
response.setJobId(job.getUuid());
response.setId(objectId);
response.setIdEntityTable(objectEntityTable);
response.setId(objectUuid);
response.setResponseName(getCommandName());
return _responseGenerator.toSerializedString(response, getResponseType());
}

View File

@ -19,7 +19,6 @@ package org.apache.cloudstack.api;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
@ -64,10 +63,8 @@ import com.cloud.user.Account;
import com.cloud.user.AccountService;
import com.cloud.user.DomainService;
import com.cloud.user.ResourceLimitService;
import com.cloud.utils.IdentityProxy;
import com.cloud.utils.Pair;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.vm.BareMetalVmService;
import com.cloud.vm.UserVmService;
@ -112,7 +109,6 @@ public abstract class BaseCmd {
@Parameter(name = "response", type = CommandType.STRING)
private String responseType;
public static ComponentLocator s_locator;
public static ConfigurationService _configService;
public static AccountService _accountService;
public static UserVmService _userVmService;
@ -146,37 +142,47 @@ public abstract class BaseCmd {
public static QueryService _queryService;
public static void setComponents(ResponseGenerator generator) {
ComponentLocator locator = ComponentLocator.getLocator(ManagementService.Name);
_mgr = (ManagementService) ComponentLocator.getComponent(ManagementService.Name);
_accountService = locator.getManager(AccountService.class);
_configService = locator.getManager(ConfigurationService.class);
_userVmService = locator.getManager(UserVmService.class);
_storageService = locator.getManager(StorageService.class);
_resourceService = locator.getManager(ResourceService.class);
_networkService = locator.getManager(NetworkService.class);
_templateService = locator.getManager(TemplateService.class);
_securityGroupService = locator.getManager(SecurityGroupService.class);
_snapshotService = locator.getManager(SnapshotService.class);
_consoleProxyService = locator.getManager(ConsoleProxyService.class);
_routerService = locator.getManager(VpcVirtualNetworkApplianceService.class);
_entityMgr = locator.getManager(EntityManager.class);
_rulesService = locator.getManager(RulesService.class);
_lbService = locator.getManager(LoadBalancingRulesService.class);
_autoScaleService = locator.getManager(AutoScaleService.class);
_ravService = locator.getManager(RemoteAccessVpnService.class);
_mgr = ComponentContext.getComponent(ManagementService.class);
_accountService = ComponentContext.getComponent(AccountService.class);
_configService = ComponentContext.getComponent(ConfigurationService.class);
_userVmService = ComponentContext.getComponent(UserVmService.class);
// TODO, ugly and will change soon
//
Map<String, UserVmService> svmServices = ComponentContext.getComponentsOfType(UserVmService.class);
_userVmService = svmServices.get("BareMetalVmManagerImpl");
_storageService = ComponentContext.getComponent(StorageService.class);
_resourceService = ComponentContext.getComponent(ResourceService.class);
_networkService = ComponentContext.getComponent(NetworkService.class);
_templateService = ComponentContext.getComponent(TemplateService.class);
// TODO, will change to looking for primary component
// ugly binding to a specific implementation
Map<String, SecurityGroupService> _sgServices = ComponentContext.getComponentsOfType(SecurityGroupService.class);
_securityGroupService = _sgServices.get("SecurityGroupManagerImpl2");
_snapshotService = ComponentContext.getComponent(SnapshotService.class);
_consoleProxyService = ComponentContext.getComponent(ConsoleProxyService.class);
_routerService = ComponentContext.getComponent(VpcVirtualNetworkApplianceService.class);
_entityMgr = ComponentContext.getComponent(EntityManager.class);
_rulesService = ComponentContext.getComponent(RulesService.class);
_lbService = ComponentContext.getComponent(LoadBalancingRulesService.class);
_ravService = ComponentContext.getComponent(RemoteAccessVpnService.class);
_responseGenerator = generator;
_bareMetalVmService = locator.getManager(BareMetalVmService.class);
_projectService = locator.getManager(ProjectService.class);
_firewallService = locator.getManager(FirewallService.class);
_domainService = locator.getManager(DomainService.class);
_resourceLimitService = locator.getManager(ResourceLimitService.class);
_identityService = locator.getManager(IdentityService.class);
_storageNetworkService = locator.getManager(StorageNetworkService.class);
_taggedResourceService = locator.getManager(TaggedResourceService.class);
_vpcService = locator.getManager(VpcService.class);
_networkACLService = locator.getManager(NetworkACLService.class);
_s2sVpnService = locator.getManager(Site2SiteVpnService.class);
_queryService = locator.getManager(QueryService.class);
_bareMetalVmService = ComponentContext.getComponent(BareMetalVmService.class);
_projectService = ComponentContext.getComponent(ProjectService.class);
_firewallService = ComponentContext.getComponent(FirewallService.class);
_domainService = ComponentContext.getComponent(DomainService.class);
_resourceLimitService = ComponentContext.getComponent(ResourceLimitService.class);
_identityService = ComponentContext.getComponent(IdentityService.class);
_storageNetworkService = ComponentContext.getComponent(StorageNetworkService.class);
_taggedResourceService = ComponentContext.getComponent(TaggedResourceService.class);
_vpcService = ComponentContext.getComponent(VpcService.class);
_networkACLService = ComponentContext.getComponent(NetworkACLService.class);
_s2sVpnService = ComponentContext.getComponent(Site2SiteVpnService.class);
}
public abstract void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException;
@ -211,9 +217,9 @@ public abstract class BaseCmd {
}
public ManagementService getMgmtServiceRef() {
return _mgr;
return _mgr;
}
public static String getDateString(Date date) {
if (date == null) {
return "";
@ -526,8 +532,8 @@ public abstract class BaseCmd {
if (!enabledOnly || project.getState() == Project.State.Active) {
return project.getProjectAccountId();
} else {
PermissionDeniedException ex = new PermissionDeniedException("Can't add resources to the project with specified projectId in state=" + project.getState() + " as it's no longer active");
ex.addProxyObject(project, projectId, "projectId");
PermissionDeniedException ex = new PermissionDeniedException("Can't add resources to the project with specified projectId in state=" + project.getState() + " as it's no longer active");
ex.addProxyObject(project, projectId, "projectId");
throw ex;
}
} else {

View File

@ -17,7 +17,6 @@
package org.apache.cloudstack.api;
import org.apache.cloudstack.api.ApiConstants;
import com.cloud.utils.IdentityProxy;
import org.apache.cloudstack.api.ResponseObject;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
@ -46,6 +45,7 @@ public abstract class BaseResponse implements ResponseObject {
this.objectName = objectName;
}
@Override
public String getObjectId() {
return null;
}
@ -56,18 +56,22 @@ public abstract class BaseResponse implements ResponseObject {
@SerializedName(ApiConstants.JOB_STATUS) @Param(description="the current status of the latest async job acting on this object")
private Integer jobStatus;
@Override
public String getJobId() {
return jobId;
}
@Override
public void setJobId(String jobId) {
this.jobId = jobId;
}
@Override
public Integer getJobStatus() {
return jobStatus;
}
@Override
public void setJobStatus(Integer jobStatus) {
this.jobStatus = jobStatus;
}

View File

@ -314,13 +314,6 @@ public interface ResponseGenerator {
StorageNetworkIpRangeResponse createStorageNetworkIpRangeResponse(StorageNetworkIpRange result);
/**
* @param tableName TODO
* @param token
* @return
*/
Long getIdentiyId(String tableName, String token);
/**
* @param resourceTag
* @param keyValueOnly TODO

View File

@ -81,6 +81,7 @@ public class CreateCounterCmd extends BaseAsyncCreateCmd {
if (ctr != null) {
this.setEntityId(ctr.getId());
this.setEntityUuid(ctr.getUuid());
CounterResponse response = _responseGenerator.createCounterResponse(ctr);
response.setResponseName(getCommandName());
this.setResponseObject(response);
@ -113,8 +114,5 @@ public class CreateCounterCmd extends BaseAsyncCreateCmd {
return Account.ACCOUNT_ID_SYSTEM;
}
@Override
public String getEntityTable() {
return "counter";
}
}

View File

@ -18,23 +18,23 @@ package org.apache.cloudstack.api.command.admin.network;
import java.util.Map;
import org.apache.log4j.Logger;
import javax.inject.Inject;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.NetworkDeviceResponse;
import org.apache.cloudstack.network.ExternalNetworkDeviceManager;
import org.apache.log4j.Logger;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.host.Host;
import org.apache.cloudstack.network.ExternalNetworkDeviceManager;
import com.cloud.server.ManagementService;
import org.apache.cloudstack.api.response.NetworkDeviceResponse;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.exception.CloudRuntimeException;
@APICommand(name = "addNetworkDevice", description="Adds a network device of one of the following types: ExternalDhcp, ExternalFirewall, ExternalLoadBalancer, PxeServer", responseObject = NetworkDeviceResponse.class)
@ -46,6 +46,7 @@ public class AddNetworkDeviceCmd extends BaseCmd {
// ////////////// API parameters /////////////////////
// ///////////////////////////////////////////////////
@Inject ExternalNetworkDeviceManager nwDeviceMgr;
@Parameter(name = ApiConstants.NETWORK_DEVICE_TYPE, type = CommandType.STRING, description = "Network device type, now supports ExternalDhcp, PxeServer, NetscalerMPXLoadBalancer, NetscalerVPXLoadBalancer, NetscalerSDXLoadBalancer, F5BigIpLoadBalancer, JuniperSRXFirewall")
private String type;
@ -63,11 +64,8 @@ public class AddNetworkDeviceCmd extends BaseCmd {
@Override
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException,
ResourceAllocationException {
ResourceAllocationException {
try {
ExternalNetworkDeviceManager nwDeviceMgr;
ComponentLocator locator = ComponentLocator.getLocator(ManagementService.Name);
nwDeviceMgr = locator.getManager(ExternalNetworkDeviceManager.class);
Host device = nwDeviceMgr.addNetworkDevice(this);
NetworkDeviceResponse response = nwDeviceMgr.getApiResponse(device);
response.setObjectName("networkdevice");

View File

@ -59,10 +59,6 @@ public class AddNetworkServiceProviderCmd extends BaseAsyncCreateCmd {
@Parameter(name=ApiConstants.SERVICE_LIST, type=CommandType.LIST, collectionType = CommandType.STRING, description="the list of services to be enabled for this physical network service provider")
private List<String> enabledServices;
@Override
public String getEntityTable() {
return "physical_network_service_providers";
}
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
@ -116,6 +112,7 @@ public class AddNetworkServiceProviderCmd extends BaseAsyncCreateCmd {
PhysicalNetworkServiceProvider result = _networkService.addProviderToPhysicalNetwork(getPhysicalNetworkId(), getProviderName(), getDestinationPhysicalNetworkId(), getEnabledServices());
if (result != null) {
setEntityId(result.getId());
setEntityUuid(result.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to add service provider entity to physical network");
}

View File

@ -79,10 +79,6 @@ public class CreatePhysicalNetworkCmd extends BaseAsyncCreateCmd {
return tags;
}
@Override
public String getEntityTable() {
return "physical_network";
}
public Long getZoneId() {
return zoneId;
@ -164,6 +160,7 @@ public class CreatePhysicalNetworkCmd extends BaseAsyncCreateCmd {
PhysicalNetwork result = _networkService.createPhysicalNetwork(getZoneId(),getVlan(),getNetworkSpeed(), getIsolationMethods(),getBroadcastDomainRange(),getDomainId(), getTags(), getNetworkName());
if (result != null) {
setEntityId(result.getId());
setEntityUuid(result.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create physical network entity");
}

View File

@ -16,23 +16,23 @@
// under the License.
package org.apache.cloudstack.api.command.admin.network;
import org.apache.log4j.Logger;
import javax.inject.Inject;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.network.ExternalNetworkDeviceManager;
import org.apache.cloudstack.api.response.HostResponse;
import org.apache.cloudstack.api.response.SuccessResponse;
import org.apache.cloudstack.network.ExternalNetworkDeviceManager;
import org.apache.log4j.Logger;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.server.ManagementService;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.exception.CloudRuntimeException;
@APICommand(name = "deleteNetworkDevice", description="Deletes network device.", responseObject=SuccessResponse.class)
@ -40,6 +40,8 @@ public class DeleteNetworkDeviceCmd extends BaseCmd {
public static final Logger s_logger = Logger.getLogger(DeleteNetworkDeviceCmd.class);
private static final String s_name = "deletenetworkdeviceresponse";
@Inject ExternalNetworkDeviceManager nwDeviceMgr;
/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@ -54,11 +56,8 @@ public class DeleteNetworkDeviceCmd extends BaseCmd {
@Override
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException,
ResourceAllocationException {
ResourceAllocationException {
try {
ExternalNetworkDeviceManager nwDeviceMgr;
ComponentLocator locator = ComponentLocator.getLocator(ManagementService.Name);
nwDeviceMgr = locator.getManager(ExternalNetworkDeviceManager.class);
boolean result = nwDeviceMgr.deleteNetworkDevice(this);
if (result) {
SuccessResponse response = new SuccessResponse(getCommandName());

View File

@ -20,25 +20,25 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import javax.inject.Inject;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.BaseListCmd;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.network.ExternalNetworkDeviceManager;
import org.apache.cloudstack.api.response.NetworkDeviceResponse;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.NetworkDeviceResponse;
import org.apache.cloudstack.network.ExternalNetworkDeviceManager;
import org.apache.log4j.Logger;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.host.Host;
import com.cloud.server.ManagementService;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.exception.CloudRuntimeException;
@APICommand(name = "listNetworkDevice", description="List network devices", responseObject = NetworkDeviceResponse.class)
@ -46,6 +46,7 @@ public class ListNetworkDeviceCmd extends BaseListCmd {
public static final Logger s_logger = Logger.getLogger(ListNetworkDeviceCmd.class);
private static final String s_name = "listnetworkdevice";
@Inject ExternalNetworkDeviceManager nwDeviceMgr;
/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@ -66,11 +67,8 @@ public class ListNetworkDeviceCmd extends BaseListCmd {
@Override
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException,
ResourceAllocationException {
ResourceAllocationException {
try {
ExternalNetworkDeviceManager nwDeviceMgr;
ComponentLocator locator = ComponentLocator.getLocator(ManagementService.Name);
nwDeviceMgr = locator.getManager(ExternalNetworkDeviceManager.class);
List<Host> devices = nwDeviceMgr.listNetworkDevice(this);
List<NetworkDeviceResponse> nwdeviceResponses = new ArrayList<NetworkDeviceResponse>();
ListResponse<NetworkDeviceResponse> listResponse = new ListResponse<NetworkDeviceResponse>();

View File

@ -53,10 +53,7 @@ public class CreateVirtualRouterElementCmd extends BaseAsyncCreateCmd {
this.nspId = nspId;
}
@Override
public String getEntityTable() {
return "virtual_router_providers";
}
public Long getNspId() {
return nspId;
@ -94,6 +91,7 @@ public class CreateVirtualRouterElementCmd extends BaseAsyncCreateCmd {
VirtualRouterProvider result = _service.addElement(getNspId(), VirtualRouterProviderType.VirtualRouter);
if (result != null) {
setEntityId(result.getId());
setEntityUuid(result.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to add Virtual Router entity to physical network");
}

View File

@ -66,10 +66,6 @@ public class AddTrafficTypeCmd extends BaseAsyncCreateCmd {
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@Override
public String getEntityTable() {
return "physical_network_traffic_types";
}
public Long getPhysicalNetworkId() {
return physicalNetworkId;
@ -136,6 +132,7 @@ public class AddTrafficTypeCmd extends BaseAsyncCreateCmd {
PhysicalNetworkTrafficType result = _networkService.addTrafficTypeToPhysicalNetwork(getPhysicalNetworkId(), getTrafficType(), getXenLabel(), getKvmLabel(), getVmwareLabel(), getSimulatorLabel(), getVlan());
if (result != null) {
setEntityId(result.getId());
setEntityUuid(result.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to add traffic type to physical network");
}

View File

@ -123,6 +123,7 @@ public class CreatePrivateGatewayCmd extends BaseAsyncCreateCmd {
if (result != null) {
this.setEntityId(result.getId());
this.setEntityUuid(result.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create private gateway");
}
@ -156,10 +157,6 @@ public class CreatePrivateGatewayCmd extends BaseAsyncCreateCmd {
return "creating private gateway";
}
@Override
public String getEntityTable() {
return "vpc_gateways";
}
@Override

View File

@ -70,6 +70,7 @@ public class CreateVPCOfferingCmd extends BaseAsyncCreateCmd{
VpcOffering vpcOff = _vpcService.createVpcOffering(getVpcOfferingName(), getDisplayText(), getSupportedServices());
if (vpcOff != null) {
this.setEntityId(vpcOff.getId());
this.setEntityUuid(vpcOff.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create a VPC offering");
}
@ -87,10 +88,6 @@ public class CreateVPCOfferingCmd extends BaseAsyncCreateCmd{
}
}
@Override
public String getEntityTable() {
return "vpc_offerings";
}
@Override
public String getEventType() {

View File

@ -87,9 +87,6 @@ public class AssociateIPAddrCmd extends BaseAsyncCreateCmd {
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getEntityTable() {
return "user_ip_address";
}
public String getAccountName() {
if (accountName != null) {
@ -220,6 +217,7 @@ public class AssociateIPAddrCmd extends BaseAsyncCreateCmd {
if (ip != null) {
this.setEntityId(ip.getId());
this.setEntityUuid(ip.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to allocate ip address");
}

View File

@ -62,10 +62,6 @@ public class CreateAutoScalePolicyCmd extends BaseAsyncCreateCmd {
private Long conditionDomainId;
private Long conditionAccountId;
@Override
public String getEntityTable() {
return "autoscale_policies";
}
public int getDuration() {
return duration;
@ -159,6 +155,7 @@ public class CreateAutoScalePolicyCmd extends BaseAsyncCreateCmd {
AutoScalePolicy result = _autoScaleService.createAutoScalePolicy(this);
if (result != null) {
this.setEntityId(result.getId());
this.setEntityUuid(result.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create AutoScale Policy");
}

View File

@ -72,10 +72,6 @@ public class CreateAutoScaleVmGroupCmd extends BaseAsyncCreateCmd {
// ///////////////// Accessors ///////////////////////
// ///////////////////////////////////////////////////
@Override
public String getEntityTable() {
return "autoscale_vmgroups";
}
public int getMinMembers() {
return minMembers;
@ -161,6 +157,7 @@ public class CreateAutoScaleVmGroupCmd extends BaseAsyncCreateCmd {
AutoScaleVmGroup result = _autoScaleService.createAutoScaleVmGroup(this);
if (result != null) {
this.setEntityId(result.getId());
this.setEntityUuid(result.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create Autoscale Vm Group");
}

View File

@ -86,10 +86,7 @@ public class CreateAutoScaleVmProfileCmd extends BaseAsyncCreateCmd {
private Long domainId;
private Long accountId;
@Override
public String getEntityTable() {
return "autoscale_vmprofiles";
}
public Long getDomainId() {
if (domainId == null) {
@ -232,6 +229,7 @@ public class CreateAutoScaleVmProfileCmd extends BaseAsyncCreateCmd {
AutoScaleVmProfile result = _autoScaleService.createAutoScaleVmProfile(this);
if (result != null) {
this.setEntityId(result.getId());
this.setEntityUuid(result.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create Autoscale Vm Profile");
}

View File

@ -72,6 +72,7 @@ public class CreateConditionCmd extends BaseAsyncCreateCmd {
if (condition != null) {
this.setEntityId(condition.getId());
this.setEntityUuid(condition.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create condition.");
}
@ -146,8 +147,5 @@ public class CreateConditionCmd extends BaseAsyncCreateCmd {
return accountId;
}
@Override
public String getEntityTable() {
return "conditions";
}
}

View File

@ -80,9 +80,6 @@ public class CreateFirewallRuleCmd extends BaseAsyncCreateCmd implements Firewal
// ///////////////// Accessors ///////////////////////
// ///////////////////////////////////////////////////
public String getEntityTable() {
return "firewall_rules";
}
public Long getIpAddressId() {
return ipAddressId;
@ -242,6 +239,7 @@ public class CreateFirewallRuleCmd extends BaseAsyncCreateCmd implements Firewal
try {
FirewallRule result = _firewallService.createFirewallRule(this);
setEntityId(result.getId());
setEntityUuid(result.getUuid());
} catch (NetworkRuleConflictException ex) {
s_logger.info("Network rule conflict: " + ex.getMessage());
s_logger.trace("Network Rule Conflict: ", ex);

View File

@ -94,9 +94,6 @@ public class CreatePortForwardingRuleCmd extends BaseAsyncCreateCmd implements P
// ///////////////// Accessors ///////////////////////
// ///////////////////////////////////////////////////
public String getEntityTable() {
return "firewall_rules";
}
public Long getIpAddressId() {
return ipAddressId;
@ -301,6 +298,7 @@ public class CreatePortForwardingRuleCmd extends BaseAsyncCreateCmd implements P
try {
PortForwardingRule result = _rulesService.createPortForwardingRule(this, virtualMachineId, getOpenFirewall());
setEntityId(result.getId());
setEntityUuid(result.getUuid());
} catch (NetworkRuleConflictException ex) {
s_logger.info("Network rule conflict: " , ex);
s_logger.trace("Network Rule Conflict: ", ex);

View File

@ -91,9 +91,7 @@ public class CreateLBStickinessPolicyCmd extends BaseAsyncCreateCmd {
return paramList;
}
public String getEntityTable() {
return "firewall_rules";
}
// ///////////////////////////////////////////////////
// ///////////// API Implementation///////////////////
// ///////////////////////////////////////////////////
@ -141,6 +139,7 @@ public class CreateLBStickinessPolicyCmd extends BaseAsyncCreateCmd {
try {
StickinessPolicy result = _lbService.createLBStickinessPolicy(this);
this.setEntityId(result.getId());
this.setEntityUuid(result.getUuid());
} catch (NetworkRuleConflictException e) {
s_logger.warn("Exception: ", e);
throw new ServerApiException(BaseCmd.NETWORK_RULE_CONFLICT_ERROR, e.getMessage());

View File

@ -120,9 +120,6 @@ public class CreateLoadBalancerRuleCmd extends BaseAsyncCreateCmd /*implements
return privatePort;
}
public String getEntityTable() {
return "firewall_rules";
}
public Long getSourceIpAddressId() {
if (publicIpId != null) {
@ -283,6 +280,7 @@ public class CreateLoadBalancerRuleCmd extends BaseAsyncCreateCmd /*implements
try {
LoadBalancer result = _lbService.createLoadBalancerRule(this, getOpenFirewall());
this.setEntityId(result.getId());
this.setEntityUuid(result.getUuid());
} catch (NetworkRuleConflictException e) {
s_logger.warn("Exception: ", e);
throw new ServerApiException(BaseCmd.NETWORK_RULE_CONFLICT_ERROR, e.getMessage());

View File

@ -75,9 +75,6 @@ public class CreateIpForwardingRuleCmd extends BaseAsyncCreateCmd implements Sta
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getEntityTable() {
return "firewall_rules";
}
public Long getIpAddressId() {
return ipAddressId;
@ -151,6 +148,7 @@ public class CreateIpForwardingRuleCmd extends BaseAsyncCreateCmd implements Sta
try {
StaticNatRule rule = _rulesService.createStaticNatRule(this, getOpenFirewall());
this.setEntityId(rule.getId());
this.setEntityUuid(rule.getUuid());
} catch (NetworkRuleConflictException e) {
s_logger.info("Unable to create Static Nat Rule due to ", e);
throw new ServerApiException(BaseCmd.NETWORK_RULE_CONFLICT_ERROR, e.getMessage());

View File

@ -86,10 +86,6 @@ public class CreateNetworkACLCmd extends BaseAsyncCreateCmd implements FirewallR
// ///////////////// Accessors ///////////////////////
// ///////////////////////////////////////////////////
public String getEntityTable() {
return "firewall_rules";
}
public Long getIpAddressId() {
return null;
}
@ -262,6 +258,7 @@ public class CreateNetworkACLCmd extends BaseAsyncCreateCmd implements FirewallR
try {
FirewallRule result = _networkACLService.createNetworkACL(this);
setEntityId(result.getId());
setEntityUuid(result.getUuid());
} catch (NetworkRuleConflictException ex) {
s_logger.info("Network rule conflict: " + ex.getMessage());
s_logger.trace("Network Rule Conflict: ", ex);

View File

@ -56,9 +56,6 @@ public class CreateProjectCmd extends BaseAsyncCreateCmd {
// ///////////////// Accessors ///////////////////////
// ///////////////////////////////////////////////////
public String getEntityTable() {
return "projects";
}
public String getAccountName() {
if (accountName != null) {
@ -127,6 +124,7 @@ public class CreateProjectCmd extends BaseAsyncCreateCmd {
Project project = _projectService.createProject(getName(), getDisplayText(), getAccountName(), getDomainId());
if (project != null) {
this.setEntityId(project.getId());
this.setEntityUuid(project.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create a project");
}

View File

@ -65,9 +65,6 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd {
// ///////////////// Accessors ///////////////////////
// ///////////////////////////////////////////////////
public String getEntityTable() {
return "snapshots";
}
public String getAccountName() {
return accountName;
@ -153,6 +150,7 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd {
Snapshot snapshot = _snapshotService.allocSnapshot(getVolumeId(), getPolicyId());
if (snapshot != null) {
this.setEntityId(snapshot.getId());
this.setEntityUuid(snapshot.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create snapshot");
}

View File

@ -102,9 +102,6 @@ import com.cloud.user.UserContext;
// ///////////////// Accessors ///////////////////////
// ///////////////////////////////////////////////////
public String getEntityTable() {
return "vm_template";
}
public Integer getBits() {
return bits;
@ -240,13 +237,15 @@ import com.cloud.user.UserContext;
public void create() throws ResourceAllocationException {
if (isBareMetal()) {
_bareMetalVmService.createPrivateTemplateRecord(this, _accountService.getAccount(getEntityOwnerId()));
/*Baremetal creates template record after taking image proceeded, use vmId as entity id here*/
/*Baremetal creates template record after taking image proceeded, use vmId as entity id and uuid here*/
this.setEntityId(vmId);
this.setEntityUuid(vmId.toString());
} else {
VirtualMachineTemplate template = null;
template = _userVmService.createPrivateTemplateRecord(this, _accountService.getAccount(getEntityOwnerId()));
if (template != null) {
this.setEntityId(template.getId());
this.setEntityUuid(template.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR,
"Failed to create a template");

View File

@ -147,10 +147,9 @@ public class DeployVMCmd extends BaseAsyncCreateCmd {
private List<String> securityGroupNameList;
@ACL(checkKeyAccess=true)
@Parameter(name = ApiConstants.IP_NETWORK_LIST, type = CommandType.MAP, entityType={Network.class,IpAddress.class},
@Parameter(name = ApiConstants.IP_NETWORK_LIST, type = CommandType.MAP, entityType={Network.class, IpAddress.class},
description = "ip to network mapping. Can't be specified with networkIds parameter." +
" Example: iptonetworklist[0].ip=10.10.10.11&iptonetworklist[0].networkid=204 - requests to" +
" use ip 10.10.10.11 in network id=204")
" Example: iptonetworklist[0].ip=10.10.10.11&iptonetworklist[0].networkid=uuid - requests to use ip 10.10.10.11 in network id=uuid")
private Map ipToNetworkList;
@Parameter(name=ApiConstants.IP_ADDRESS, type=CommandType.STRING, description="the ip address for default vm's network")
@ -171,9 +170,6 @@ public class DeployVMCmd extends BaseAsyncCreateCmd {
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getEntityTable() {
return "vm_instance";
}
public String getAccountName() {
if (accountName == null) {
@ -287,7 +283,17 @@ public class DeployVMCmd extends BaseAsyncCreateCmd {
Iterator iter = ipsCollection.iterator();
while (iter.hasNext()) {
HashMap<String, String> ips = (HashMap<String, String>) iter.next();
Long networkId = Long.valueOf(_responseGenerator.getIdentiyId("networks", ips.get("networkid")));
Long networkId;
Network network = _networkService.getNetwork(ips.get("networkid"));
if (network != null) {
networkId = network.getId();
} else {
try {
networkId = Long.parseLong(ips.get("networkid"));
} catch(NumberFormatException e) {
throw new InvalidParameterValueException("Unable to translate and find entity with networkId: " + ips.get("networkid"));
}
}
String requestedIp = (String) ips.get("ip");
ipToNetworkMap.put(networkId, requestedIp);
}
@ -446,6 +452,7 @@ public class DeployVMCmd extends BaseAsyncCreateCmd {
if (vm != null) {
setEntityId(vm.getId());
setEntityUuid(vm.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to deploy vm");
}

View File

@ -76,9 +76,6 @@ public class CreateVolumeCmd extends BaseAsyncCreateCmd {
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getEntityTable() {
return "volumes";
}
public String getAccountName() {
return accountName;
@ -154,6 +151,7 @@ public class CreateVolumeCmd extends BaseAsyncCreateCmd {
Volume volume = _storageService.allocVolume(this);
if (volume != null) {
this.setEntityId(volume.getId());
this.setEntityUuid(volume.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create volume");
}

View File

@ -137,7 +137,7 @@ public class ExtractVolumeCmd extends BaseAsyncCmd {
Volume vol = _entityMgr.findById(Volume.class, id);
response.setId(vol.getUuid());
response.setName(vol.getName());
DataCenter zone = _entityMgr.findById(DataCenter.class, id);
DataCenter zone = _entityMgr.findById(DataCenter.class, zoneId);
response.setZoneId(zone.getUuid());
response.setZoneName(zone.getName());
response.setMode(mode);

View File

@ -67,6 +67,7 @@ public class CreateStaticRouteCmd extends BaseAsyncCreateCmd{
try {
StaticRoute result = _vpcService.createStaticRoute(getGatewayId(), getCidr());
setEntityId(result.getId());
setEntityUuid(result.getUuid());
} catch (NetworkRuleConflictException ex) {
s_logger.info("Network rule conflict: " + ex.getMessage());
s_logger.trace("Network rule conflict: ", ex);
@ -74,10 +75,6 @@ public class CreateStaticRouteCmd extends BaseAsyncCreateCmd{
}
}
@Override
public String getEntityTable() {
return "static_routes";
}
@Override
public String getEventType() {

View File

@ -124,6 +124,7 @@ public class CreateVPCCmd extends BaseAsyncCreateCmd{
getCidr(), getNetworkDomain());
if (vpc != null) {
this.setEntityId(vpc.getId());
this.setEntityUuid(vpc.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create a VPC");
}
@ -157,11 +158,6 @@ public class CreateVPCCmd extends BaseAsyncCreateCmd{
}
}
@Override
public String getEntityTable() {
return "vpc";
}
@Override
public String getEventType() {

View File

@ -103,9 +103,6 @@ public class AddVpnUserCmd extends BaseAsyncCreateCmd {
return accountId;
}
public String getEntityTable() {
return "vpn_users";
}
@Override
public String getEventDescription() {
@ -150,5 +147,6 @@ public class AddVpnUserCmd extends BaseAsyncCreateCmd {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to add vpn user");
}
setEntityId(vpnUser.getId());
setEntityUuid(vpnUser.getUuid());
}
}

View File

@ -62,10 +62,6 @@ public class CreateRemoteAccessVpnCmd extends BaseAsyncCreateCmd {
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getEntityTable() {
return "user_ip_address";
}
public Long getPublicIpId() {
return publicIpId;
}
@ -146,6 +142,11 @@ public class CreateRemoteAccessVpnCmd extends BaseAsyncCreateCmd {
RemoteAccessVpn vpn = _ravService.createRemoteAccessVpn(publicIpId, ipRange, getOpenFirewall(), getNetworkId());
if (vpn != null) {
this.setEntityId(vpn.getServerAddressId());
// find uuid for server ip address
IpAddress ipAddr = _entityMgr.findById(IpAddress.class, vpn.getServerAddressId());
if (ipAddr != null) {
this.setEntityUuid(ipAddr.getUuid());
}
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create remote access vpn");
}

View File

@ -51,9 +51,6 @@ public class CreateVpnConnectionCmd extends BaseAsyncCreateCmd {
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getEntityTable() {
return "s2s_vpn_connection";
}
public Long getVpnGatewayId() {
return vpnGatewayId;
@ -95,6 +92,7 @@ public class CreateVpnConnectionCmd extends BaseAsyncCreateCmd {
Site2SiteVpnConnection conn = _s2sVpnService.createVpnConnection(this);
if (conn != null) {
this.setEntityId(conn.getId());
this.setEntityUuid(conn.getUuid());
} else {
throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to create site to site vpn connection");
}

View File

@ -78,9 +78,6 @@ public class CreateVpnCustomerGatewayCmd extends BaseAsyncCmd {
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getEntityTable() {
return "s2s_customer_gateway";
}
public String getName() {
return name;

View File

@ -47,10 +47,6 @@ public class CreateVpnGatewayCmd extends BaseAsyncCmd {
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getEntityTable() {
return "s2s_vpn_gateway";
}
public Long getVpcId() {
return vpcId;
}

View File

@ -44,9 +44,6 @@ public class DeleteVpnConnectionCmd extends BaseAsyncCmd {
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getEntityTable() {
return "s2s_vpn_connection";
}
public Long getId() {
return id;

View File

@ -43,9 +43,6 @@ public class DeleteVpnCustomerGatewayCmd extends BaseAsyncCmd {
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getEntityTable() {
return "s2s_customer_gateway";
}
public Long getId() {
return id;

View File

@ -43,9 +43,6 @@ public class DeleteVpnGatewayCmd extends BaseAsyncCmd {
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getEntityTable() {
return "s2s_vpn_gateway";
}
public Long getId() {
return id;

View File

@ -53,9 +53,6 @@ public class ResetVpnConnectionCmd extends BaseAsyncCmd {
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getEntityTable() {
return "s2s_vpn_connection";
}
public Long getDomainId() {
return domainId;

View File

@ -78,11 +78,7 @@ public class UpdateVpnCustomerGatewayCmd extends BaseAsyncCmd {
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public String getEntityTable() {
return "s2s_customer_gateway";
}
public Long getId() {
public Long getId() {
return id;
}

View File

@ -17,7 +17,6 @@
package org.apache.cloudstack.api.response;
import org.apache.cloudstack.api.ApiConstants;
import com.cloud.utils.IdentityProxy;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.BaseResponse;

View File

@ -16,24 +16,16 @@
// under the License.
package org.apache.cloudstack.api.response;
import org.apache.cloudstack.api.ApiConstants;
import com.cloud.utils.IdentityProxy;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.BaseResponse;
public class CreateCmdResponse extends BaseResponse {
@SerializedName(ApiConstants.ID)
private IdentityProxy id = new IdentityProxy();
private String id;
public Long getId() {
return id.getValue();
public String getId() {
return id;
}
public void setId(Long id) {
this.id.setValue(id);
}
public void setIdEntityTable(String entityTable) {
this.id.setTableName(entityTable);
public void setId(String id) {
this.id = id;
}
}

View File

@ -17,7 +17,6 @@
package org.apache.cloudstack.api.response;
import org.apache.cloudstack.api.ApiConstants;
import com.cloud.utils.IdentityProxy;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.BaseResponse;

View File

@ -19,7 +19,6 @@
package org.apache.cloudstack.api.response;
import com.cloud.serializer.Param;
import com.cloud.utils.IdentityProxy;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.BaseResponse;
@ -29,7 +28,7 @@ public class S3Response extends BaseResponse {
@SerializedName(ID)
@Param(description = "The ID of the S3 configuration")
private IdentityProxy id = new IdentityProxy("s3");
private String id;
@SerializedName(S3_ACCESS_KEY)
@Param(description = "The S3 access key")
@ -135,11 +134,11 @@ public class S3Response extends BaseResponse {
@Override
public String getObjectId() {
return this.id.getValue().toString();
return this.id;
}
public void setObjectId(Long id) {
this.id.setValue(id);
public void setObjectId(String id) {
this.id = id;
}
public String getAccessKey() {

View File

@ -135,8 +135,8 @@ public class TemplateResponse extends BaseResponse implements ControlledEntityRe
@SerializedName(ApiConstants.TAGS) @Param(description="the list of resource tags associated with tempate", responseObject = ResourceTagResponse.class)
private List<ResourceTagResponse> tags;
@SerializedName(ApiConstants.SSHKEY_ENABLED) @Param(description="true if template is sshkey enabled, false otherwise")
private Boolean sshKeyEnabled;
@Override
public String getObjectId() {
@ -290,4 +290,9 @@ public class TemplateResponse extends BaseResponse implements ControlledEntityRe
public void setTags(List<ResourceTagResponse> tags) {
this.tags = tags;
}
public void setSshKeyEnabled(boolean sshKeyEnabled) {
this.sshKeyEnabled = sshKeyEnabled;
}
}

View File

@ -16,142 +16,147 @@
// under the License.
package com.cloud.bridge.auth.ec2;
import org.apache.axiom.soap.SOAPEnvelope;
import org.apache.log4j.Logger;
import org.apache.axis2.context.MessageContext;
import org.apache.axis2.engine.Handler;
import org.apache.axis2.AxisFault;
import org.apache.axis2.description.HandlerDescription;
import org.apache.axis2.description.Parameter;
import org.apache.commons.codec.binary.Base64;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.security.cert.Certificate;
import java.security.cert.CertificateFactory;
import javax.inject.Inject;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.axiom.soap.SOAPEnvelope;
import org.apache.axis2.AxisFault;
import org.apache.axis2.context.MessageContext;
import org.apache.axis2.description.HandlerDescription;
import org.apache.axis2.description.Parameter;
import org.apache.axis2.engine.Handler;
import org.apache.commons.codec.binary.Base64;
import org.apache.log4j.Logger;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import java.security.cert.Certificate;
import java.security.cert.CertificateFactory;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import com.cloud.bridge.model.UserCredentialsVO;
import com.cloud.bridge.persist.dao.UserCredentialsDaoImpl;
import com.cloud.bridge.persist.dao.UserCredentialsDao;
import com.cloud.bridge.service.UserContext;
import com.cloud.bridge.util.AuthenticationUtils;
import com.cloud.utils.component.ComponentLocator;
public class AuthenticationHandler implements Handler {
protected final static Logger logger = Logger.getLogger(AuthenticationHandler.class);
protected final UserCredentialsDaoImpl ucDao = ComponentLocator.inject(UserCredentialsDaoImpl.class);
private DocumentBuilderFactory dbf = null;
protected final static Logger logger = Logger.getLogger(AuthenticationHandler.class);
@Inject protected UserCredentialsDao ucDao;
private DocumentBuilderFactory dbf = null;
protected HandlerDescription handlerDesc = new HandlerDescription( "EC2AuthenticationHandler" );
private String name = "EC2AuthenticationHandler";
public void init( HandlerDescription handlerdesc )
{
dbf = DocumentBuilderFactory.newInstance();
dbf.setNamespaceAware( true );
protected HandlerDescription handlerDesc = new HandlerDescription( "EC2AuthenticationHandler" );
private String name = "EC2AuthenticationHandler";
this.handlerDesc = handlerdesc;
}
public String getName()
{
return name;
}
@Override
public void init( HandlerDescription handlerdesc )
{
dbf = DocumentBuilderFactory.newInstance();
dbf.setNamespaceAware( true );
public String toString()
{
return (name != null) ? name.toString() : null;
}
public HandlerDescription getHandlerDesc()
{
return handlerDesc;
}
public Parameter getParameter( String name )
{
return handlerDesc.getParameter( name );
}
/**
* For EC2 SOAP calls this function's goal is to extract the X509 certificate that is
* part of the WS-Security wrapped SOAP request. We need the cert in order to
* map it to the user's Cloud API key and Cloud Secret Key.
*/
public InvocationResponse invoke(MessageContext msgContext) throws AxisFault
{
// -> the certificate we want is embedded into the soap header
try
{ SOAPEnvelope soapEnvelope = msgContext.getEnvelope();
String xmlHeader = soapEnvelope.toString();
//System.out.println( "entire request: " + xmlHeader );
InputStream is = new ByteArrayInputStream( xmlHeader.getBytes("UTF-8"));
DocumentBuilder db = dbf.newDocumentBuilder();
Document request = db.parse( is );
NodeList certs = request.getElementsByTagNameNS( "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd", "BinarySecurityToken" );
if (0 < certs.getLength()) {
Node item = certs.item(0);
String result = new String( item.getFirstChild().getNodeValue());
byte[] certBytes = Base64.decodeBase64( result.getBytes());
this.handlerDesc = handlerdesc;
}
Certificate userCert = null;
CertificateFactory cf = CertificateFactory.getInstance( "X.509" );
ByteArrayInputStream bs = new ByteArrayInputStream( certBytes );
while (bs.available() > 0) userCert = cf.generateCertificate(bs);
//System.out.println( "cert: " + userCert.toString());
String uniqueId = AuthenticationUtils.X509CertUniqueId( userCert );
logger.debug( "X509 cert's uniqueId: " + uniqueId );
// -> find the Cloud API key and the secret key from the cert's uniqueId
/* UserCredentialsDao credentialDao = new UserCredentialsDao();
@Override
public String getName()
{
return name;
}
@Override
public String toString()
{
return (name != null) ? name.toString() : null;
}
@Override
public HandlerDescription getHandlerDesc()
{
return handlerDesc;
}
@Override
public Parameter getParameter( String name )
{
return handlerDesc.getParameter( name );
}
/**
* For EC2 SOAP calls this function's goal is to extract the X509 certificate that is
* part of the WS-Security wrapped SOAP request. We need the cert in order to
* map it to the user's Cloud API key and Cloud Secret Key.
*/
@Override
public InvocationResponse invoke(MessageContext msgContext) throws AxisFault
{
// -> the certificate we want is embedded into the soap header
try
{ SOAPEnvelope soapEnvelope = msgContext.getEnvelope();
String xmlHeader = soapEnvelope.toString();
//System.out.println( "entire request: " + xmlHeader );
InputStream is = new ByteArrayInputStream( xmlHeader.getBytes("UTF-8"));
DocumentBuilder db = dbf.newDocumentBuilder();
Document request = db.parse( is );
NodeList certs = request.getElementsByTagNameNS( "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd", "BinarySecurityToken" );
if (0 < certs.getLength()) {
Node item = certs.item(0);
String result = new String( item.getFirstChild().getNodeValue());
byte[] certBytes = Base64.decodeBase64( result.getBytes());
Certificate userCert = null;
CertificateFactory cf = CertificateFactory.getInstance( "X.509" );
ByteArrayInputStream bs = new ByteArrayInputStream( certBytes );
while (bs.available() > 0) userCert = cf.generateCertificate(bs);
//System.out.println( "cert: " + userCert.toString());
String uniqueId = AuthenticationUtils.X509CertUniqueId( userCert );
logger.debug( "X509 cert's uniqueId: " + uniqueId );
// -> find the Cloud API key and the secret key from the cert's uniqueId
/* UserCredentialsDao credentialDao = new UserCredentialsDao();
UserCredentials cloudKeys = credentialDao.getByCertUniqueId( uniqueId );
*/
UserCredentialsVO cloudKeys = ucDao.getByCertUniqueId(uniqueId);
if ( null == cloudKeys ) {
logger.error( "Cert does not map to Cloud API keys: " + uniqueId );
throw new AxisFault( "User not properly registered: Certificate does not map to Cloud API Keys", "Client.Blocked" );
}
else UserContext.current().initContext( cloudKeys.getAccessKey(), cloudKeys.getSecretKey(), cloudKeys.getAccessKey(), "SOAP Request", null );
//System.out.println( "end of cert match: " + UserContext.current().getSecretKey());
}
}
catch (AxisFault e) {
throw e;
}
catch( Exception e ) {
*/
UserCredentialsVO cloudKeys = ucDao.getByCertUniqueId(uniqueId);
if ( null == cloudKeys ) {
logger.error( "Cert does not map to Cloud API keys: " + uniqueId );
throw new AxisFault( "User not properly registered: Certificate does not map to Cloud API Keys", "Client.Blocked" );
}
else UserContext.current().initContext( cloudKeys.getAccessKey(), cloudKeys.getSecretKey(), cloudKeys.getAccessKey(), "SOAP Request", null );
//System.out.println( "end of cert match: " + UserContext.current().getSecretKey());
}
}
catch (AxisFault e) {
throw e;
}
catch( Exception e ) {
logger.error("EC2 Authentication Handler: ", e);
throw new AxisFault( "An unknown error occurred.", "Server.InternalError" );
}
throw new AxisFault( "An unknown error occurred.", "Server.InternalError" );
}
return InvocationResponse.CONTINUE;
}
}
public void revoke(MessageContext msgContext)
{
logger.info(msgContext.getEnvelope().toString());
}
public void setName(String name)
{
this.name = name;
}
@Override
public void cleanup()
{
}
public void revoke(MessageContext msgContext)
{
logger.info(msgContext.getEnvelope().toString());
}
@Override
public void flowComplete( MessageContext arg0 )
{
}
public void setName(String name)
{
this.name = name;
}
@Override
public void cleanup()
{
}
@Override
public void flowComplete( MessageContext arg0 )
{
}
}

View File

@ -18,197 +18,203 @@ package com.cloud.bridge.auth.s3;
import java.sql.SQLException;
import javax.inject.Inject;
import javax.servlet.http.HttpServletRequest;
import org.apache.axiom.soap.SOAPEnvelope;
import org.apache.axiom.soap.SOAPBody;
import org.apache.log4j.Logger;
import org.apache.axis2.context.MessageContext;
import org.apache.axis2.engine.Handler;
import org.apache.axiom.soap.SOAPEnvelope;
import org.apache.axis2.AxisFault;
import org.apache.axis2.description.HandlerDescription;
import org.apache.axis2.context.MessageContext;
import org.apache.axis2.description.HandlerDescription;
import org.apache.axis2.description.Parameter;
import org.apache.axis2.engine.Handler;
import org.apache.log4j.Logger;
import com.cloud.bridge.model.UserCredentialsVO;
import com.cloud.bridge.persist.dao.UserCredentialsDaoImpl;
import com.cloud.bridge.service.UserContext;
import com.cloud.bridge.util.S3SoapAuth;
import com.cloud.utils.component.ComponentLocator;
/*
* For SOAP compatibility.
*/
public class AuthenticationHandler implements Handler {
protected final static Logger logger = Logger.getLogger(AuthenticationHandler.class);
protected final UserCredentialsDaoImpl ucDao = ComponentLocator.inject(UserCredentialsDaoImpl.class);
protected HandlerDescription handlerDesc = new HandlerDescription( "default handler" );
private String name = "S3AuthenticationHandler";
public void init( HandlerDescription handlerdesc )
{
this.handlerDesc = handlerdesc;
}
public String getName()
{
//logger.debug( "getName entry S3AuthenticationHandler" + name );
return name;
}
protected final static Logger logger = Logger.getLogger(AuthenticationHandler.class);
@Inject UserCredentialsDaoImpl ucDao;
protected HandlerDescription handlerDesc = new HandlerDescription( "default handler" );
private String name = "S3AuthenticationHandler";
public String toString()
{
return (name != null) ? name.toString() : null;
}
public HandlerDescription getHandlerDesc()
{
return handlerDesc;
}
public Parameter getParameter( String name )
{
return handlerDesc.getParameter( name );
}
/**
* Verify the request's authentication signature by extracting all the
* necessary parts of the request, obtaining the requestor's secret key, and
* recalculating the signature.
*
* On Signature mismatch raise an AxisFault (i.e., a SoapFault) with what Amazon S3
* defines as a "Client.SignatureMismatch" error.
*
* Special case: need to deal with anonymous requests where no AWSAccessKeyId is
* given. In this case just pass the request on.
*/
public InvocationResponse invoke(MessageContext msgContext) throws AxisFault
{
String accessKey = null;
String operation = null;
String msgSig = null;
String timestamp = null;
String secretKey = null;
String temp = null;
// [A] Obtain the HttpServletRequest object
HttpServletRequest httpObj =(HttpServletRequest)msgContext.getProperty("transport.http.servletRequest");
if (null != httpObj) System.out.println("S3 SOAP auth test header access - acceptable Encoding type: "+ httpObj.getHeader("Accept-Encoding"));
// [A] Try to recalculate the signature for non-anonymous requests
try
{ SOAPEnvelope soapEnvelope = msgContext.getEnvelope();
SOAPBody soapBody = soapEnvelope.getBody();
String xmlBody = soapBody.toString();
//logger.debug( "xmlrequest: " + xmlBody );
// -> did we get here yet its an EC2 request?
int offset = xmlBody.indexOf( "http://ec2.amazonaws.com" );
if (-1 != offset) return InvocationResponse.CONTINUE;
// -> if it is anonymous request, then no access key should exist
int start = xmlBody.indexOf( "AWSAccessKeyId>" );
if (-1 == start) {
UserContext.current().initContext();
return InvocationResponse.CONTINUE;
}
temp = xmlBody.substring( start+15 );
int end = temp.indexOf( "</" );
accessKey = temp.substring( 0, end );
//logger.debug( "accesskey " + accessKey );
// -> what if we cannot find the user's key?
if (null != (secretKey = lookupSecretKey( accessKey )))
@Override
public void init( HandlerDescription handlerdesc )
{
this.handlerDesc = handlerdesc;
}
@Override
public String getName()
{
//logger.debug( "getName entry S3AuthenticationHandler" + name );
return name;
}
@Override
public String toString()
{
return (name != null) ? name.toString() : null;
}
@Override
public HandlerDescription getHandlerDesc()
{
return handlerDesc;
}
@Override
public Parameter getParameter( String name )
{
return handlerDesc.getParameter( name );
}
/**
* Verify the request's authentication signature by extracting all the
* necessary parts of the request, obtaining the requestor's secret key, and
* recalculating the signature.
*
* On Signature mismatch raise an AxisFault (i.e., a SoapFault) with what Amazon S3
* defines as a "Client.SignatureMismatch" error.
*
* Special case: need to deal with anonymous requests where no AWSAccessKeyId is
* given. In this case just pass the request on.
*/
@Override
public InvocationResponse invoke(MessageContext msgContext) throws AxisFault
{
String accessKey = null;
String operation = null;
String msgSig = null;
String timestamp = null;
String secretKey = null;
String temp = null;
// [A] Obtain the HttpServletRequest object
HttpServletRequest httpObj =(HttpServletRequest)msgContext.getProperty("transport.http.servletRequest");
if (null != httpObj) System.out.println("S3 SOAP auth test header access - acceptable Encoding type: "+ httpObj.getHeader("Accept-Encoding"));
// [A] Try to recalculate the signature for non-anonymous requests
try
{ SOAPEnvelope soapEnvelope = msgContext.getEnvelope();
SOAPBody soapBody = soapEnvelope.getBody();
String xmlBody = soapBody.toString();
//logger.debug( "xmlrequest: " + xmlBody );
// -> did we get here yet its an EC2 request?
int offset = xmlBody.indexOf( "http://ec2.amazonaws.com" );
if (-1 != offset) return InvocationResponse.CONTINUE;
// -> if it is anonymous request, then no access key should exist
int start = xmlBody.indexOf( "AWSAccessKeyId>" );
if (-1 == start) {
UserContext.current().initContext();
return InvocationResponse.CONTINUE;
}
temp = xmlBody.substring( start+15 );
int end = temp.indexOf( "</" );
accessKey = temp.substring( 0, end );
//logger.debug( "accesskey " + accessKey );
// -> what if we cannot find the user's key?
if (null != (secretKey = lookupSecretKey( accessKey )))
{
// -> if any other field is missing, then the signature will not match
if ( null != (operation = soapBody.getFirstElementLocalName()))
operation = operation.trim();
else operation = "";
//logger.debug( "operation " + operation );
start = xmlBody.indexOf( "Timestamp>" );
if ( -1 < start )
{
// -> if any other field is missing, then the signature will not match
if ( null != (operation = soapBody.getFirstElementLocalName()))
operation = operation.trim();
else operation = "";
//logger.debug( "operation " + operation );
start = xmlBody.indexOf( "Timestamp>" );
if ( -1 < start )
{
temp = xmlBody.substring( start+10 );
end = temp.indexOf( "</" );
timestamp = temp.substring( 0, end );
//logger.debug( "timestamp " + timestamp );
}
else timestamp = "";
start = xmlBody.indexOf( "Signature>" );
if ( -1 < start )
{
temp = xmlBody.substring( start+10 );
end = temp.indexOf( "</" );
msgSig = temp.substring( 0, end );
//logger.debug( "signature " + msgSig );
}
else msgSig = "";
temp = xmlBody.substring( start+10 );
end = temp.indexOf( "</" );
timestamp = temp.substring( 0, end );
//logger.debug( "timestamp " + timestamp );
}
}
catch( Exception e )
{
else timestamp = "";
start = xmlBody.indexOf( "Signature>" );
if ( -1 < start )
{
temp = xmlBody.substring( start+10 );
end = temp.indexOf( "</" );
msgSig = temp.substring( 0, end );
//logger.debug( "signature " + msgSig );
}
else msgSig = "";
}
}
catch( Exception e )
{
logger.error("Signature calculation failed due to: ", e);
throw new AxisFault( e.toString(), "Server.InternalError" );
}
// [B] Verify that the given signature matches what we calculated here
if (null == secretKey)
{
logger.error( "Unknown AWSAccessKeyId: [" + accessKey + "]" );
throw new AxisFault( "Unknown AWSAccessKeyId: [" + accessKey + "]", "Client.InvalidAccessKeyId" );
}
throw new AxisFault( e.toString(), "Server.InternalError" );
}
// [B] Verify that the given signature matches what we calculated here
if (null == secretKey)
{
logger.error( "Unknown AWSAccessKeyId: [" + accessKey + "]" );
throw new AxisFault( "Unknown AWSAccessKeyId: [" + accessKey + "]", "Client.InvalidAccessKeyId" );
}
// -> for SOAP requests the Cloud API keys are sent here and only here
S3SoapAuth.verifySignature( msgSig, operation, timestamp, accessKey, secretKey );
S3SoapAuth.verifySignature( msgSig, operation, timestamp, accessKey, secretKey );
UserContext.current().initContext( accessKey, secretKey, accessKey, "S3 SOAP request", httpObj );
return InvocationResponse.CONTINUE;
}
}
public void revoke(MessageContext msgContext)
{
logger.info(msgContext.getEnvelope().toString());
}
public void setName(String name)
{
//logger.debug( "setName entry S3AuthenticationHandler " + name );
this.name = name;
}
/**
* Given the user's access key, then obtain his secret key in the user database.
*
* @param accessKey - a unique string allocated for each registered user
* @return the secret key or null of no matching user found
*/
private String lookupSecretKey( String accessKey )
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
{
UserCredentialsVO cloudKeys = ucDao.getByAccessKey( accessKey );
if ( null == cloudKeys ) {
logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" );
return null;
}
else return cloudKeys.getSecretKey();
}
public void revoke(MessageContext msgContext)
{
logger.info(msgContext.getEnvelope().toString());
}
@Override
public void cleanup()
{
//logger.debug( "cleanup entry S3AuthenticationHandler " );
}
public void setName(String name)
{
//logger.debug( "setName entry S3AuthenticationHandler " + name );
this.name = name;
}
@Override
public void flowComplete( MessageContext arg0 )
{
//logger.debug( "flowComplete entry S3AuthenticationHandler " );
}
/**
* Given the user's access key, then obtain his secret key in the user database.
*
* @param accessKey - a unique string allocated for each registered user
* @return the secret key or null of no matching user found
*/
private String lookupSecretKey( String accessKey )
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
{
UserCredentialsVO cloudKeys = ucDao.getByAccessKey( accessKey );
if ( null == cloudKeys ) {
logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" );
return null;
}
else return cloudKeys.getSecretKey();
}
@Override
public void cleanup()
{
//logger.debug( "cleanup entry S3AuthenticationHandler " );
}
@Override
public void flowComplete( MessageContext arg0 )
{
//logger.debug( "flowComplete entry S3AuthenticationHandler " );
}
}

View File

@ -23,7 +23,6 @@ import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.bridge.model.BucketPolicyVO;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
@ -33,43 +32,43 @@ import com.cloud.utils.db.Transaction;
@Local(value={BucketPolicyDao.class})
public class BucketPolicyDaoImpl extends GenericDaoBase<BucketPolicyVO, Long> implements BucketPolicyDao{
public static final Logger logger = Logger.getLogger(BucketPolicyDaoImpl.class);
public BucketPolicyDaoImpl(){ }
public BucketPolicyDaoImpl(){ }
/**
* Since a bucket policy can exist before its bucket we also need to keep the policy's owner
* so we can restrict who modifies it (because of the "s3:CreateBucket" action).
*/
@Override
public BucketPolicyVO getByName( String bucketName ) {
SearchBuilder <BucketPolicyVO> searchByBucket = createSearchBuilder();
searchByBucket.and("BucketName", searchByBucket.entity().getBucketName(), SearchCriteria.Op.EQ);
Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
/**
* Since a bucket policy can exist before its bucket we also need to keep the policy's owner
* so we can restrict who modifies it (because of the "s3:CreateBucket" action).
*/
@Override
public BucketPolicyVO getByName( String bucketName ) {
SearchBuilder <BucketPolicyVO> searchByBucket = createSearchBuilder();
searchByBucket.and("BucketName", searchByBucket.entity().getBucketName(), SearchCriteria.Op.EQ);
Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
try {
txn.start();
SearchCriteria<BucketPolicyVO> sc = searchByBucket.create();
sc.setParameters("BucketName", bucketName);
return findOneBy(sc);
}finally {
txn.close();
}
}
@Override
public void deletePolicy( String bucketName ) {
SearchBuilder <BucketPolicyVO> deleteByBucket = createSearchBuilder();
deleteByBucket.and("BucketName", deleteByBucket.entity().getBucketName(), SearchCriteria.Op.EQ);
Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
try {
}finally {
txn.close();
}
}
@Override
public void deletePolicy( String bucketName ) {
SearchBuilder <BucketPolicyVO> deleteByBucket = createSearchBuilder();
deleteByBucket.and("BucketName", deleteByBucket.entity().getBucketName(), SearchCriteria.Op.EQ);
Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
try {
txn.start();
SearchCriteria<BucketPolicyVO> sc = deleteByBucket.create();
sc.setParameters("BucketName", bucketName);
remove(sc);
}finally {
txn.close();
}
}
}finally {
txn.close();
}
}
}

View File

@ -16,18 +16,12 @@
// under the License.
package com.cloud.bridge.persist.dao;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.bridge.model.CloudStackConfigurationVO;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
@ -37,31 +31,31 @@ import com.cloud.utils.db.Transaction;
@Component
@Local(value={CloudStackConfigurationDao.class})
public class CloudStackConfigurationDaoImpl extends GenericDaoBase<CloudStackConfigurationVO, String> implements CloudStackConfigurationDao {
private static final Logger s_logger = Logger.getLogger(CloudStackConfigurationDaoImpl.class);
final SearchBuilder<CloudStackConfigurationVO> NameSearch= createSearchBuilder();
public CloudStackConfigurationDaoImpl() { }
@Override
@DB
public String getConfigValue(String name) {
private static final Logger s_logger = Logger.getLogger(CloudStackConfigurationDaoImpl.class);
final SearchBuilder<CloudStackConfigurationVO> NameSearch= createSearchBuilder();
public CloudStackConfigurationDaoImpl() { }
@Override
@DB
public String getConfigValue(String name) {
NameSearch.and("name", NameSearch.entity().getName(), SearchCriteria.Op.EQ);
Transaction txn = Transaction.currentTxn();
try {
txn.start();
SearchCriteria<CloudStackConfigurationVO> sc = NameSearch.create();
sc.setParameters("name", name);
CloudStackConfigurationVO configItem = findOneBy(sc);
if (configItem == null) {
s_logger.warn("No configuration item found with name " + name);
return null;
}
return configItem.getValue();
try {
txn.start();
SearchCriteria<CloudStackConfigurationVO> sc = NameSearch.create();
sc.setParameters("name", name);
CloudStackConfigurationVO configItem = findOneBy(sc);
if (configItem == null) {
s_logger.warn("No configuration item found with name " + name);
return null;
}
return configItem.getValue();
}finally {
}
}
}
}
}

View File

@ -16,21 +16,13 @@
// under the License.
package com.cloud.bridge.persist.dao;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.List;
import java.util.Properties;
import javax.inject.Inject;
import org.apache.log4j.Logger;
@ -40,71 +32,69 @@ import com.cloud.bridge.model.MultipartMetaVO;
import com.cloud.bridge.service.core.s3.S3MetaDataEntry;
import com.cloud.bridge.service.core.s3.S3MultipartPart;
import com.cloud.bridge.service.core.s3.S3MultipartUpload;
import com.cloud.bridge.util.ConfigurationHelper;
import com.cloud.bridge.util.OrderedPair;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.db.Transaction;
public class MultipartLoadDao {
public static final Logger logger = Logger.getLogger(MultipartLoadDao.class);
protected final MultipartMetaDao mpartMetaDao = ComponentLocator.inject(MultipartMetaDaoImpl.class);
protected final MultiPartPartsDao mpartPartsDao = ComponentLocator.inject(MultiPartPartsDaoImpl.class);
protected final MultiPartUploadsDao mpartUploadDao = ComponentLocator.inject(MultiPartUploadsDaoImpl.class);
public MultipartLoadDao() {}
/**
* If a multipart upload exists with the uploadId value then return the non-null creators
* accessKey.
*
* @param uploadId
* @return creator of the multipart upload, and NameKey of upload
*/
public OrderedPair<String,String> multipartExits( int uploadId )
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
{
return mpartUploadDao.multipartExits(uploadId);
}
/**
* The multipart upload was either successfully completed or was aborted. In either case, we need
* to remove all of its state from the tables. Note that we have cascade deletes so all tables with
* uploadId as a foreign key are automatically cleaned.
*
* @param uploadId
*
*/
public void deleteUpload( int uploadId ) {
mpartUploadDao.deleteUpload(uploadId);
}
/**
* The caller needs to know who initiated the multipart upload.
*
* @param uploadId
* @return the access key value defining the initiator
*/
public String getInitiator( int uploadId ) {
return mpartUploadDao.getAtrributeValue("AccessKey", uploadId);
}
/**
* Create a new "in-process" multipart upload entry to keep track of its state.
*
* @param accessKey
* @param bucketName
* @param key
* @param cannedAccess
*
* @return if positive its the uploadId to be returned to the client
*
*/
public int initiateUpload( String accessKey, String bucketName, String key, String cannedAccess, S3MetaDataEntry[] meta ) {
int uploadId = -1;
Transaction txn = null;
public static final Logger logger = Logger.getLogger(MultipartLoadDao.class);
@Inject MultipartMetaDao mpartMetaDao;
@Inject MultiPartPartsDao mpartPartsDao;
@Inject MultiPartUploadsDao mpartUploadDao;
public MultipartLoadDao() {}
/**
* If a multipart upload exists with the uploadId value then return the non-null creators
* accessKey.
*
* @param uploadId
* @return creator of the multipart upload, and NameKey of upload
*/
public OrderedPair<String,String> multipartExits( int uploadId )
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
{
return mpartUploadDao.multipartExits(uploadId);
}
/**
* The multipart upload was either successfully completed or was aborted. In either case, we need
* to remove all of its state from the tables. Note that we have cascade deletes so all tables with
* uploadId as a foreign key are automatically cleaned.
*
* @param uploadId
*
*/
public void deleteUpload( int uploadId ) {
mpartUploadDao.deleteUpload(uploadId);
}
/**
* The caller needs to know who initiated the multipart upload.
*
* @param uploadId
* @return the access key value defining the initiator
*/
public String getInitiator( int uploadId ) {
return mpartUploadDao.getAtrributeValue("AccessKey", uploadId);
}
/**
* Create a new "in-process" multipart upload entry to keep track of its state.
*
* @param accessKey
* @param bucketName
* @param key
* @param cannedAccess
*
* @return if positive its the uploadId to be returned to the client
*
*/
public int initiateUpload( String accessKey, String bucketName, String key, String cannedAccess, S3MetaDataEntry[] meta ) {
int uploadId = -1;
Transaction txn = null;
try {
txn = Transaction.open(Transaction.AWSAPI_DB);
Date tod = new Date();
@ -126,26 +116,26 @@ public class MultipartLoadDao {
txn.commit();
}
}
return uploadId;
} finally {
txn.close();
}
}
/**
* Remember all the individual parts that make up the entire multipart upload so that once
* the upload is complete all the parts can be glued together into a single object. Note,
* the caller can over write an existing part.
*
* @param uploadId
* @param partNumber
* @param md5
* @param storedPath
* @param size
* @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
*/
public void savePart( int uploadId, int partNumber, String md5, String storedPath, int size ) {
/**
* Remember all the individual parts that make up the entire multipart upload so that once
* the upload is complete all the parts can be glued together into a single object. Note,
* the caller can over write an existing part.
*
* @param uploadId
* @param partNumber
* @param md5
* @param storedPath
* @param size
* @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
*/
public void savePart( int uploadId, int partNumber, String md5, String storedPath, int size ) {
try {
MultiPartPartsVO partVO = null;
@ -169,32 +159,32 @@ public class MultipartLoadDao {
} finally {
}
}
/**
* It is possible for there to be a null canned access policy defined.
* @param uploadId
* @return the value defined in the x-amz-acl header or null
*/
public String getCannedAccess( int uploadId ) {
return mpartUploadDao.getAtrributeValue("x_amz_acl", uploadId);
}
/**
* When the multipart are being composed into one object we need any meta data to be saved with
* the new re-constituted object.
*
* @param uploadId
* @return an array of S3MetaDataEntry (will be null if no meta values exist)
* @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
*/
public S3MetaDataEntry[] getMeta( int uploadId )
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
{
List<S3MetaDataEntry> metaList = new ArrayList<S3MetaDataEntry>();
int count = 0;
List<MultipartMetaVO> metaVO;
/**
* It is possible for there to be a null canned access policy defined.
* @param uploadId
* @return the value defined in the x-amz-acl header or null
*/
public String getCannedAccess( int uploadId ) {
return mpartUploadDao.getAtrributeValue("x_amz_acl", uploadId);
}
/**
* When the multipart are being composed into one object we need any meta data to be saved with
* the new re-constituted object.
*
* @param uploadId
* @return an array of S3MetaDataEntry (will be null if no meta values exist)
* @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
*/
public S3MetaDataEntry[] getMeta( int uploadId )
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
{
List<S3MetaDataEntry> metaList = new ArrayList<S3MetaDataEntry>();
int count = 0;
List<MultipartMetaVO> metaVO;
try {
metaVO = mpartMetaDao.getByUploadID(uploadId);
for (MultipartMetaVO multipartMetaVO : metaVO) {
S3MetaDataEntry oneMeta = new S3MetaDataEntry();
@ -203,42 +193,42 @@ public class MultipartLoadDao {
metaList.add( oneMeta );
count++;
}
if ( 0 == count )
return null;
else return metaList.toArray(new S3MetaDataEntry[0]);
} finally {
}
}
/**
* The result has to be ordered by key and if there is more than one identical key then all the
* identical keys are ordered by create time.
*
* @param bucketName
* @param maxParts
* @param prefix - can be null
* @param keyMarker - can be null
* @param uploadIdMarker - can be null, should only be defined if keyMarker is not-null
* @return OrderedPair<S3MultipartUpload[], isTruncated>
* @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
*/
public OrderedPair<S3MultipartUpload[],Boolean> getInitiatedUploads( String bucketName, int maxParts, String prefix, String keyMarker, String uploadIdMarker )
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
{
S3MultipartUpload[] inProgress = new S3MultipartUpload[maxParts];
boolean isTruncated = false;
int i = 0;
int pos = 1;
List<MultiPartUploadsVO> uploadList;
// -> SQL like condition requires the '%' as a wildcard marker
if (null != prefix) prefix = prefix + "%";
}
/**
* The result has to be ordered by key and if there is more than one identical key then all the
* identical keys are ordered by create time.
*
* @param bucketName
* @param maxParts
* @param prefix - can be null
* @param keyMarker - can be null
* @param uploadIdMarker - can be null, should only be defined if keyMarker is not-null
* @return OrderedPair<S3MultipartUpload[], isTruncated>
* @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
*/
public OrderedPair<S3MultipartUpload[],Boolean> getInitiatedUploads( String bucketName, int maxParts, String prefix, String keyMarker, String uploadIdMarker )
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
{
S3MultipartUpload[] inProgress = new S3MultipartUpload[maxParts];
boolean isTruncated = false;
int i = 0;
int pos = 1;
List<MultiPartUploadsVO> uploadList;
// -> SQL like condition requires the '%' as a wildcard marker
if (null != prefix) prefix = prefix + "%";
try {
uploadList = mpartUploadDao.getInitiatedUploads(bucketName, maxParts, prefix, keyMarker, uploadIdMarker);
uploadList = mpartUploadDao.getInitiatedUploads(bucketName, maxParts, prefix, keyMarker, uploadIdMarker);
for (MultiPartUploadsVO uploadsVO : uploadList) {
Calendar tod = Calendar.getInstance();
tod.setTime(uploadsVO.getCreateTime());
@ -258,33 +248,33 @@ public class MultipartLoadDao {
}finally {
}
}
/**
* Return info on a range of upload parts that have already been stored in disk.
* Note that parts can be uploaded in any order yet we must returned an ordered list
* of parts thus we use the "ORDERED BY" clause to sort the list.
*
* @param uploadId
* @param maxParts
* @param startAt
* @return an array of S3MultipartPart objects
* @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
*/
public S3MultipartPart[] getParts( int uploadId, int maxParts, int startAt )
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
{
S3MultipartPart[] parts = new S3MultipartPart[maxParts];
int i = 0;
List<MultiPartPartsVO> partsVO;
try {
partsVO = mpartPartsDao.getParts(uploadId, startAt + maxParts + 1, startAt);
for (MultiPartPartsVO partVO : partsVO) {
}
/**
* Return info on a range of upload parts that have already been stored in disk.
* Note that parts can be uploaded in any order yet we must returned an ordered list
* of parts thus we use the "ORDERED BY" clause to sort the list.
*
* @param uploadId
* @param maxParts
* @param startAt
* @return an array of S3MultipartPart objects
* @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
*/
public S3MultipartPart[] getParts( int uploadId, int maxParts, int startAt )
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
{
S3MultipartPart[] parts = new S3MultipartPart[maxParts];
int i = 0;
List<MultiPartPartsVO> partsVO;
try {
partsVO = mpartPartsDao.getParts(uploadId, startAt + maxParts + 1, startAt);
for (MultiPartPartsVO partVO : partsVO) {
Calendar tod = Calendar.getInstance();
tod.setTime(partVO.getCreateTime());
parts[i] = new S3MultipartPart();
parts[i].setPartNumber(partVO.getPartNumber());
parts[i].setEtag(partVO.getMd5());
@ -293,74 +283,74 @@ public class MultipartLoadDao {
parts[i].setPath(partVO.getStoredPath());
i++;
}
if (i < maxParts) parts = (S3MultipartPart[])resizeArray(parts,i);
return parts;
} finally {
}
}
/**
* How many parts exist after the endMarker part number?
*
* @param uploadId
* @param endMarker - can be used to see if getUploadedParts was truncated
* @return number of parts with partNumber greater than endMarker
* @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
*/
public int numParts( int uploadId, int endMarker ) {
return mpartPartsDao.getnumParts(uploadId, endMarker);
}
/**
* How many parts exist after the endMarker part number?
*
* @param uploadId
* @param endMarker - can be used to see if getUploadedParts was truncated
* @return number of parts with partNumber greater than endMarker
* @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
*/
public int numParts( int uploadId, int endMarker ) {
return mpartPartsDao.getnumParts(uploadId, endMarker);
}
/**
* A multipart upload request can have zero to many meta data entries to be applied to the
* final object. We need to remember all of the objects meta data until the multipart is complete.
*
* @param uploadId - defines an in-process multipart upload
* @param meta - an array of meta data to be assocated with the uploadId value
*
*/
private void saveMultipartMeta( int uploadId, S3MetaDataEntry[] meta ) {
if (null == meta) return;
Transaction txn = null;
/**
* A multipart upload request can have zero to many meta data entries to be applied to the
* final object. We need to remember all of the objects meta data until the multipart is complete.
*
* @param uploadId - defines an in-process multipart upload
* @param meta - an array of meta data to be assocated with the uploadId value
*
*/
private void saveMultipartMeta( int uploadId, S3MetaDataEntry[] meta ) {
if (null == meta) return;
Transaction txn = null;
try {
txn = Transaction.open(Transaction.AWSAPI_DB);
for( int i=0; i < meta.length; i++ )
{
S3MetaDataEntry entry = meta[i];
MultipartMetaVO metaVO = new MultipartMetaVO();
metaVO.setUploadID(uploadId);
metaVO.setName(entry.getName());
metaVO.setValue(entry.getValue());
metaVO=mpartMetaDao.persist(metaVO);
S3MetaDataEntry entry = meta[i];
MultipartMetaVO metaVO = new MultipartMetaVO();
metaVO.setUploadID(uploadId);
metaVO.setName(entry.getName());
metaVO.setValue(entry.getValue());
metaVO=mpartMetaDao.persist(metaVO);
}
txn.commit();
} finally {
txn.close();
}
}
}
/**
* Reallocates an array with a new size, and copies the contents
* of the old array to the new array.
*
* @param oldArray the old array, to be reallocated.
* @param newSize the new array size.
* @return A new array with the same contents.
*/
/**
* Reallocates an array with a new size, and copies the contents
* of the old array to the new array.
*
* @param oldArray the old array, to be reallocated.
* @param newSize the new array size.
* @return A new array with the same contents.
*/
private static Object resizeArray(Object oldArray, int newSize)
{
int oldSize = java.lang.reflect.Array.getLength(oldArray);
Class elementType = oldArray.getClass().getComponentType();
Object newArray = java.lang.reflect.Array.newInstance(
elementType,newSize);
int preserveLength = Math.min(oldSize,newSize);
if (preserveLength > 0)
System.arraycopy (oldArray,0,newArray,0,preserveLength);
return newArray;
int oldSize = java.lang.reflect.Array.getLength(oldArray);
Class elementType = oldArray.getClass().getComponentType();
Object newArray = java.lang.reflect.Array.newInstance(
elementType,newSize);
int preserveLength = Math.min(oldSize,newSize);
if (preserveLength > 0)
System.arraycopy (oldArray,0,newArray,0,preserveLength);
return newArray;
}
}

View File

@ -22,16 +22,13 @@ import java.util.List;
import java.util.Set;
import javax.ejb.Local;
import javax.inject.Inject;
import org.springframework.stereotype.Component;
import com.cloud.bridge.model.SBucket;
import com.cloud.bridge.model.SBucketVO;
import com.cloud.bridge.model.SObjectItemVO;
import com.cloud.bridge.model.SObjectVO;
import com.cloud.bridge.util.EntityParam;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
@ -40,18 +37,18 @@ import com.cloud.utils.db.Transaction;
@Component
@Local(value={SObjectDao.class})
public class SObjectDaoImpl extends GenericDaoBase<SObjectVO, Long> implements SObjectDao {
protected final SObjectItemDao itemDao = ComponentLocator.inject(SObjectItemDaoImpl.class);
public SObjectDaoImpl() {}
@Inject SObjectItemDao itemDao;
@Override
public SObjectVO getByNameKey(SBucketVO bucket, String nameKey) {
SObjectVO object = null;
SearchBuilder<SObjectVO> SearchByName = createSearchBuilder();
SearchByName.and("SBucketID", SearchByName.entity().getBucketID() , SearchCriteria.Op.EQ);
SearchByName.and("NameKey", SearchByName.entity().getNameKey() , SearchCriteria.Op.EQ);
Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
try {
public SObjectDaoImpl() {}
@Override
public SObjectVO getByNameKey(SBucketVO bucket, String nameKey) {
SObjectVO object = null;
SearchBuilder<SObjectVO> SearchByName = createSearchBuilder();
SearchByName.and("SBucketID", SearchByName.entity().getBucketID() , SearchCriteria.Op.EQ);
SearchByName.and("NameKey", SearchByName.entity().getNameKey() , SearchCriteria.Op.EQ);
Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
try {
txn.start();
SearchCriteria<SObjectVO> sc = SearchByName.create();
sc.setParameters("SBucketID", bucket.getId());
@ -62,23 +59,23 @@ public class SObjectDaoImpl extends GenericDaoBase<SObjectVO, Long> implements S
itemDao.getItems(object.getId()));
object.setItems(items);
}
return object;
}finally {
return object;
}finally {
txn.close();
}
}
@Override
public List<SObjectVO> listBucketObjects(SBucketVO bucket, String prefix, String marker, int maxKeys) {
StringBuffer sb = new StringBuffer();
List<Object> params = new ArrayList<Object>();
SearchBuilder<SObjectVO> SearchByBucket = createSearchBuilder();
List<SObjectVO> objects = new ArrayList<SObjectVO>();
SearchByBucket.and("SBucketID", SearchByBucket.entity().getBucketID(), SearchCriteria.Op.EQ);
SearchByBucket.and("DeletionMark", SearchByBucket.entity().getDeletionMark(), SearchCriteria.Op.NULL);
}
}
@Override
public List<SObjectVO> listBucketObjects(SBucketVO bucket, String prefix, String marker, int maxKeys) {
StringBuffer sb = new StringBuffer();
List<Object> params = new ArrayList<Object>();
SearchBuilder<SObjectVO> SearchByBucket = createSearchBuilder();
List<SObjectVO> objects = new ArrayList<SObjectVO>();
SearchByBucket.and("SBucketID", SearchByBucket.entity().getBucketID(), SearchCriteria.Op.EQ);
SearchByBucket.and("DeletionMark", SearchByBucket.entity().getDeletionMark(), SearchCriteria.Op.NULL);
Transaction txn = Transaction.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true);
try {
txn.start();
@ -91,19 +88,19 @@ public class SObjectDaoImpl extends GenericDaoBase<SObjectVO, Long> implements S
}
return objects;
}finally {
txn.close();
txn.close();
}
}
@Override
public List<SObjectVO> listAllBucketObjects(SBucketVO bucket, String prefix, String marker, int maxKeys) {
StringBuffer sb = new StringBuffer();
List<Object> params = new ArrayList<Object>();
SearchBuilder<SObjectVO> getAllBuckets = createSearchBuilder();
List<SObjectVO> objects = new ArrayList<SObjectVO>();
getAllBuckets.and("SBucketID", getAllBuckets.entity().getBucketID(), SearchCriteria.Op.EQ);
}
Transaction txn = Transaction.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true);
@Override
public List<SObjectVO> listAllBucketObjects(SBucketVO bucket, String prefix, String marker, int maxKeys) {
StringBuffer sb = new StringBuffer();
List<Object> params = new ArrayList<Object>();
SearchBuilder<SObjectVO> getAllBuckets = createSearchBuilder();
List<SObjectVO> objects = new ArrayList<SObjectVO>();
getAllBuckets.and("SBucketID", getAllBuckets.entity().getBucketID(), SearchCriteria.Op.EQ);
Transaction txn = Transaction.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true);
try {
txn.start();
SearchCriteria<SObjectVO> sc = getAllBuckets.create();
@ -115,8 +112,8 @@ public class SObjectDaoImpl extends GenericDaoBase<SObjectVO, Long> implements S
}
return objects;
}finally {
txn.close();
txn.close();
}
}
}
}

View File

@ -20,6 +20,7 @@ import java.io.IOException;
import java.io.OutputStreamWriter;
import java.util.UUID;
import javax.inject.Inject;
import javax.servlet.RequestDispatcher;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
@ -30,50 +31,48 @@ import javax.servlet.http.HttpServletResponse;
import org.apache.log4j.Logger;
import com.cloud.bridge.persist.dao.CloudStackConfigurationDao;
import com.cloud.bridge.persist.dao.CloudStackConfigurationDaoImpl;
import com.cloud.bridge.util.ConfigurationHelper;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Transaction;
import net.sf.ehcache.Cache;
@DB
public class EC2MainServlet extends HttpServlet{
private static final long serialVersionUID = 2201599478145974479L;
public static final String EC2_REST_SERVLET_PATH="/rest/AmazonEC2/";
public static final String EC2_SOAP_SERVLET_PATH="/services/AmazonEC2/";
public static final String ENABLE_EC2_API="enable.ec2.api";
private static boolean isEC2APIEnabled = false;
public static final Logger logger = Logger.getLogger(EC2MainServlet.class);
CloudStackConfigurationDao csDao = ComponentLocator.inject(CloudStackConfigurationDaoImpl.class);
/**
* We build the path to where the keystore holding the WS-Security X509 certificates
* are stored.
*/
@DB
public void init( ServletConfig config ) throws ServletException {
try{
ConfigurationHelper.preConfigureConfigPathFromServletContext(config.getServletContext());
// check if API is enabled
String value = csDao.getConfigValue(ENABLE_EC2_API);
if(value != null){
isEC2APIEnabled = Boolean.valueOf(value);
}
logger.info("Value of EC2 API Flag ::" + value);
}catch(Exception e){
throw new ServletException("Error initializing awsapi: " + e.getMessage(), e);
}
}
protected void doGet(HttpServletRequest req, HttpServletResponse resp) {
doGetOrPost(req, resp);
private static final long serialVersionUID = 2201599478145974479L;
public static final String EC2_REST_SERVLET_PATH="/rest/AmazonEC2/";
public static final String EC2_SOAP_SERVLET_PATH="/services/AmazonEC2/";
public static final String ENABLE_EC2_API="enable.ec2.api";
private static boolean isEC2APIEnabled = false;
public static final Logger logger = Logger.getLogger(EC2MainServlet.class);
@Inject CloudStackConfigurationDao csDao;
/**
* We build the path to where the keystore holding the WS-Security X509 certificates
* are stored.
*/
@Override
@DB
public void init( ServletConfig config ) throws ServletException {
try{
ConfigurationHelper.preConfigureConfigPathFromServletContext(config.getServletContext());
// check if API is enabled
String value = csDao.getConfigValue(ENABLE_EC2_API);
if(value != null){
isEC2APIEnabled = Boolean.valueOf(value);
}
logger.info("Value of EC2 API Flag ::" + value);
}catch(Exception e){
throw new ServletException("Error initializing awsapi: " + e.getMessage(), e);
}
}
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) {
doGetOrPost(req, resp);
}
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) {
doGetOrPost(req, resp);
doGetOrPost(req, resp);
}
protected void doGetOrPost(HttpServletRequest request, HttpServletResponse response) {
@ -84,30 +83,30 @@ public class EC2MainServlet extends HttpServlet{
faultResponse(response, "404" , "EC2 API is disabled.");
return;
}
if(action != null){
//We presume it's a Query/Rest call
try {
RequestDispatcher dispatcher = request.getRequestDispatcher(EC2_REST_SERVLET_PATH);
dispatcher.forward(request, response);
} catch (ServletException e) {
throw new RuntimeException(e);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
else {
try {
request.getRequestDispatcher(EC2_SOAP_SERVLET_PATH).forward(request, response);
} catch (ServletException e) {
throw new RuntimeException(e);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
if(action != null){
//We presume it's a Query/Rest call
try {
RequestDispatcher dispatcher = request.getRequestDispatcher(EC2_REST_SERVLET_PATH);
dispatcher.forward(request, response);
} catch (ServletException e) {
throw new RuntimeException(e);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
else {
try {
request.getRequestDispatcher(EC2_SOAP_SERVLET_PATH).forward(request, response);
} catch (ServletException e) {
throw new RuntimeException(e);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
private void faultResponse(HttpServletResponse response, String errorCode, String errorMessage) {
try {
OutputStreamWriter out = new OutputStreamWriter(response.getOutputStream());

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -25,29 +25,27 @@ import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.InetAddress;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Timer;
import java.util.TimerTask;
import javax.inject.Inject;
import org.apache.axis2.AxisFault;
import org.apache.log4j.Logger;
import org.apache.log4j.xml.DOMConfigurator;
import com.amazon.s3.AmazonS3SkeletonInterface;
import com.amazon.ec2.AmazonEC2SkeletonInterface;
import com.amazon.s3.AmazonS3SkeletonInterface;
import com.cloud.bridge.model.MHostVO;
import com.cloud.bridge.model.SHost;
import com.cloud.bridge.model.SHostVO;
import com.cloud.bridge.model.UserCredentialsVO;
import com.cloud.bridge.persist.dao.MHostDao;
import com.cloud.bridge.persist.dao.MHostDaoImpl;
import com.cloud.bridge.persist.dao.SHostDao;
import com.cloud.bridge.persist.dao.SHostDaoImpl;
import com.cloud.bridge.persist.dao.UserCredentialsDao;
import com.cloud.bridge.persist.dao.UserCredentialsDaoImpl;
import com.cloud.bridge.service.EC2SoapServiceImpl;
import com.cloud.bridge.service.UserInfo;
import com.cloud.bridge.service.core.ec2.EC2Engine;
@ -58,191 +56,190 @@ import com.cloud.bridge.util.ConfigurationHelper;
import com.cloud.bridge.util.DateHelper;
import com.cloud.bridge.util.NetHelper;
import com.cloud.bridge.util.OrderedPair;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Transaction;
public class ServiceProvider {
protected final static Logger logger = Logger.getLogger(ServiceProvider.class);
protected final MHostDao mhostDao = ComponentLocator.inject(MHostDaoImpl.class);
protected final SHostDao shostDao = ComponentLocator.inject(SHostDaoImpl.class);
protected final UserCredentialsDao ucDao = ComponentLocator.inject(UserCredentialsDaoImpl.class);
public final static long HEARTBEAT_INTERVAL = 10000;
protected final static Logger logger = Logger.getLogger(ServiceProvider.class);
@Inject MHostDao mhostDao;
@Inject SHostDao shostDao;
@Inject UserCredentialsDao ucDao;
private static ServiceProvider instance;
public final static long HEARTBEAT_INTERVAL = 10000;
private Map<Class<?>, Object> serviceMap = new HashMap<Class<?>, Object>();
private Timer timer = new Timer();
private MHostVO mhost;
private Properties properties;
private boolean useSubDomain = false; // use DNS sub domain for bucket name
private String serviceEndpoint = null;
private String multipartDir = null; // illegal bucket name used as a folder for storing multiparts
private String masterDomain = ".s3.amazonaws.com";
private S3Engine engine;
private EC2Engine EC2_engine = null;
private static ServiceProvider instance;
// -> cache Bucket Policies here so we don't have to load from db on every access
private Map<String,S3BucketPolicy> policyMap = new HashMap<String,S3BucketPolicy>();
private final Map<Class<?>, Object> serviceMap = new HashMap<Class<?>, Object>();
private final Timer timer = new Timer();
private MHostVO mhost;
private Properties properties;
private boolean useSubDomain = false; // use DNS sub domain for bucket name
private String serviceEndpoint = null;
private String multipartDir = null; // illegal bucket name used as a folder for storing multiparts
private String masterDomain = ".s3.amazonaws.com";
private final S3Engine engine;
private EC2Engine EC2_engine = null;
protected ServiceProvider() throws IOException {
// register service implementation object
Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
txn.close();
engine = new S3Engine();
EC2_engine = new EC2Engine();
serviceMap.put(AmazonS3SkeletonInterface.class, new S3SerializableServiceImplementation(engine));
serviceMap.put(AmazonEC2SkeletonInterface.class, new EC2SoapServiceImpl(EC2_engine));
}
// -> cache Bucket Policies here so we don't have to load from db on every access
private final Map<String,S3BucketPolicy> policyMap = new HashMap<String,S3BucketPolicy>();
public synchronized static ServiceProvider getInstance() {
if(instance == null)
{
try {
instance = new ServiceProvider();
instance.initialize();
} catch(Throwable e) {
logger.error("Unexpected exception " + e.getMessage(), e);
} finally {
}
}
return instance;
}
protected ServiceProvider() throws IOException {
// register service implementation object
Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
txn.close();
engine = new S3Engine();
EC2_engine = new EC2Engine();
serviceMap.put(AmazonS3SkeletonInterface.class, new S3SerializableServiceImplementation(engine));
serviceMap.put(AmazonEC2SkeletonInterface.class, new EC2SoapServiceImpl(EC2_engine));
}
public long getManagementHostId() {
// we want to limit mhost within its own session, id of the value will be returned
long mhostId = 0;
if(mhost != null)
mhostId = mhost.getId() != null ? mhost.getId().longValue() : 0L;
return mhostId;
}
public synchronized static ServiceProvider getInstance() {
if(instance == null)
{
try {
instance = new ServiceProvider();
instance.initialize();
} catch(Throwable e) {
logger.error("Unexpected exception " + e.getMessage(), e);
} finally {
}
}
return instance;
}
/**
* We return a 2-tuple to distinguish between two cases:
* (1) there is no entry in the map for bucketName, and (2) there is a null entry
* in the map for bucketName. In case 2, the database was inspected for the
* bucket policy but it had none so we cache it here to reduce database lookups.
* @param bucketName
* @return Integer in the tuple means: -1 if no policy defined for the bucket, 0 if one defined
* even if it is set at null.
*/
public OrderedPair<S3BucketPolicy,Integer> getBucketPolicy(String bucketName) {
public long getManagementHostId() {
// we want to limit mhost within its own session, id of the value will be returned
long mhostId = 0;
if(mhost != null)
mhostId = mhost.getId() != null ? mhost.getId().longValue() : 0L;
return mhostId;
}
if (policyMap.containsKey( bucketName )) {
S3BucketPolicy policy = policyMap.get( bucketName );
return new OrderedPair<S3BucketPolicy,Integer>( policy, 0 );
}
else return new OrderedPair<S3BucketPolicy,Integer>( null, -1 ); // For case (1) where the map has no entry for bucketName
}
/**
* We return a 2-tuple to distinguish between two cases:
* (1) there is no entry in the map for bucketName, and (2) there is a null entry
* in the map for bucketName. In case 2, the database was inspected for the
* bucket policy but it had none so we cache it here to reduce database lookups.
* @param bucketName
* @return Integer in the tuple means: -1 if no policy defined for the bucket, 0 if one defined
* even if it is set at null.
*/
public OrderedPair<S3BucketPolicy,Integer> getBucketPolicy(String bucketName) {
/**
* The policy parameter can be set to null, which means that there is no policy
* for the bucket so a database lookup is not necessary.
*
* @param bucketName
* @param policy
*/
public void setBucketPolicy(String bucketName, S3BucketPolicy policy) {
policyMap.put(bucketName, policy);
}
if (policyMap.containsKey( bucketName )) {
S3BucketPolicy policy = policyMap.get( bucketName );
return new OrderedPair<S3BucketPolicy,Integer>( policy, 0 );
}
else return new OrderedPair<S3BucketPolicy,Integer>( null, -1 ); // For case (1) where the map has no entry for bucketName
}
public void deleteBucketPolicy(String bucketName) {
policyMap.remove(bucketName);
}
/**
* The policy parameter can be set to null, which means that there is no policy
* for the bucket so a database lookup is not necessary.
*
* @param bucketName
* @param policy
*/
public void setBucketPolicy(String bucketName, S3BucketPolicy policy) {
policyMap.put(bucketName, policy);
}
public S3Engine getS3Engine() {
return engine;
}
public void deleteBucketPolicy(String bucketName) {
policyMap.remove(bucketName);
}
public EC2Engine getEC2Engine() {
return EC2_engine;
}
public S3Engine getS3Engine() {
return engine;
}
public String getMasterDomain() {
return masterDomain;
}
public EC2Engine getEC2Engine() {
return EC2_engine;
}
public boolean getUseSubDomain() {
return useSubDomain;
}
public String getMasterDomain() {
return masterDomain;
}
public String getServiceEndpoint() {
return serviceEndpoint;
}
public boolean getUseSubDomain() {
return useSubDomain;
}
public String getMultipartDir() {
return multipartDir;
}
public String getServiceEndpoint() {
return serviceEndpoint;
}
public Properties getStartupProperties() {
return properties;
}
public String getMultipartDir() {
return multipartDir;
}
public UserInfo getUserInfo(String accessKey) {
UserInfo info = new UserInfo();
Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
try {
txn.start();
UserCredentialsVO cloudKeys = ucDao.getByAccessKey( accessKey );
if ( null == cloudKeys ) {
logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" );
return null;
} else {
info.setAccessKey( accessKey );
info.setSecretKey( cloudKeys.getSecretKey());
info.setCanonicalUserId(accessKey);
info.setDescription( "S3 REST request" );
return info;
}
}finally {
txn.commit();
}
}
@DB
protected void initialize() {
if(logger.isInfoEnabled())
logger.info("Initializing ServiceProvider...");
Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
//txn.close();
public Properties getStartupProperties() {
return properties;
}
File file = ConfigurationHelper.findConfigurationFile("log4j-cloud.xml");
if(file != null) {
System.out.println("Log4j configuration from : " + file.getAbsolutePath());
DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000);
} else {
System.out.println("Configure log4j with default properties");
}
public UserInfo getUserInfo(String accessKey) {
UserInfo info = new UserInfo();
Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
try {
txn.start();
UserCredentialsVO cloudKeys = ucDao.getByAccessKey( accessKey );
if ( null == cloudKeys ) {
logger.debug( accessKey + " is not defined in the S3 service - call SetUserKeys" );
return null;
} else {
info.setAccessKey( accessKey );
info.setSecretKey( cloudKeys.getSecretKey());
info.setCanonicalUserId(accessKey);
info.setDescription( "S3 REST request" );
return info;
}
}finally {
txn.commit();
}
}
loadStartupProperties();
String hostKey = properties.getProperty("host.key");
if(hostKey == null) {
InetAddress inetAddr = NetHelper.getFirstNonLoopbackLocalInetAddress();
if(inetAddr != null)
hostKey = NetHelper.getMacAddress(inetAddr);
}
if(hostKey == null)
throw new ConfigurationException("Please configure host.key property in cloud-bridge.properites");
String host = properties.getProperty("host");
if(host == null)
host = NetHelper.getHostName();
@DB
protected void initialize() {
if(logger.isInfoEnabled())
logger.info("Initializing ServiceProvider...");
if(properties.get("bucket.dns") != null &&
((String)properties.get("bucket.dns")).equalsIgnoreCase("true")) {
useSubDomain = true;
}
Transaction txn = Transaction.open(Transaction.AWSAPI_DB);
//txn.close();
serviceEndpoint = (String)properties.get("serviceEndpoint");
masterDomain = new String( "." + serviceEndpoint );
File file = ConfigurationHelper.findConfigurationFile("log4j-cloud.xml");
if(file != null) {
System.out.println("Log4j configuration from : " + file.getAbsolutePath());
DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000);
} else {
System.out.println("Configure log4j with default properties");
}
setupHost(hostKey, host);
loadStartupProperties();
String hostKey = properties.getProperty("host.key");
if(hostKey == null) {
InetAddress inetAddr = NetHelper.getFirstNonLoopbackLocalInetAddress();
if(inetAddr != null)
hostKey = NetHelper.getMacAddress(inetAddr);
}
if(hostKey == null)
throw new ConfigurationException("Please configure host.key property in cloud-bridge.properites");
String host = properties.getProperty("host");
if(host == null)
host = NetHelper.getHostName();
// we will commit and start a new transaction to allow host info be flushed to DB
//PersistContext.flush();
if(properties.get("bucket.dns") != null &&
((String)properties.get("bucket.dns")).equalsIgnoreCase("true")) {
useSubDomain = true;
}
String localStorageRoot = properties.getProperty("storage.root");
serviceEndpoint = (String)properties.get("serviceEndpoint");
masterDomain = new String( "." + serviceEndpoint );
setupHost(hostKey, host);
// we will commit and start a new transaction to allow host info be flushed to DB
//PersistContext.flush();
String localStorageRoot = properties.getProperty("storage.root");
if (localStorageRoot != null) {
if (localStorageRoot.toLowerCase().startsWith("castor")) {
setupCAStorStorage(localStorageRoot);
@ -251,138 +248,139 @@ public class ServiceProvider {
}
}
multipartDir = properties.getProperty("storage.multipartDir");
Transaction txn1 = Transaction.open(Transaction.AWSAPI_DB);
timer.schedule(getHeartbeatTask(), HEARTBEAT_INTERVAL, HEARTBEAT_INTERVAL);
txn1.close();
multipartDir = properties.getProperty("storage.multipartDir");
if(logger.isInfoEnabled())
logger.info("ServiceProvider initialized");
}
Transaction txn1 = Transaction.open(Transaction.AWSAPI_DB);
timer.schedule(getHeartbeatTask(), HEARTBEAT_INTERVAL, HEARTBEAT_INTERVAL);
txn1.close();
private void loadStartupProperties() {
File propertiesFile = ConfigurationHelper.findConfigurationFile("cloud-bridge.properties");
properties = new Properties();
if(propertiesFile != null) {
try {
properties.load(new FileInputStream(propertiesFile));
} catch (FileNotFoundException e) {
logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e);
} catch (IOException e) {
logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e);
}
logger.info("Use startup properties file: " + propertiesFile.getAbsolutePath());
} else {
if(logger.isInfoEnabled())
logger.info("Startup properties is not found.");
}
}
private TimerTask getHeartbeatTask() {
return new TimerTask() {
@Override
public void run() {
try {
mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
mhostDao.updateHeartBeat(mhost);
} catch(Throwable e){
logger.error("Unexpected exception " + e.getMessage(), e);
} finally {
}
}
};
}
private void setupHost(String hostKey, String host) {
mhost = mhostDao.getByHostKey(hostKey);
if(mhost == null) {
mhost = new MHostVO();
mhost.setHostKey(hostKey);
mhost.setHost(host);
mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
mhost = mhostDao.persist(mhost);
} else {
mhost.setHost(host);
mhostDao.update(mhost.getId(), mhost);
}
}
private void setupLocalStorage(String storageRoot) {
SHostVO shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot);
if(shost == null) {
shost = new SHostVO();
shost.setMhost(mhost);
shost.setMhostid(mhost.getId());
shost.setHostType(SHost.STORAGE_HOST_TYPE_LOCAL);
shost.setHost(NetHelper.getHostName());
shost.setExportRoot(storageRoot);
shostDao.persist(shost);
}
}
private void setupCAStorStorage(String storageRoot) {
SHostVO shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot);
if(shost == null) {
shost = new SHostVO();
shost.setMhost(mhost);
shost.setMhostid(mhost.getId());
shost.setHostType(SHost.STORAGE_HOST_TYPE_CASTOR);
shost.setHost(NetHelper.getHostName());
shost.setExportRoot(storageRoot);
shostDao.persist(shost);
}
if(logger.isInfoEnabled())
logger.info("ServiceProvider initialized");
}
public void shutdown() {
timer.cancel();
private void loadStartupProperties() {
File propertiesFile = ConfigurationHelper.findConfigurationFile("cloud-bridge.properties");
properties = new Properties();
if(propertiesFile != null) {
try {
properties.load(new FileInputStream(propertiesFile));
} catch (FileNotFoundException e) {
logger.warn("Unable to open properties file: " + propertiesFile.getAbsolutePath(), e);
} catch (IOException e) {
logger.warn("Unable to read properties file: " + propertiesFile.getAbsolutePath(), e);
}
if(logger.isInfoEnabled())
logger.info("ServiceProvider stopped");
}
logger.info("Use startup properties file: " + propertiesFile.getAbsolutePath());
} else {
if(logger.isInfoEnabled())
logger.info("Startup properties is not found.");
}
}
@SuppressWarnings("unchecked")
private static <T> T getProxy(Class<?> serviceInterface, final T serviceObject) {
return (T) Proxy.newProxyInstance(serviceObject.getClass().getClassLoader(),
new Class[] { serviceInterface },
new InvocationHandler() {
public Object invoke(Object proxy, Method method,
Object[] args) throws Throwable {
Object result = null;
try {
result = method.invoke(serviceObject, args);
} catch (Throwable e) {
// Rethrow the exception to Axis:
// Check if the exception is an AxisFault or a
// RuntimeException
// enveloped AxisFault and if so, pass it on as
// such. Otherwise
// log to help debugging and throw as is.
if (e.getCause() != null
&& e.getCause() instanceof AxisFault)
throw e.getCause();
else if (e.getCause() != null
&& e.getCause().getCause() != null
&& e.getCause().getCause() instanceof AxisFault)
throw e.getCause().getCause();
else {
logger.warn(
"Unhandled exception " + e.getMessage(),
e);
throw e;
}
} finally {
}
return result;
private TimerTask getHeartbeatTask() {
return new TimerTask() {
@Override
public void run() {
try {
mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
mhostDao.updateHeartBeat(mhost);
} catch(Throwable e){
logger.error("Unexpected exception " + e.getMessage(), e);
} finally {
}
}
};
}
private void setupHost(String hostKey, String host) {
mhost = mhostDao.getByHostKey(hostKey);
if(mhost == null) {
mhost = new MHostVO();
mhost.setHostKey(hostKey);
mhost.setHost(host);
mhost.setLastHeartbeatTime(DateHelper.currentGMTTime());
mhost = mhostDao.persist(mhost);
} else {
mhost.setHost(host);
mhostDao.update(mhost.getId(), mhost);
}
}
private void setupLocalStorage(String storageRoot) {
SHostVO shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot);
if(shost == null) {
shost = new SHostVO();
shost.setMhost(mhost);
shost.setMhostid(mhost.getId());
shost.setHostType(SHost.STORAGE_HOST_TYPE_LOCAL);
shost.setHost(NetHelper.getHostName());
shost.setExportRoot(storageRoot);
shostDao.persist(shost);
}
}
private void setupCAStorStorage(String storageRoot) {
SHostVO shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot);
if(shost == null) {
shost = new SHostVO();
shost.setMhost(mhost);
shost.setMhostid(mhost.getId());
shost.setHostType(SHost.STORAGE_HOST_TYPE_CASTOR);
shost.setHost(NetHelper.getHostName());
shost.setExportRoot(storageRoot);
shostDao.persist(shost);
}
}
public void shutdown() {
timer.cancel();
if(logger.isInfoEnabled())
logger.info("ServiceProvider stopped");
}
@SuppressWarnings("unchecked")
private static <T> T getProxy(Class<?> serviceInterface, final T serviceObject) {
return (T) Proxy.newProxyInstance(serviceObject.getClass().getClassLoader(),
new Class[] { serviceInterface },
new InvocationHandler() {
@Override
public Object invoke(Object proxy, Method method,
Object[] args) throws Throwable {
Object result = null;
try {
result = method.invoke(serviceObject, args);
} catch (Throwable e) {
// Rethrow the exception to Axis:
// Check if the exception is an AxisFault or a
// RuntimeException
// enveloped AxisFault and if so, pass it on as
// such. Otherwise
// log to help debugging and throw as is.
if (e.getCause() != null
&& e.getCause() instanceof AxisFault)
throw e.getCause();
else if (e.getCause() != null
&& e.getCause().getCause() != null
&& e.getCause().getCause() instanceof AxisFault)
throw e.getCause().getCause();
else {
logger.warn(
"Unhandled exception " + e.getMessage(),
e);
throw e;
}
});
}
} finally {
}
return result;
}
});
}
@SuppressWarnings("unchecked")
public <T> T getServiceImpl(Class<?> serviceInterface) {
return getProxy(serviceInterface, (T)serviceMap.get(serviceInterface));
}
@SuppressWarnings("unchecked")
public <T> T getServiceImpl(Class<?> serviceInterface) {
return getProxy(serviceInterface, (T)serviceMap.get(serviceInterface));
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -25,7 +25,7 @@
</listener>
<context-param>
<param-name>contextConfigLocation</param-name>
<param-value>classpath:applicationContext.xml</param-value>
<param-value>classpath:applicationContext.xml, classpath:componentContext.xml</param-value>
</context-param>
<servlet>

View File

@ -1,23 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# bitmap of permissions at the end of each classname, 1 = ADMIN, 2 =
# RESOURCE_DOMAIN_ADMIN, 4 = DOMAIN_ADMIN, 8 = USER
# Please standardize naming conventions to camel-case (even for acronyms).
# CloudStack API Discovery service command
listApis=15

View File

@ -1,3 +1,21 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:context="http://www.springframework.org/schema/context"
@ -13,6 +31,7 @@
http://www.springframework.org/schema/context/spring-context-3.0.xsd">
<context:annotation-config />
<context:component-scan base-package="org.apache.cloudstack, com.cloud" />
<!--
@ -31,6 +50,7 @@
<!--
RPC/Async/EventBus
-->
<bean id="onwireRegistry" class="org.apache.cloudstack.framework.serializer.OnwireClassRegistry"
init-method="scan" >
<property name="packages">

View File

@ -0,0 +1,70 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:context="http://www.springframework.org/schema/context"
xmlns:tx="http://www.springframework.org/schema/tx"
xmlns:aop="http://www.springframework.org/schema/aop"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
http://www.springframework.org/schema/tx
http://www.springframework.org/schema/tx/spring-tx-3.0.xsd
http://www.springframework.org/schema/aop
http://www.springframework.org/schema/aop/spring-aop-3.0.xsd
http://www.springframework.org/schema/context
http://www.springframework.org/schema/context/spring-context-3.0.xsd">
<!--
Configurable components
-->
<bean id="management-server" class ="com.cloud.server.ManagementServerExtImpl" />
<bean id="configuration-server" class="com.cloud.server.ConfigurationServerImpl" />
<!--
Network Elements
-->
<bean id="VirtualRouter" class="com.cloud.network.element.VirtualRouterElement">
<property name="name" value="VirtualRouter"/>
</bean>
<bean id="Ovs" class="com.cloud.network.element.OvsElement">
<property name="name" value="Ovs"/>
</bean>
<bean id="ExternalDhcpServer" class="com.cloud.network.element.ExternalDhcpElement">
<property name="name" value="ExternalDhcpServer"/>
</bean>
<bean id="BareMetal" class="com.cloud.network.element.BareMetalElement">
<property name="name" value="BareMetal"/>
</bean>
<bean id="SecurityGroupProvider" class="com.cloud.network.element.SecurityGroupElement">
<property name="name" value="SecurityGroupProvider"/>
</bean>
<bean id="VpcVirtualRouter" class="com.cloud.network.element.VpcVirtualRouterElement">
<property name="name" value="VpcVirtualRouter"/>
</bean>
<bean id="NiciraNvp" class="com.cloud.network.element.NiciraNvpElement">
<property name="name" value="NiciraNvp"/>
</bean>
<!--
Adapters
-->
<bean id="StaticRoleBasedAPIAccessChecker" class="org.apache.cloudstack.acl.StaticRoleBasedAPIAccessChecker"/>
</beans>

View File

@ -53,12 +53,9 @@ under the License.
<dao name="Configuration configuration server" class="com.cloud.configuration.dao.ConfigurationDaoImpl">
<param name="premium">true</param>
</dao>
<adapters key="org.apache.cloudstack.acl.APIAccessChecker">
<adapters key="org.apache.cloudstack.acl.APIChecker">
<adapter name="StaticRoleBasedAPIAccessChecker" class="org.apache.cloudstack.acl.StaticRoleBasedAPIAccessChecker"/>
</adapters>
<adapters key="org.apache.cloudstack.discovery.ApiDiscoveryService">
<adapter name="ApiDiscoveryService" class="org.apache.cloudstack.discovery.ApiDiscoveryServiceImpl"/>
</adapters>
<adapters key="com.cloud.agent.manager.allocator.HostAllocator">
<adapter name="FirstFitRouting" class="com.cloud.agent.manager.allocator.impl.FirstFitRoutingAllocator"/>
<!--adapter name="FirstFitRouting" class="com.cloud.agent.manager.allocator.impl.RecreateHostAllocator"/-->

View File

@ -40,8 +40,8 @@ import com.cloud.agent.api.PingStorageCommand;
import com.cloud.agent.api.ReadyAnswer;
import com.cloud.agent.api.ReadyCommand;
import com.cloud.agent.api.SecStorageFirewallCfgCommand;
import com.cloud.agent.api.SecStorageSetupCommand;
import com.cloud.agent.api.SecStorageFirewallCfgCommand.PortConfig;
import com.cloud.agent.api.SecStorageSetupCommand;
import com.cloud.agent.api.SecStorageVMSetupCommand;
import com.cloud.agent.api.StartupCommand;
import com.cloud.agent.api.StartupStorageCommand;
@ -54,7 +54,6 @@ import com.cloud.agent.api.storage.UploadCommand;
import com.cloud.agent.api.storage.ssCommand;
import com.cloud.host.Host;
import com.cloud.host.Host.Type;
import com.cloud.resource.ServerResource;
import com.cloud.resource.ServerResourceBase;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.StoragePoolType;
@ -65,7 +64,7 @@ import com.cloud.storage.template.TemplateInfo;
import com.cloud.storage.template.UploadManager;
import com.cloud.storage.template.UploadManagerImpl;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.NetUtils;
import com.cloud.utils.net.NfsUtils;
@ -81,10 +80,10 @@ import com.cloud.utils.script.Script;
public class CifsSecondaryStorageResource extends ServerResourceBase implements SecondaryStorageResource {
private static final Logger s_logger = Logger.getLogger(CifsSecondaryStorageResource.class);
int _timeout;
String _instance;
String _parent;
String _dc;
String _pod;
String _guid;
@ -94,27 +93,27 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements
StorageLayer _storage;
boolean _inSystemVM = false;
boolean _sslCopy = false;
Random _rand = new Random(System.currentTimeMillis());
DownloadManager _dlMgr;
UploadManager _upldMgr;
private String _configSslScr;
private String _configAuthScr;
private String _configIpFirewallScr;
private String _publicIp;
private String _hostname;
private String _localgw;
private String _eth1mask;
private String _eth1ip;
private String _configSslScr;
private String _configAuthScr;
private String _configIpFirewallScr;
private String _publicIp;
private String _hostname;
private String _localgw;
private String _eth1mask;
private String _eth1ip;
@Override
public void disconnected() {
if (_parent != null && !_inSystemVM) {
Script script = new Script(!_inSystemVM, "umount", _timeout, s_logger);
script.add(_parent);
script.execute();
File file = new File(_parent);
file.delete();
}
@ -133,104 +132,104 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements
} else if(cmd instanceof DeleteEntityDownloadURLCommand){
return _upldMgr.handleDeleteEntityDownloadURLCommand((DeleteEntityDownloadURLCommand)cmd);
} else if (cmd instanceof GetStorageStatsCommand) {
return execute((GetStorageStatsCommand)cmd);
return execute((GetStorageStatsCommand)cmd);
} else if (cmd instanceof CheckHealthCommand) {
return new CheckHealthAnswer((CheckHealthCommand)cmd, true);
} else if (cmd instanceof DeleteTemplateCommand) {
return execute((DeleteTemplateCommand) cmd);
return execute((DeleteTemplateCommand) cmd);
} else if (cmd instanceof ReadyCommand) {
return new ReadyAnswer((ReadyCommand)cmd);
} else if (cmd instanceof SecStorageFirewallCfgCommand){
return execute((SecStorageFirewallCfgCommand)cmd);
return execute((SecStorageFirewallCfgCommand)cmd);
} else if (cmd instanceof SecStorageVMSetupCommand){
return execute((SecStorageVMSetupCommand)cmd);
return execute((SecStorageVMSetupCommand)cmd);
} else if (cmd instanceof SecStorageSetupCommand){
return new Answer(cmd, true, "success");
} else {
return Answer.createUnsupportedCommandAnswer(cmd);
}
}
private Answer execute(SecStorageVMSetupCommand cmd) {
if (!_inSystemVM){
return new Answer(cmd, true, null);
}
boolean success = true;
StringBuilder result = new StringBuilder();
for (String cidr: cmd.getAllowedInternalSites()) {
String tmpresult = allowOutgoingOnPrivate(cidr);
if (tmpresult != null) {
result.append(", ").append(tmpresult);
success = false;
}
}
if (success) {
if (cmd.getCopyPassword() != null && cmd.getCopyUserName() != null) {
String tmpresult = configureAuth(cmd.getCopyUserName(), cmd.getCopyPassword());
if (tmpresult != null) {
result.append("Failed to configure auth for copy ").append(tmpresult);
success = false;
}
}
}
return new Answer(cmd, success, result.toString());
if (!_inSystemVM){
return new Answer(cmd, true, null);
}
boolean success = true;
StringBuilder result = new StringBuilder();
for (String cidr: cmd.getAllowedInternalSites()) {
String tmpresult = allowOutgoingOnPrivate(cidr);
if (tmpresult != null) {
result.append(", ").append(tmpresult);
success = false;
}
}
if (success) {
if (cmd.getCopyPassword() != null && cmd.getCopyUserName() != null) {
String tmpresult = configureAuth(cmd.getCopyUserName(), cmd.getCopyPassword());
if (tmpresult != null) {
result.append("Failed to configure auth for copy ").append(tmpresult);
success = false;
}
}
}
return new Answer(cmd, success, result.toString());
}
}
private String allowOutgoingOnPrivate(String destCidr) {
Script command = new Script("/bin/bash", s_logger);
String intf = "eth1";
command.add("-c");
command.add("iptables -I OUTPUT -o " + intf + " -d " + destCidr + " -p tcp -m state --state NEW -m tcp -j ACCEPT");
String result = command.execute();
if (result != null) {
s_logger.warn("Error in allowing outgoing to " + destCidr + ", err=" + result );
return "Error in allowing outgoing to " + destCidr + ", err=" + result;
}
addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, destCidr);
return null;
}
Script command = new Script("/bin/bash", s_logger);
String intf = "eth1";
command.add("-c");
command.add("iptables -I OUTPUT -o " + intf + " -d " + destCidr + " -p tcp -m state --state NEW -m tcp -j ACCEPT");
private Answer execute(SecStorageFirewallCfgCommand cmd) {
if (!_inSystemVM){
return new Answer(cmd, true, null);
}
String result = command.execute();
if (result != null) {
s_logger.warn("Error in allowing outgoing to " + destCidr + ", err=" + result );
return "Error in allowing outgoing to " + destCidr + ", err=" + result;
}
addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, destCidr);
return null;
}
List<String> ipList = new ArrayList<String>();
for (PortConfig pCfg:cmd.getPortConfigs()){
if (pCfg.isAdd()) {
ipList.add(pCfg.getSourceIp());
}
}
boolean success = true;
String result;
result = configureIpFirewall(ipList);
if (result !=null)
success = false;
return new Answer(cmd, success, result);
}
protected GetStorageStatsAnswer execute(final GetStorageStatsCommand cmd) {
private Answer execute(SecStorageFirewallCfgCommand cmd) {
if (!_inSystemVM){
return new Answer(cmd, true, null);
}
List<String> ipList = new ArrayList<String>();
for (PortConfig pCfg:cmd.getPortConfigs()){
if (pCfg.isAdd()) {
ipList.add(pCfg.getSourceIp());
}
}
boolean success = true;
String result;
result = configureIpFirewall(ipList);
if (result !=null)
success = false;
return new Answer(cmd, success, result);
}
protected GetStorageStatsAnswer execute(final GetStorageStatsCommand cmd) {
final long usedSize = getUsedSize();
final long totalSize = getTotalSize();
if (usedSize == -1 || totalSize == -1) {
return new GetStorageStatsAnswer(cmd, "Unable to get storage stats");
return new GetStorageStatsAnswer(cmd, "Unable to get storage stats");
} else {
return new GetStorageStatsAnswer(cmd, totalSize, usedSize) ;
return new GetStorageStatsAnswer(cmd, totalSize, usedSize) ;
}
}
@Override
public String getRootDir(ssCommand cmd){
return null;
}
protected Answer execute(final DeleteTemplateCommand cmd) {
String relativeTemplatePath = cmd.getTemplatePath();
String parent = _parent;
@ -278,15 +277,15 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements
}
return new Answer(cmd, true, null);
}
protected long getUsedSize() {
return _storage.getUsedSpace(_parent);
return _storage.getUsedSpace(_parent);
}
protected long getTotalSize() {
return _storage.getTotalSpace(_parent);
return _storage.getTotalSpace(_parent);
}
protected long convertFilesystemSize(final String size) {
if (size == null || size.isEmpty()) {
return -1;
@ -305,25 +304,25 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements
return (long)(Double.parseDouble(size.substring(0, size.length() - 1)) * multiplier);
}
@Override
public Type getType() {
return Host.Type.SecondaryStorage;
}
@Override
public PingCommand getCurrentStatus(final long id) {
return new PingStorageCommand(Host.Type.Storage, id, new HashMap<String, Boolean>());
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_eth1ip = (String)params.get("eth1ip");
_eth1ip = (String)params.get("eth1ip");
if (_eth1ip != null) { //can only happen inside service vm
params.put("private.network.device", "eth1");
params.put("private.network.device", "eth1");
} else {
s_logger.warn("Wait, what's going on? eth1ip is null!!");
s_logger.warn("Wait, what's going on? eth1ip is null!!");
}
String eth2ip = (String) params.get("eth2ip");
if (eth2ip != null) {
@ -331,23 +330,23 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements
}
_publicIp = (String) params.get("eth2ip");
_hostname = (String) params.get("name");
super.configure(name, params);
_params = params;
String value = (String)params.get("scripts.timeout");
_timeout = NumbersUtil.parseInt(value, 1440) * 1000;
_storage = (StorageLayer)params.get(StorageLayer.InstanceConfigKey);
if (_storage == null) {
value = (String)params.get(StorageLayer.ClassConfigKey);
if (value == null) {
value = "com.cloud.storage.JavaStorageLayer";
}
try {
Class<?> clazz = Class.forName(value);
_storage = (StorageLayer)ComponentLocator.inject(clazz);
_storage = (StorageLayer)ComponentContext.inject(clazz);
_storage.configure("StorageLayer", params);
} catch (ClassNotFoundException e) {
throw new ConfigurationException("Unable to find class " + value);
@ -362,30 +361,30 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements
if (_configSslScr != null) {
s_logger.info("config_auth.sh found in " + _configAuthScr);
}
_configIpFirewallScr = Script.findScript(getDefaultScriptsDir(), "ipfirewall.sh");
if (_configIpFirewallScr != null) {
s_logger.info("_configIpFirewallScr found in " + _configIpFirewallScr);
}
_guid = (String)params.get("guid");
if (_guid == null) {
throw new ConfigurationException("Unable to find the guid");
}
_dc = (String)params.get("zone");
if (_dc == null) {
throw new ConfigurationException("Unable to find the zone");
}
_pod = (String)params.get("pod");
_instance = (String)params.get("instance");
_mountParent = (String)params.get("mount.parent");
if (_mountParent == null) {
_mountParent = File.separator + "mnt";
}
if (_instance != null) {
_mountParent = _mountParent + File.separator + _instance;
}
@ -394,63 +393,63 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements
if (_nfsPath == null) {
throw new ConfigurationException("Unable to find mount.path");
}
String inSystemVM = (String)params.get("secondary.storage.vm");
if (inSystemVM == null || "true".equalsIgnoreCase(inSystemVM)) {
_inSystemVM = true;
_inSystemVM = true;
_localgw = (String)params.get("localgw");
if (_localgw != null) { //can only happen inside service vm
_eth1mask = (String)params.get("eth1mask");
String internalDns1 = (String)params.get("dns1");
String internalDns2 = (String)params.get("dns2");
_eth1mask = (String)params.get("eth1mask");
String internalDns1 = (String)params.get("dns1");
String internalDns2 = (String)params.get("dns2");
if (internalDns1 == null) {
s_logger.warn("No DNS entry found during configuration of NfsSecondaryStorage");
} else {
addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, internalDns1);
}
String mgmtHost = (String)params.get("host");
String nfsHost = NfsUtils.getHostPart(_nfsPath);
if (nfsHost == null) {
s_logger.error("Invalid or corrupt nfs url " + _nfsPath);
throw new CloudRuntimeException("Unable to determine host part of nfs path");
}
try {
InetAddress nfsHostAddr = InetAddress.getByName(nfsHost);
nfsHost = nfsHostAddr.getHostAddress();
} catch (UnknownHostException uhe) {
s_logger.error("Unable to resolve nfs host " + nfsHost);
throw new CloudRuntimeException("Unable to resolve nfs host to an ip address " + nfsHost);
}
addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, nfsHost);
addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, mgmtHost);
if (internalDns2 != null) {
addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, internalDns2);
}
if (internalDns1 == null) {
s_logger.warn("No DNS entry found during configuration of NfsSecondaryStorage");
} else {
addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, internalDns1);
}
String mgmtHost = (String)params.get("host");
String nfsHost = NfsUtils.getHostPart(_nfsPath);
if (nfsHost == null) {
s_logger.error("Invalid or corrupt nfs url " + _nfsPath);
throw new CloudRuntimeException("Unable to determine host part of nfs path");
}
try {
InetAddress nfsHostAddr = InetAddress.getByName(nfsHost);
nfsHost = nfsHostAddr.getHostAddress();
} catch (UnknownHostException uhe) {
s_logger.error("Unable to resolve nfs host " + nfsHost);
throw new CloudRuntimeException("Unable to resolve nfs host to an ip address " + nfsHost);
}
addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, nfsHost);
addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, mgmtHost);
if (internalDns2 != null) {
addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, internalDns2);
}
}
String useSsl = (String)params.get("sslcopy");
if (useSsl != null) {
_sslCopy = Boolean.parseBoolean(useSsl);
if (_sslCopy) {
configureSSL();
}
_sslCopy = Boolean.parseBoolean(useSsl);
if (_sslCopy) {
configureSSL();
}
}
startAdditionalServices();
_params.put("install.numthreads", "50");
_params.put("secondary.storage.vm", "true");
startAdditionalServices();
_params.put("install.numthreads", "50");
_params.put("secondary.storage.vm", "true");
}
_parent = mount(_nfsPath, _mountParent);
if (_parent == null) {
throw new ConfigurationException("Unable to create mount point");
}
s_logger.info("Mount point established at " + _parent);
try {
_params.put("template.parent", _parent);
_params.put(StorageLayer.InstanceConfigKey, _storage);
@ -464,98 +463,98 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements
}
return true;
}
private void startAdditionalServices() {
Script command = new Script("/bin/bash", s_logger);
command.add("-c");
command.add("if [ -f /etc/init.d/ssh ]; then service ssh restart; else service sshd restart; fi ");
String result = command.execute();
if (result != null) {
s_logger.warn("Error in starting sshd service err=" + result );
}
command = new Script("/bin/bash", s_logger);
command.add("-c");
command.add("iptables -I INPUT -i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT");
result = command.execute();
if (result != null) {
s_logger.warn("Error in opening up ssh port err=" + result );
}
}
private void addRouteToInternalIpOrCidr(String localgw, String eth1ip, String eth1mask, String destIpOrCidr) {
s_logger.debug("addRouteToInternalIp: localgw=" + localgw + ", eth1ip=" + eth1ip + ", eth1mask=" + eth1mask + ",destIp=" + destIpOrCidr);
if (destIpOrCidr == null) {
s_logger.debug("addRouteToInternalIp: destIp is null");
return;
}
if (!NetUtils.isValidIp(destIpOrCidr) && !NetUtils.isValidCIDR(destIpOrCidr)){
s_logger.warn(" destIp is not a valid ip address or cidr destIp=" + destIpOrCidr);
return;
}
boolean inSameSubnet = false;
if (NetUtils.isValidIp(destIpOrCidr)) {
if (eth1ip != null && eth1mask != null) {
inSameSubnet = NetUtils.sameSubnet(eth1ip, destIpOrCidr, eth1mask);
} else {
s_logger.warn("addRouteToInternalIp: unable to determine same subnet: _eth1ip=" + eth1ip + ", dest ip=" + destIpOrCidr + ", _eth1mask=" + eth1mask);
}
} else {
inSameSubnet = NetUtils.isNetworkAWithinNetworkB(destIpOrCidr, NetUtils.ipAndNetMaskToCidr(eth1ip, eth1mask));
}
if (inSameSubnet) {
s_logger.debug("addRouteToInternalIp: dest ip " + destIpOrCidr + " is in the same subnet as eth1 ip " + eth1ip);
return;
}
Script command = new Script("/bin/bash", s_logger);
command.add("-c");
command.add("ip route delete " + destIpOrCidr);
command.execute();
command = new Script("/bin/bash", s_logger);
command.add("-c");
command.add("ip route add " + destIpOrCidr + " via " + localgw);
String result = command.execute();
if (result != null) {
s_logger.warn("Error in configuring route to internal ip err=" + result );
} else {
s_logger.debug("addRouteToInternalIp: added route to internal ip=" + destIpOrCidr + " via " + localgw);
}
Script command = new Script("/bin/bash", s_logger);
command.add("-c");
command.add("if [ -f /etc/init.d/ssh ]; then service ssh restart; else service sshd restart; fi ");
String result = command.execute();
if (result != null) {
s_logger.warn("Error in starting sshd service err=" + result );
}
command = new Script("/bin/bash", s_logger);
command.add("-c");
command.add("iptables -I INPUT -i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT");
result = command.execute();
if (result != null) {
s_logger.warn("Error in opening up ssh port err=" + result );
}
}
private void configureSSL() {
Script command = new Script(_configSslScr);
command.add(_publicIp);
command.add(_hostname);
String result = command.execute();
if (result != null) {
s_logger.warn("Unable to configure httpd to use ssl");
}
}
private String configureAuth(String user, String passwd) {
Script command = new Script(_configAuthScr);
command.add(user);
command.add(passwd);
String result = command.execute();
if (result != null) {
s_logger.warn("Unable to configure httpd to use auth");
}
return result;
}
private String configureIpFirewall(List<String> ipList){
Script command = new Script(_configIpFirewallScr);
for (String ip : ipList){
command.add(ip);
}
String result = command.execute();
if (result != null) {
s_logger.warn("Unable to configure firewall for command : " +command);
}
return result;
}
protected String mount(String path, String parent) {
private void addRouteToInternalIpOrCidr(String localgw, String eth1ip, String eth1mask, String destIpOrCidr) {
s_logger.debug("addRouteToInternalIp: localgw=" + localgw + ", eth1ip=" + eth1ip + ", eth1mask=" + eth1mask + ",destIp=" + destIpOrCidr);
if (destIpOrCidr == null) {
s_logger.debug("addRouteToInternalIp: destIp is null");
return;
}
if (!NetUtils.isValidIp(destIpOrCidr) && !NetUtils.isValidCIDR(destIpOrCidr)){
s_logger.warn(" destIp is not a valid ip address or cidr destIp=" + destIpOrCidr);
return;
}
boolean inSameSubnet = false;
if (NetUtils.isValidIp(destIpOrCidr)) {
if (eth1ip != null && eth1mask != null) {
inSameSubnet = NetUtils.sameSubnet(eth1ip, destIpOrCidr, eth1mask);
} else {
s_logger.warn("addRouteToInternalIp: unable to determine same subnet: _eth1ip=" + eth1ip + ", dest ip=" + destIpOrCidr + ", _eth1mask=" + eth1mask);
}
} else {
inSameSubnet = NetUtils.isNetworkAWithinNetworkB(destIpOrCidr, NetUtils.ipAndNetMaskToCidr(eth1ip, eth1mask));
}
if (inSameSubnet) {
s_logger.debug("addRouteToInternalIp: dest ip " + destIpOrCidr + " is in the same subnet as eth1 ip " + eth1ip);
return;
}
Script command = new Script("/bin/bash", s_logger);
command.add("-c");
command.add("ip route delete " + destIpOrCidr);
command.execute();
command = new Script("/bin/bash", s_logger);
command.add("-c");
command.add("ip route add " + destIpOrCidr + " via " + localgw);
String result = command.execute();
if (result != null) {
s_logger.warn("Error in configuring route to internal ip err=" + result );
} else {
s_logger.debug("addRouteToInternalIp: added route to internal ip=" + destIpOrCidr + " via " + localgw);
}
}
private void configureSSL() {
Script command = new Script(_configSslScr);
command.add(_publicIp);
command.add(_hostname);
String result = command.execute();
if (result != null) {
s_logger.warn("Unable to configure httpd to use ssl");
}
}
private String configureAuth(String user, String passwd) {
Script command = new Script(_configAuthScr);
command.add(user);
command.add(passwd);
String result = command.execute();
if (result != null) {
s_logger.warn("Unable to configure httpd to use auth");
}
return result;
}
private String configureIpFirewall(List<String> ipList){
Script command = new Script(_configIpFirewallScr);
for (String ip : ipList){
command.add(ip);
}
String result = command.execute();
if (result != null) {
s_logger.warn("Unable to configure firewall for command : " +command);
}
return result;
}
protected String mount(String path, String parent) {
String mountPoint = null;
for (int i = 0; i < 10; i++) {
String mntPt = parent + File.separator + Integer.toHexString(_rand.nextInt(Integer.MAX_VALUE));
@ -568,29 +567,29 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements
}
s_logger.debug("Unable to create mount: " + mntPt);
}
if (mountPoint == null) {
s_logger.warn("Unable to create a mount point");
return null;
}
Script script = null;
String result = null;
script = new Script(!_inSystemVM, "umount", _timeout, s_logger);
script.add(path);
result = script.execute();
if( _parent != null ) {
script = new Script("rmdir", _timeout, s_logger);
script.add(_parent);
result = script.execute();
}
Script command = new Script(!_inSystemVM, "mount", _timeout, s_logger);
command.add("-t", "cifs");
if (_inSystemVM) {
//Fedora Core 12 errors out with any -o option executed from java
//command.add("-o", "soft,timeo=133,retrans=2147483647,tcp,acdirmax=0,acdirmin=0");
//Fedora Core 12 errors out with any -o option executed from java
//command.add("-o", "soft,timeo=133,retrans=2147483647,tcp,acdirmax=0,acdirmin=0");
}
String tok[] = path.split(":");
//command.add(path);
@ -601,25 +600,25 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements
s_logger.warn("Unable to mount " + path + " due to " + result);
File file = new File(mountPoint);
if (file.exists())
file.delete();
file.delete();
return null;
}
// XXX: Adding the check for creation of snapshots dir here. Might have to move it somewhere more logical later.
if (!checkForSnapshotsDir(mountPoint)) {
return null;
return null;
}
// Create the volumes dir
if (!checkForVolumesDir(mountPoint)) {
return null;
return null;
}
return mountPoint;
}
@Override
public boolean start() {
return true;
@ -633,14 +632,14 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements
@Override
public StartupCommand[] initialize() {
/*disconnected();
_parent = mount(_nfsPath, _mountParent);
if( _parent == null ) {
s_logger.warn("Unable to mount the nfs server");
return null;
}
try {
_params.put("template.parent", _parent);
_params.put(StorageLayer.InstanceConfigKey, _storage);
@ -650,12 +649,12 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements
s_logger.warn("Caught problem while configuring folers", e);
return null;
}*/
final StartupStorageCommand cmd = new StartupStorageCommand(_parent, StoragePoolType.NetworkFilesystem, getTotalSize(), new HashMap<String, TemplateInfo>());
cmd.setResourceType(Storage.StorageResourceType.SECONDARY_STORAGE);
cmd.setIqn(null);
fillNetworkInformation(cmd);
cmd.setDataCenter(_dc);
cmd.setPod(_pod);
@ -687,38 +686,38 @@ public class CifsSecondaryStorageResource extends ServerResourceBase implements
String snapshotsDirLocation = mountPoint + File.separator + "snapshots";
return createDir("snapshots", snapshotsDirLocation, mountPoint);
}
protected boolean checkForVolumesDir(String mountPoint) {
String volumesDirLocation = mountPoint + "/" + "volumes";
return createDir("volumes", volumesDirLocation, mountPoint);
}
protected boolean createDir(String dirName, String dirLocation, String mountPoint) {
boolean dirExists = false;
File dir = new File(dirLocation);
if (dir.exists()) {
if (dir.isDirectory()) {
s_logger.debug(dirName + " already exists on secondary storage, and is mounted at " + mountPoint);
dirExists = true;
} else {
if (dir.delete() && _storage.mkdir(dirLocation)) {
dirExists = true;
}
}
} else if (_storage.mkdir(dirLocation)) {
dirExists = true;
}
if (dirExists) {
s_logger.info(dirName + " directory created/exists on Secondary Storage.");
} else {
s_logger.info(dirName + " directory does not exist on Secondary Storage.");
}
return dirExists;
protected boolean checkForVolumesDir(String mountPoint) {
String volumesDirLocation = mountPoint + "/" + "volumes";
return createDir("volumes", volumesDirLocation, mountPoint);
}
protected boolean createDir(String dirName, String dirLocation, String mountPoint) {
boolean dirExists = false;
File dir = new File(dirLocation);
if (dir.exists()) {
if (dir.isDirectory()) {
s_logger.debug(dirName + " already exists on secondary storage, and is mounted at " + mountPoint);
dirExists = true;
} else {
if (dir.delete() && _storage.mkdir(dirLocation)) {
dirExists = true;
}
}
} else if (_storage.mkdir(dirLocation)) {
dirExists = true;
}
if (dirExists) {
s_logger.info(dirName + " directory created/exists on Secondary Storage.");
} else {
s_logger.info(dirName + " directory does not exist on Secondary Storage.");
}
return dirExists;
}
@Override
protected String getDefaultScriptsDir() {
return "./scripts/storage/secondary";

View File

@ -19,7 +19,6 @@ package com.cloud.storage.resource;
import java.util.HashMap;
import java.util.Map;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
@ -36,11 +35,11 @@ import com.cloud.agent.api.ReadyCommand;
import com.cloud.agent.api.SecStorageSetupCommand;
import com.cloud.agent.api.StartupCommand;
import com.cloud.agent.api.StartupStorageCommand;
import com.cloud.agent.api.storage.DownloadCommand;
import com.cloud.agent.api.storage.DownloadProgressCommand;
import com.cloud.agent.api.storage.ListTemplateAnswer;
import com.cloud.agent.api.storage.ListTemplateCommand;
import com.cloud.agent.api.storage.ssCommand;
import com.cloud.agent.api.storage.DownloadCommand;
import com.cloud.agent.api.storage.DownloadProgressCommand;
import com.cloud.host.Host;
import com.cloud.host.Host.Type;
import com.cloud.resource.ServerResourceBase;
@ -50,39 +49,38 @@ import com.cloud.storage.StorageLayer;
import com.cloud.storage.template.DownloadManager;
import com.cloud.storage.template.DownloadManagerImpl;
import com.cloud.storage.template.TemplateInfo;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.component.ComponentContext;
public class LocalSecondaryStorageResource extends ServerResourceBase implements SecondaryStorageResource {
private static final Logger s_logger = Logger.getLogger(LocalSecondaryStorageResource.class);
int _timeout;
String _instance;
String _parent;
String _dc;
String _pod;
String _guid;
StorageLayer _storage;
DownloadManager _dlMgr;
@Override
public void disconnected() {
}
@Override
public String getRootDir(ssCommand cmd){
return getRootDir();
}
public String getRootDir() {
return _parent;
}
@Override
public Answer executeRequest(Command cmd) {
if (cmd instanceof DownloadProgressCommand) {
@ -103,7 +101,7 @@ public class LocalSecondaryStorageResource extends ServerResourceBase implements
return Answer.createUnsupportedCommandAnswer(cmd);
}
}
private Answer execute(ComputeChecksumCommand cmd) {
return new Answer(cmd, false, null);
}
@ -119,13 +117,13 @@ public class LocalSecondaryStorageResource extends ServerResourceBase implements
public Type getType() {
return Host.Type.LocalSecondaryStorage;
}
@Override
public PingCommand getCurrentStatus(final long id) {
return new PingStorageCommand(Host.Type.Storage, id, new HashMap<String, Boolean>());
}
@Override
@SuppressWarnings("unchecked")
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
@ -135,30 +133,30 @@ public class LocalSecondaryStorageResource extends ServerResourceBase implements
if (_guid == null) {
throw new ConfigurationException("Unable to find the guid");
}
_dc = (String)params.get("zone");
if (_dc == null) {
throw new ConfigurationException("Unable to find the zone");
}
_pod = (String)params.get("pod");
_instance = (String)params.get("instance");
_parent = (String)params.get("mount.path");
if (_parent == null) {
throw new ConfigurationException("No directory specified.");
}
_storage = (StorageLayer)params.get(StorageLayer.InstanceConfigKey);
if (_storage == null) {
String value = (String)params.get(StorageLayer.ClassConfigKey);
if (value == null) {
value = "com.cloud.storage.JavaStorageLayer";
}
try {
Class<StorageLayer> clazz = (Class<StorageLayer>)Class.forName(value);
_storage = ComponentLocator.inject(clazz);
_storage = ComponentContext.inject(clazz);
} catch (ClassNotFoundException e) {
throw new ConfigurationException("Unable to find class " + value);
}
@ -168,15 +166,15 @@ public class LocalSecondaryStorageResource extends ServerResourceBase implements
s_logger.warn("Unable to create the directory " + _parent);
throw new ConfigurationException("Unable to create the directory " + _parent);
}
s_logger.info("Mount point established at " + _parent);
params.put("template.parent", _parent);
params.put(StorageLayer.InstanceConfigKey, _storage);
_dlMgr = new DownloadManagerImpl();
_dlMgr.configure("DownloadManager", params);
return true;
}
@ -192,7 +190,7 @@ public class LocalSecondaryStorageResource extends ServerResourceBase implements
@Override
public StartupCommand[] initialize() {
final StartupStorageCommand cmd = new StartupStorageCommand(_parent, StoragePoolType.Filesystem, 1024l*1024l*1024l*1024l, _dlMgr.gatherTemplateInfo(_parent));
cmd.setResourceType(Storage.StorageResourceType.LOCAL_SECONDARY_STORAGE);
cmd.setIqn("local://");
@ -202,10 +200,10 @@ public class LocalSecondaryStorageResource extends ServerResourceBase implements
cmd.setGuid(_guid);
cmd.setName(_guid);
cmd.setVersion(LocalSecondaryStorageResource.class.getPackage().getImplementationVersion());
return new StartupCommand [] {cmd};
}
@Override
protected String getDefaultScriptsDir() {
return "scripts/storage/secondary";

View File

@ -29,8 +29,8 @@ import java.security.NoSuchAlgorithmException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.UUID;
@ -43,7 +43,6 @@ import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.storage.DownloadAnswer;
import com.cloud.agent.api.storage.DownloadCommand;
import com.cloud.agent.api.storage.DownloadCommand.Proxy;
@ -60,10 +59,6 @@ import com.cloud.storage.template.Processor.FormatInfo;
import com.cloud.storage.template.TemplateDownloader.DownloadCompleteCallback;
import com.cloud.storage.template.TemplateDownloader.Status;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.component.Adapter;
import com.cloud.utils.component.Adapters;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.component.LegacyComponentLocator.ComponentInfo;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.OutputInterpreter;
import com.cloud.utils.script.Script;
@ -72,7 +67,7 @@ import com.cloud.utils.script.Script;
public class DownloadManagerImpl implements DownloadManager {
private String _name;
StorageLayer _storage;
Adapters<Processor> _processors;
Map<String, Processor> _processors;
public class Completion implements DownloadCompleteCallback {
private final String jobId;
@ -94,14 +89,14 @@ public class DownloadManagerImpl implements DownloadManager {
private final boolean hvm;
private final ImageFormat format;
private String tmpltPath;
private String description;
private final String description;
private String checksum;
private Long accountId;
private String installPathPrefix;
private final Long accountId;
private final String installPathPrefix;
private long templatesize;
private long templatePhysicalSize;
private long id;
private ResourceType resourceType;
private final long id;
private final ResourceType resourceType;
public DownloadJob(TemplateDownloader td, String jobId, long id, String tmpltName, ImageFormat format, boolean hvm, Long accountId, String descr, String cksum, String installPathPrefix, ResourceType resourceType) {
super();
@ -160,10 +155,10 @@ public class DownloadManagerImpl implements DownloadManager {
}
public ResourceType getResourceType() {
return resourceType;
}
return resourceType;
}
public void setTmpltPath(String tmpltPath) {
public void setTmpltPath(String tmpltPath) {
this.tmpltPath = tmpltPath;
}
@ -205,9 +200,9 @@ public class DownloadManagerImpl implements DownloadManager {
public long getTemplatePhysicalSize() {
return templatePhysicalSize;
}
public void setCheckSum(String checksum) {
this.checksum = checksum;
this.checksum = checksum;
}
}
@ -216,7 +211,7 @@ public class DownloadManagerImpl implements DownloadManager {
private String _volumeDir;
private String createTmpltScr;
private String createVolScr;
private Adapters<Processor> processors;
private List<Processor> processors;
private ExecutorService threadPool;
@ -278,9 +273,9 @@ public class DownloadManagerImpl implements DownloadManager {
break;
}
}
private String computeCheckSum(File f) {
byte[] buffer = new byte[8192];
byte[] buffer = new byte[8192];
int read = 0;
MessageDigest digest;
String checksum = null;
@ -296,16 +291,16 @@ public class DownloadManagerImpl implements DownloadManager {
checksum = String.format("%032x",bigInt);
return checksum;
}catch(IOException e) {
return null;
return null;
}catch (NoSuchAlgorithmException e) {
return null;
return null;
}
finally {
try {
if(is != null)
is.close();
if(is != null)
is.close();
} catch (IOException e) {
return null;
return null;
}
}
}
@ -320,17 +315,17 @@ public class DownloadManagerImpl implements DownloadManager {
TemplateDownloader td = dnld.getTemplateDownloader();
String resourcePath = null;
ResourceType resourceType = dnld.getResourceType();
// once template path is set, remove the parent dir so that the template is installed with a relative path
String finalResourcePath = "";
if (resourceType == ResourceType.TEMPLATE){
finalResourcePath += _templateDir + File.separator + dnld.getAccountId() + File.separator + dnld.getId() + File.separator;
resourcePath = dnld.getInstallPathPrefix() + dnld.getAccountId() + File.separator + dnld.getId() + File.separator;// dnld.getTmpltName();
finalResourcePath += _templateDir + File.separator + dnld.getAccountId() + File.separator + dnld.getId() + File.separator;
resourcePath = dnld.getInstallPathPrefix() + dnld.getAccountId() + File.separator + dnld.getId() + File.separator;// dnld.getTmpltName();
}else {
finalResourcePath += _volumeDir + File.separator + dnld.getId() + File.separator;
resourcePath = dnld.getInstallPathPrefix() + dnld.getId() + File.separator;// dnld.getTmpltName();
finalResourcePath += _volumeDir + File.separator + dnld.getId() + File.separator;
resourcePath = dnld.getInstallPathPrefix() + dnld.getId() + File.separator;// dnld.getTmpltName();
}
_storage.mkdirs(resourcePath);
dnld.setTmpltPath(finalResourcePath);
@ -389,9 +384,9 @@ public class DownloadManagerImpl implements DownloadManager {
// Set permissions for template/volume.properties
String propertiesFile = resourcePath;
if (resourceType == ResourceType.TEMPLATE){
propertiesFile += "/template.properties";
propertiesFile += "/template.properties";
}else{
propertiesFile += "/volume.properties";
propertiesFile += "/volume.properties";
}
File templateProperties = new File(propertiesFile);
_storage.setWorldReadableAndWriteable(templateProperties);
@ -405,9 +400,9 @@ public class DownloadManagerImpl implements DownloadManager {
return "Unable to download due to " + e.getMessage();
}
Enumeration<Processor> en = _processors.enumeration();
while (en.hasMoreElements()) {
Processor processor = en.nextElement();
Iterator<Processor> en = _processors.values().iterator();
while (en.hasNext()) {
Processor processor = en.next();
FormatInfo info = null;
try {
@ -423,7 +418,7 @@ public class DownloadManagerImpl implements DownloadManager {
break;
}
}
if (!loc.save()) {
s_logger.warn("Cleaning up because we're unable to save the formats");
loc.purge();
@ -450,9 +445,9 @@ public class DownloadManagerImpl implements DownloadManager {
String jobId = uuid.toString();
String tmpDir = "";
if(resourceType == ResourceType.TEMPLATE){
tmpDir = installPathPrefix + File.separator + accountId + File.separator + id;
tmpDir = installPathPrefix + File.separator + accountId + File.separator + id;
}else {
tmpDir = installPathPrefix + File.separator + id;
tmpDir = installPathPrefix + File.separator + id;
}
try {
@ -463,7 +458,7 @@ public class DownloadManagerImpl implements DownloadManager {
}
// TO DO - define constant for volume properties.
File file = ResourceType.TEMPLATE == resourceType ? _storage.getFile(tmpDir + File.separator + TemplateLocation.Filename) :
_storage.getFile(tmpDir + File.separator + "volume.properties");
_storage.getFile(tmpDir + File.separator + "volume.properties");
if ( file.exists() ) {
file.delete();
}
@ -524,9 +519,9 @@ public class DownloadManagerImpl implements DownloadManager {
}
return 0;
}
public String getDownloadCheckSum(String jobId) {
DownloadJob dj = jobs.get(jobId);
DownloadJob dj = jobs.get(jobId);
if (dj != null) {
return dj.getChecksum();
}
@ -589,7 +584,7 @@ public class DownloadManagerImpl implements DownloadManager {
@Override
public DownloadAnswer handleDownloadCommand(SecondaryStorageResource resource, DownloadCommand cmd) {
ResourceType resourceType = cmd.getResourceType();
ResourceType resourceType = cmd.getResourceType();
if (cmd instanceof DownloadProgressCommand) {
return handleDownloadProgressCmd( resource, (DownloadProgressCommand) cmd);
}
@ -604,9 +599,9 @@ public class DownloadManagerImpl implements DownloadManager {
String installPathPrefix = null;
if (ResourceType.TEMPLATE == resourceType){
installPathPrefix = resource.getRootDir(cmd) + File.separator + _templateDir;
installPathPrefix = resource.getRootDir(cmd) + File.separator + _templateDir;
}else {
installPathPrefix = resource.getRootDir(cmd) + File.separator + _volumeDir;
installPathPrefix = resource.getRootDir(cmd) + File.separator + _volumeDir;
}
String user = null;
@ -693,10 +688,10 @@ public class DownloadManagerImpl implements DownloadManager {
}
private List<String> listVolumes(String rootdir) {
List<String> result = new ArrayList<String>();
Script script = new Script(listVolScr, s_logger);
script.add("-r", rootdir);
ZfsPathParser zpp = new ZfsPathParser(rootdir);
@ -705,12 +700,12 @@ public class DownloadManagerImpl implements DownloadManager {
s_logger.info("found " + zpp.getPaths().size() + " volumes" + zpp.getPaths());
return result;
}
private List<String> listTemplates(String rootdir) {
List<String> result = new ArrayList<String>();
Script script = new Script(listTmpltScr, s_logger);
script.add("-r", rootdir);
ZfsPathParser zpp = new ZfsPathParser(rootdir);
@ -724,11 +719,11 @@ public class DownloadManagerImpl implements DownloadManager {
public Map<String, TemplateInfo> gatherTemplateInfo(String rootDir) {
Map<String, TemplateInfo> result = new HashMap<String, TemplateInfo>();
String templateDir = rootDir + File.separator + _templateDir;
if (! _storage.exists(templateDir)) {
_storage.mkdirs(templateDir);
}
List<String> publicTmplts = listTemplates(templateDir);
for (String tmplt : publicTmplts) {
String path = tmplt.substring(0, tmplt.lastIndexOf(File.separator));
@ -746,18 +741,18 @@ public class DownloadManagerImpl implements DownloadManager {
}
TemplateInfo tInfo = loc.getTemplateInfo();
if ((tInfo.size == tInfo.physicalSize) && (tInfo.installPath.endsWith(ImageFormat.OVA.getFileExtension()))) {
try {
Processor processor = _processors.get("VMDK Processor");
VmdkProcessor vmdkProcessor = (VmdkProcessor)processor;
long vSize = vmdkProcessor.getTemplateVirtualSize(path, tInfo.installPath.substring(tInfo.installPath.lastIndexOf(File.separator) + 1));
tInfo.size = vSize;
loc.updateVirtualSize(vSize);
loc.save();
} catch (Exception e) {
s_logger.error("Unable to get the virtual size of the template: " + tInfo.installPath + " due to " + e.getMessage());
}
try {
Processor processor = _processors.get("VMDK Processor");
VmdkProcessor vmdkProcessor = (VmdkProcessor)processor;
long vSize = vmdkProcessor.getTemplateVirtualSize(path, tInfo.installPath.substring(tInfo.installPath.lastIndexOf(File.separator) + 1));
tInfo.size = vSize;
loc.updateVirtualSize(vSize);
loc.save();
} catch (Exception e) {
s_logger.error("Unable to get the virtual size of the template: " + tInfo.installPath + " due to " + e.getMessage());
}
}
result.put(tInfo.templateName, tInfo);
@ -777,52 +772,52 @@ public class DownloadManagerImpl implements DownloadManager {
return result;
}
@Override
public Map<Long, TemplateInfo> gatherVolumeInfo(String rootDir) {
Map<Long, TemplateInfo> result = new HashMap<Long, TemplateInfo>();
String volumeDir = rootDir + File.separator + _volumeDir;
if (! _storage.exists(volumeDir)) {
_storage.mkdirs(volumeDir);
}
List<String> vols = listVolumes(volumeDir);
for (String vol : vols) {
String path = vol.substring(0, vol.lastIndexOf(File.separator));
TemplateLocation loc = new TemplateLocation(_storage, path);
try {
if (!loc.load()) {
s_logger.warn("Post download installation was not completed for " + path);
//loc.purge();
_storage.cleanup(path, volumeDir);
continue;
}
} catch (IOException e) {
s_logger.warn("Unable to load volume location " + path, e);
continue;
}
@Override
public Map<Long, TemplateInfo> gatherVolumeInfo(String rootDir) {
Map<Long, TemplateInfo> result = new HashMap<Long, TemplateInfo>();
String volumeDir = rootDir + File.separator + _volumeDir;
TemplateInfo vInfo = loc.getTemplateInfo();
if ((vInfo.size == vInfo.physicalSize) && (vInfo.installPath.endsWith(ImageFormat.OVA.getFileExtension()))) {
try {
Processor processor = _processors.get("VMDK Processor");
VmdkProcessor vmdkProcessor = (VmdkProcessor)processor;
long vSize = vmdkProcessor.getTemplateVirtualSize(path, vInfo.installPath.substring(vInfo.installPath.lastIndexOf(File.separator) + 1));
vInfo.size = vSize;
loc.updateVirtualSize(vSize);
loc.save();
} catch (Exception e) {
s_logger.error("Unable to get the virtual size of the volume: " + vInfo.installPath + " due to " + e.getMessage());
}
}
if (! _storage.exists(volumeDir)) {
_storage.mkdirs(volumeDir);
}
List<String> vols = listVolumes(volumeDir);
for (String vol : vols) {
String path = vol.substring(0, vol.lastIndexOf(File.separator));
TemplateLocation loc = new TemplateLocation(_storage, path);
try {
if (!loc.load()) {
s_logger.warn("Post download installation was not completed for " + path);
//loc.purge();
_storage.cleanup(path, volumeDir);
continue;
}
} catch (IOException e) {
s_logger.warn("Unable to load volume location " + path, e);
continue;
}
TemplateInfo vInfo = loc.getTemplateInfo();
if ((vInfo.size == vInfo.physicalSize) && (vInfo.installPath.endsWith(ImageFormat.OVA.getFileExtension()))) {
try {
Processor processor = _processors.get("VMDK Processor");
VmdkProcessor vmdkProcessor = (VmdkProcessor)processor;
long vSize = vmdkProcessor.getTemplateVirtualSize(path, vInfo.installPath.substring(vInfo.installPath.lastIndexOf(File.separator) + 1));
vInfo.size = vSize;
loc.updateVirtualSize(vSize);
loc.save();
} catch (Exception e) {
s_logger.error("Unable to get the virtual size of the volume: " + vInfo.installPath + " due to " + e.getMessage());
}
}
result.put(vInfo.getId(), vInfo);
s_logger.debug("Added volume name: " + vInfo.templateName + ", path: " + vol);
}
return result;
}
result.put(vInfo.getId(), vInfo);
s_logger.debug("Added volume name: " + vInfo.templateName + ", path: " + vol);
}
return result;
}
private int deleteDownloadDirectories(File downloadPath, int deleted) {
try {
if (downloadPath.exists()) {
@ -881,7 +876,7 @@ public class DownloadManagerImpl implements DownloadManager {
String value = null;
_storage = (StorageLayer) params.get(StorageLayer.InstanceConfigKey);
_storage = (StorageLayer)params.get(StorageLayer.InstanceConfigKey);
if (_storage == null) {
value = (String) params.get(StorageLayer.ClassConfigKey);
if (value == null) {
@ -891,10 +886,14 @@ public class DownloadManagerImpl implements DownloadManager {
Class<StorageLayer> clazz;
try {
clazz = (Class<StorageLayer>) Class.forName(value);
_storage = clazz.newInstance();
} catch (ClassNotFoundException e) {
throw new ConfigurationException("Unable to instantiate " + value);
} catch (InstantiationException e) {
throw new ConfigurationException("Unable to instantiate " + value);
} catch (IllegalAccessException e) {
throw new ConfigurationException("Unable to instantiate " + value);
}
_storage = ComponentLocator.inject(clazz);
}
String useSsl = (String)params.get("sslcopy");
if (useSsl != null) {
@ -943,29 +942,27 @@ public class DownloadManagerImpl implements DownloadManager {
}
s_logger.info("createvolume.sh found in " + createVolScr);
List<ComponentInfo<Adapter>> processors = new ArrayList<ComponentInfo<Adapter>>();
_processors = new HashMap<String, Processor>();
Processor processor = new VhdProcessor();
processor.configure("VHD Processor", params);
processors.add(new ComponentInfo<Adapter>("VHD Processor", VhdProcessor.class, processor));
_processors.put("VHD Processor", processor);
processor = new IsoProcessor();
processor.configure("ISO Processor", params);
processors.add(new ComponentInfo<Adapter>("ISO Processor", IsoProcessor.class, processor));
_processors.put("ISO Processor", processor);
processor = new QCOW2Processor();
processor.configure("QCOW2 Processor", params);
processors.add(new ComponentInfo<Adapter>("QCOW2 Processor", QCOW2Processor.class, processor));
_processors.put("QCOW2 Processor", processor);
processor = new VmdkProcessor();
processor.configure("VMDK Processor", params);
processors.add(new ComponentInfo<Adapter>("VMDK Processor", VmdkProcessor.class, processor));
_processors.put("VMDK Processor", processor);
processor = new RawImageProcessor();
processor.configure("Raw Image Processor", params);
processors.add(new ComponentInfo<Adapter>("Raw Image Processor", RawImageProcessor.class, processor));
_processors = new Adapters<Processor>("processors", processors);
_processors.put("Raw Image Processor", processor);
_templateDir = (String) params.get("public.templates.root.dir");
if (_templateDir == null) {
@ -1047,5 +1044,5 @@ public class DownloadManagerImpl implements DownloadManager {
return;
}
}
}

View File

@ -21,6 +21,7 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
@ -46,15 +47,13 @@ import com.cloud.storage.resource.SecondaryStorageResource;
import com.cloud.storage.template.TemplateUploader.Status;
import com.cloud.storage.template.TemplateUploader.UploadCompleteCallback;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.component.Adapters;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
public class UploadManagerImpl implements UploadManager {
public class Completion implements UploadCompleteCallback {
public class Completion implements UploadCompleteCallback {
private final String jobId;
public Completion(String jobId) {
@ -66,180 +65,180 @@ public class UploadManagerImpl implements UploadManager {
setUploadStatus(jobId, status);
}
}
private static class UploadJob {
private final TemplateUploader tu;
private final String jobId;
private final String name;
private final ImageFormat format;
private String tmpltPath;
private String description;
private String checksum;
private Long accountId;
private String installPathPrefix;
private long templatesize;
private long id;
public UploadJob(TemplateUploader tu, String jobId, long id, String name, ImageFormat format, boolean hvm, Long accountId, String descr, String cksum, String installPathPrefix) {
super();
this.tu = tu;
this.jobId = jobId;
this.name = name;
this.format = format;
this.accountId = accountId;
this.description = descr;
this.checksum = cksum;
this.installPathPrefix = installPathPrefix;
this.templatesize = 0;
this.id = id;
}
private static class UploadJob {
private final TemplateUploader tu;
private final String jobId;
private final String name;
private final ImageFormat format;
private String tmpltPath;
private String description;
private String checksum;
private Long accountId;
private String installPathPrefix;
private long templatesize;
private long id;
public TemplateUploader getTd() {
return tu;
}
public UploadJob(TemplateUploader tu, String jobId, long id, String name, ImageFormat format, boolean hvm, Long accountId, String descr, String cksum, String installPathPrefix) {
super();
this.tu = tu;
this.jobId = jobId;
this.name = name;
this.format = format;
this.accountId = accountId;
this.description = descr;
this.checksum = cksum;
this.installPathPrefix = installPathPrefix;
this.templatesize = 0;
this.id = id;
}
public String getDescription() {
return description;
}
public TemplateUploader getTd() {
return tu;
}
public String getChecksum() {
return checksum;
}
public String getDescription() {
return description;
}
public UploadJob(TemplateUploader td, String jobId, UploadCommand cmd) {
this.tu = td;
this.jobId = jobId;
this.name = cmd.getName();
this.format = cmd.getFormat();
}
public String getChecksum() {
return checksum;
}
public TemplateUploader getTemplateUploader() {
return tu;
}
public UploadJob(TemplateUploader td, String jobId, UploadCommand cmd) {
this.tu = td;
this.jobId = jobId;
this.name = cmd.getName();
this.format = cmd.getFormat();
}
public String getJobId() {
return jobId;
}
public TemplateUploader getTemplateUploader() {
return tu;
}
public String getTmpltName() {
return name;
}
public String getJobId() {
return jobId;
}
public ImageFormat getFormat() {
return format;
}
public String getTmpltName() {
return name;
}
public Long getAccountId() {
return accountId;
}
public ImageFormat getFormat() {
return format;
}
public long getId() {
return id;
}
public Long getAccountId() {
return accountId;
}
public void setTmpltPath(String tmpltPath) {
this.tmpltPath = tmpltPath;
}
public long getId() {
return id;
}
public String getTmpltPath() {
return tmpltPath;
}
public void setTmpltPath(String tmpltPath) {
this.tmpltPath = tmpltPath;
}
public String getInstallPathPrefix() {
return installPathPrefix;
}
public String getTmpltPath() {
return tmpltPath;
}
public void cleanup() {
if (tu != null) {
String upldPath = tu.getUploadLocalPath();
if (upldPath != null) {
File f = new File(upldPath);
f.delete();
}
}
}
public String getInstallPathPrefix() {
return installPathPrefix;
}
public void setTemplatesize(long templatesize) {
this.templatesize = templatesize;
}
public void cleanup() {
if (tu != null) {
String upldPath = tu.getUploadLocalPath();
if (upldPath != null) {
File f = new File(upldPath);
f.delete();
}
}
}
public void setTemplatesize(long templatesize) {
this.templatesize = templatesize;
}
public long getTemplatesize() {
return templatesize;
}
}
public static final Logger s_logger = Logger.getLogger(UploadManagerImpl.class);
private ExecutorService threadPool;
private final Map<String, UploadJob> jobs = new ConcurrentHashMap<String, UploadJob>();
private String parentDir;
private List<Processor> _processors;
private String publicTemplateRepo;
private final String extractMountPoint = "/mnt/SecStorage/extractmnt";
private StorageLayer _storage;
private int installTimeoutPerGig;
private boolean _sslCopy;
private String _name;
private boolean hvm;
@Override
public String uploadPublicTemplate(long id, String url, String name,
ImageFormat format, Long accountId, String descr,
String cksum, String installPathPrefix, String userName,
String passwd, long templateSizeInBytes) {
public long getTemplatesize() {
return templatesize;
}
}
public static final Logger s_logger = Logger.getLogger(UploadManagerImpl.class);
private ExecutorService threadPool;
private final Map<String, UploadJob> jobs = new ConcurrentHashMap<String, UploadJob>();
private String parentDir;
private Adapters<Processor> _processors;
private String publicTemplateRepo;
private String extractMountPoint = "/mnt/SecStorage/extractmnt";
private StorageLayer _storage;
private int installTimeoutPerGig;
private boolean _sslCopy;
private String _name;
private boolean hvm;
@Override
public String uploadPublicTemplate(long id, String url, String name,
ImageFormat format, Long accountId, String descr,
String cksum, String installPathPrefix, String userName,
String passwd, long templateSizeInBytes) {
UUID uuid = UUID.randomUUID();
String jobId = uuid.toString();
String completePath = parentDir + File.separator + installPathPrefix;
s_logger.debug("Starting upload from " + completePath);
URI uri;
try {
uri = new URI(url);
} catch (URISyntaxException e) {
s_logger.error("URI is incorrect: " + url);
throw new CloudRuntimeException("URI is incorrect: " + url);
}
TemplateUploader tu;
if ((uri != null) && (uri.getScheme() != null)) {
if (uri.getScheme().equalsIgnoreCase("ftp")) {
tu = new FtpTemplateUploader(completePath, url, new Completion(jobId), templateSizeInBytes);
} else {
s_logger.error("Scheme is not supported " + url);
throw new CloudRuntimeException("Scheme is not supported " + url);
}
} else {
s_logger.error("Unable to download from URL: " + url);
throw new CloudRuntimeException("Unable to download from URL: " + url);
}
UploadJob uj = new UploadJob(tu, jobId, id, name, format, hvm, accountId, descr, cksum, installPathPrefix);
jobs.put(jobId, uj);
threadPool.execute(tu);
try {
uri = new URI(url);
} catch (URISyntaxException e) {
s_logger.error("URI is incorrect: " + url);
throw new CloudRuntimeException("URI is incorrect: " + url);
}
TemplateUploader tu;
if ((uri != null) && (uri.getScheme() != null)) {
if (uri.getScheme().equalsIgnoreCase("ftp")) {
tu = new FtpTemplateUploader(completePath, url, new Completion(jobId), templateSizeInBytes);
} else {
s_logger.error("Scheme is not supported " + url);
throw new CloudRuntimeException("Scheme is not supported " + url);
}
} else {
s_logger.error("Unable to download from URL: " + url);
throw new CloudRuntimeException("Unable to download from URL: " + url);
}
UploadJob uj = new UploadJob(tu, jobId, id, name, format, hvm, accountId, descr, cksum, installPathPrefix);
jobs.put(jobId, uj);
threadPool.execute(tu);
return jobId;
}
return jobId;
@Override
public String getUploadError(String jobId) {
}
@Override
public String getUploadError(String jobId) {
UploadJob uj = jobs.get(jobId);
if (uj != null) {
return uj.getTemplateUploader().getUploadError();
}
return null;
}
}
@Override
public int getUploadPct(String jobId) {
UploadJob uj = jobs.get(jobId);
@Override
public int getUploadPct(String jobId) {
UploadJob uj = jobs.get(jobId);
if (uj != null) {
return uj.getTemplateUploader().getUploadPercent();
}
return 0;
}
}
@Override
public Status getUploadStatus(String jobId) {
@Override
public Status getUploadStatus(String jobId) {
UploadJob job = jobs.get(jobId);
if (job != null) {
TemplateUploader tu = job.getTemplateUploader();
@ -248,8 +247,8 @@ public class UploadManagerImpl implements UploadManager {
}
}
return Status.UNKNOWN;
}
}
public static UploadVO.Status convertStatus(Status tds) {
switch (tds) {
case ABORTED:
@ -277,11 +276,11 @@ public class UploadManagerImpl implements UploadManager {
public com.cloud.storage.UploadVO.Status getUploadStatus2(String jobId) {
return convertStatus(getUploadStatus(jobId));
}
@Override
public String getPublicTemplateRepo() {
// TODO Auto-generated method stub
return null;
}
@Override
public String getPublicTemplateRepo() {
// TODO Auto-generated method stub
return null;
}
private UploadAnswer handleUploadProgressCmd(UploadProgressCommand cmd) {
String jobId = cmd.getJobId();
@ -290,7 +289,7 @@ public class UploadManagerImpl implements UploadManager {
if (jobId != null)
uj = jobs.get(jobId);
if (uj == null) {
return new UploadAnswer(null, 0, "Cannot find job", com.cloud.storage.UploadVO.Status.UNKNOWN, "", "", 0);
return new UploadAnswer(null, 0, "Cannot find job", com.cloud.storage.UploadVO.Status.UNKNOWN, "", "", 0);
}
TemplateUploader td = uj.getTemplateUploader();
switch (cmd.getRequest()) {
@ -300,7 +299,7 @@ public class UploadManagerImpl implements UploadManager {
td.stopUpload();
sleep();
break;
/*case RESTART:
/*case RESTART:
td.stopUpload();
sleep();
threadPool.execute(td);
@ -316,10 +315,10 @@ public class UploadManagerImpl implements UploadManager {
return new UploadAnswer(jobId, getUploadPct(jobId), getUploadError(jobId), getUploadStatus2(jobId), getUploadLocalPath(jobId), getInstallPath(jobId),
getUploadTemplateSize(jobId));
}
@Override
public UploadAnswer handleUploadCommand(SecondaryStorageResource resource, UploadCommand cmd) {
s_logger.warn("Handling the upload " +cmd.getInstallPath() + " " + cmd.getId());
s_logger.warn("Handling the upload " +cmd.getInstallPath() + " " + cmd.getId());
if (cmd instanceof UploadProgressCommand) {
return handleUploadProgressCmd((UploadProgressCommand) cmd);
}
@ -327,9 +326,9 @@ public class UploadManagerImpl implements UploadManager {
String user = null;
String password = null;
String jobId = uploadPublicTemplate(cmd.getId(), cmd.getUrl(), cmd.getName(),
cmd.getFormat(), cmd.getAccountId(), cmd.getDescription(),
cmd.getChecksum(), cmd.getInstallPath(), user, password,
cmd.getTemplateSizeInBytes());
cmd.getFormat(), cmd.getAccountId(), cmd.getDescription(),
cmd.getChecksum(), cmd.getInstallPath(), user, password,
cmd.getTemplateSizeInBytes());
sleep();
if (jobId == null) {
return new UploadAnswer(null, 0, "Internal Error", com.cloud.storage.UploadVO.Status.UPLOAD_ERROR, "", "", 0);
@ -337,18 +336,18 @@ public class UploadManagerImpl implements UploadManager {
return new UploadAnswer(jobId, getUploadPct(jobId), getUploadError(jobId), getUploadStatus2(jobId), getUploadLocalPath(jobId), getInstallPath(jobId),
getUploadTemplateSize(jobId));
}
@Override
public CreateEntityDownloadURLAnswer handleCreateEntityURLCommand(CreateEntityDownloadURLCommand cmd){
boolean isApacheUp = checkAndStartApache();
if (!isApacheUp){
String errorString = "Error in starting Apache server ";
boolean isApacheUp = checkAndStartApache();
if (!isApacheUp){
String errorString = "Error in starting Apache server ";
s_logger.error(errorString);
return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE);
}
}
// Create the directory structure so that its visible under apache server root
String extractDir = "/var/www/html/userdata/";
String extractDir = "/var/www/html/userdata/";
Script command = new Script("mkdir", s_logger);
command.add("-p");
command.add(extractDir);
@ -358,19 +357,19 @@ public class UploadManagerImpl implements UploadManager {
s_logger.error(errorString);
return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE);
}
// Create a random file under the directory for security reasons.
String uuid = cmd.getExtractLinkUUID();
command = new Script("touch", s_logger);
command.add(extractDir + uuid);
result = command.execute();
if (result != null) {
String errorString = "Error in creating file " +uuid+ " ,error: " + result;
s_logger.warn(errorString);
return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE);
}
command = new Script("touch", s_logger);
command.add(extractDir + uuid);
result = command.execute();
if (result != null) {
String errorString = "Error in creating file " +uuid+ " ,error: " + result;
s_logger.warn(errorString);
return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE);
}
// Create a symbolic link from the actual directory to the template location. The entity would be directly visible under /var/www/html/userdata/cmd.getInstallPath();
command = new Script("/bin/bash", s_logger);
command.add("-c");
@ -381,11 +380,11 @@ public class UploadManagerImpl implements UploadManager {
s_logger.error(errorString);
return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE);
}
return new CreateEntityDownloadURLAnswer("", CreateEntityDownloadURLAnswer.RESULT_SUCCESS);
}
@Override
public DeleteEntityDownloadURLAnswer handleDeleteEntityDownloadURLCommand(DeleteEntityDownloadURLCommand cmd){
@ -394,8 +393,8 @@ public class UploadManagerImpl implements UploadManager {
String path = cmd.getPath();
Script command = new Script("/bin/bash", s_logger);
command.add("-c");
//We just need to remove the UUID.vhd
//We just need to remove the UUID.vhd
String extractUrl = cmd.getExtractUrl();
command.add("unlink /var/www/html/userdata/" +extractUrl.substring(extractUrl.lastIndexOf(File.separator) + 1));
String result = command.execute();
@ -404,7 +403,7 @@ public class UploadManagerImpl implements UploadManager {
s_logger.warn(errorString);
return new DeleteEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE);
}
// If its a volume also delete the Hard link since it was created only for the purpose of download.
if(cmd.getType() == Upload.Type.VOLUME){
command = new Script("/bin/bash", s_logger);
@ -418,31 +417,31 @@ public class UploadManagerImpl implements UploadManager {
return new DeleteEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE);
}
}
return new DeleteEntityDownloadURLAnswer("", CreateEntityDownloadURLAnswer.RESULT_SUCCESS);
}
private String getInstallPath(String jobId) {
// TODO Auto-generated method stub
return null;
}
private String getInstallPath(String jobId) {
// TODO Auto-generated method stub
return null;
}
private String getUploadLocalPath(String jobId) {
// TODO Auto-generated method stub
return null;
}
private String getUploadLocalPath(String jobId) {
// TODO Auto-generated method stub
return null;
}
private long getUploadTemplateSize(String jobId){
UploadJob uj = jobs.get(jobId);
private long getUploadTemplateSize(String jobId){
UploadJob uj = jobs.get(jobId);
if (uj != null) {
return uj.getTemplatesize();
}
return 0;
}
}
@Override
public boolean configure(String name, Map<String, Object> params)
throws ConfigurationException {
@Override
public boolean configure(String name, Map<String, Object> params)
throws ConfigurationException {
_name = name;
String value = null;
@ -457,21 +456,25 @@ public class UploadManagerImpl implements UploadManager {
Class<StorageLayer> clazz;
try {
clazz = (Class<StorageLayer>) Class.forName(value);
_storage = clazz.newInstance();
} catch (ClassNotFoundException e) {
throw new ConfigurationException("Unable to instantiate " + value);
} catch (InstantiationException e) {
throw new ConfigurationException("Unable to instantiate " + value);
} catch (IllegalAccessException e) {
throw new ConfigurationException("Unable to instantiate " + value);
}
_storage = ComponentLocator.inject(clazz);
}
String useSsl = (String)params.get("sslcopy");
if (useSsl != null) {
_sslCopy = Boolean.parseBoolean(useSsl);
_sslCopy = Boolean.parseBoolean(useSsl);
}
String inSystemVM = (String)params.get("secondary.storage.vm");
if (inSystemVM != null && "true".equalsIgnoreCase(inSystemVM)) {
s_logger.info("UploadManager: starting additional services since we are inside system vm");
startAdditionalServices();
//blockOutgoingOnPrivate();
s_logger.info("UploadManager: starting additional services since we are inside system vm");
startAdditionalServices();
//blockOutgoingOnPrivate();
}
value = (String) params.get("install.timeout.pergig");
@ -489,53 +492,53 @@ public class UploadManagerImpl implements UploadManager {
threadPool = Executors.newFixedThreadPool(numInstallThreads);
return true;
}
private void startAdditionalServices() {
Script command = new Script("rm", s_logger);
command.add("-rf");
command.add(extractMountPoint);
String result = command.execute();
if (result != null) {
s_logger.warn("Error in creating file " +extractMountPoint+ " ,error: " + result );
return;
}
command = new Script("touch", s_logger);
command.add(extractMountPoint);
result = command.execute();
if (result != null) {
s_logger.warn("Error in creating file " +extractMountPoint+ " ,error: " + result );
return;
}
command = new Script("/bin/bash", s_logger);
command.add("-c");
command.add("ln -sf " + parentDir + " " +extractMountPoint);
result = command.execute();
if (result != null) {
s_logger.warn("Error in linking err=" + result );
return;
}
}
}
@Override
public String getName() {
return _name;
}
private void startAdditionalServices() {
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
}
Script command = new Script("rm", s_logger);
command.add("-rf");
command.add(extractMountPoint);
String result = command.execute();
if (result != null) {
s_logger.warn("Error in creating file " +extractMountPoint+ " ,error: " + result );
return;
}
command = new Script("touch", s_logger);
command.add(extractMountPoint);
result = command.execute();
if (result != null) {
s_logger.warn("Error in creating file " +extractMountPoint+ " ,error: " + result );
return;
}
command = new Script("/bin/bash", s_logger);
command.add("-c");
command.add("ln -sf " + parentDir + " " +extractMountPoint);
result = command.execute();
if (result != null) {
s_logger.warn("Error in linking err=" + result );
return;
}
}
@Override
public String getName() {
return _name;
}
@Override
public boolean start() {
return true;
}
@Override
public boolean stop() {
return true;
}
/**
* Get notified of change of job status. Executed in context of uploader thread
@ -582,7 +585,7 @@ public class UploadManagerImpl implements UploadManager {
tu.setStatus(Status.UNRECOVERABLE_ERROR);
tu.setUploadError("Failed post upload script: " + result);
} else {
s_logger.warn("Upload completed successfully at " + new SimpleDateFormat().format(new Date()));
s_logger.warn("Upload completed successfully at " + new SimpleDateFormat().format(new Date()));
tu.setStatus(Status.POST_UPLOAD_FINISHED);
tu.setUploadError("Upload completed successfully at " + new SimpleDateFormat().format(new Date()));
}
@ -596,9 +599,9 @@ public class UploadManagerImpl implements UploadManager {
}
}
private String postUpload(String jobId) {
return null;
}
private String postUpload(String jobId) {
return null;
}
private void sleep() {
try {
@ -608,21 +611,21 @@ public class UploadManagerImpl implements UploadManager {
}
}
private boolean checkAndStartApache() {
//Check whether the Apache server is running
Script command = new Script("/bin/bash", s_logger);
command.add("-c");
command.add("if [ -d /etc/apache2 ] ; then service apache2 status | grep pid; else service httpd status | grep pid; fi ");
String result = command.execute();
//Apache Server is not running. Try to start it.
if (result != null) {
/*s_logger.warn("Apache server not running, trying to start it");
private boolean checkAndStartApache() {
//Check whether the Apache server is running
Script command = new Script("/bin/bash", s_logger);
command.add("-c");
command.add("if [ -d /etc/apache2 ] ; then service apache2 status | grep pid; else service httpd status | grep pid; fi ");
String result = command.execute();
//Apache Server is not running. Try to start it.
if (result != null) {
/*s_logger.warn("Apache server not running, trying to start it");
String port = Integer.toString(TemplateConstants.DEFAULT_TMPLT_COPY_PORT);
String intf = TemplateConstants.DEFAULT_TMPLT_COPY_INTF;
command = new Script("/bin/bash", s_logger);
command.add("-c");
command.add("iptables -D INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + port + " -j DROP;" +
@ -636,23 +639,23 @@ public class UploadManagerImpl implements UploadManager {
"iptables -I INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j DROP;" +
"iptables -I INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + port + " -j HTTP;" +
"iptables -I INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j HTTP;");
result = command.execute();
if (result != null) {
s_logger.warn("Error in opening up httpd port err=" + result );
return false;
}*/
command = new Script("/bin/bash", s_logger);
command.add("-c");
command.add("if [ -d /etc/apache2 ] ; then service apache2 start; else service httpd start; fi ");
result = command.execute();
if (result != null) {
s_logger.warn("Error in starting httpd service err=" + result );
return false;
}
}
return true;
}
command = new Script("/bin/bash", s_logger);
command.add("-c");
command.add("if [ -d /etc/apache2 ] ; then service apache2 start; else service httpd start; fi ");
result = command.execute();
if (result != null) {
s_logger.warn("Error in starting httpd service err=" + result );
return false;
}
}
return true;
}
}

View File

@ -22,26 +22,19 @@
under the License.
-->
<section id="accessing-vms">
<title>Accessing VMs</title>
<para>Any user can access their own virtual machines. The administrator can access all VMs running in the cloud.</para>
<para>To access a VM through the &PRODUCT; UI:</para>
<orderedlist>
<listitem><para>Log in to the &PRODUCT; UI as a user or admin.</para></listitem>
<listitem><para>Click Instances, then click the name of a running VM.</para></listitem>
<listitem><para>Click the View Console <inlinemediaobject>
<imageobject>
<imagedata fileref="./images/console-icon.png"/>
</imageobject>
<textobject>
<phrase>consoleicon.png: button to view the console.</phrase>
</textobject>
</inlinemediaobject></para></listitem>
</orderedlist>
<para>To access a VM directly over the network:</para>
<orderedlist>
<listitem><para>The VM must have some port open to incoming traffic. For example, in a basic zone, a new VM might be assigned to a security group which allows incoming traffic. This depends on what security group you picked when creating the VM. In other cases, you can open a port by setting up a port forwarding policy. See IP Forwarding and Firewalling.</para></listitem>
<listitem><para>If a port is open but you can not access the VM using ssh, its possible that ssh is not already enabled on the VM. This will depend on whether ssh is enabled in the template you picked when creating the VM. Access the VM through the &PRODUCT; UI and enable ssh on the machine using the commands for the VMs operating system.</para></listitem>
<listitem><para>If the network has an external firewall device, you will need to create a firewall rule to allow access. See IP Forwarding and Firewalling.</para></listitem>
</orderedlist>
<title>Accessing VMs</title>
<para>Any user can access their own virtual machines. The administrator can access all VMs running in the cloud.</para>
<para>To access a VM through the &PRODUCT; UI:</para>
<orderedlist>
<listitem><para>Log in to the &PRODUCT; UI as a user or admin.</para></listitem>
<listitem><para>Click Instances, then click the name of a running VM.</para></listitem>
<listitem><para>Click the View Console button <inlinegraphic format="PNG" fileref="images/view-console-button.png"/>.</para></listitem>
</orderedlist>
<para>To access a VM directly over the network:</para>
<orderedlist>
<listitem><para>The VM must have some port open to incoming traffic. For example, in a basic zone, a new VM might be assigned to a security group which allows incoming traffic. This depends on what security group you picked when creating the VM. In other cases, you can open a port by setting up a port forwarding policy. See IP Forwarding and Firewalling.</para></listitem>
<listitem><para>If a port is open but you can not access the VM using ssh, its possible that ssh is not already enabled on the VM. This will depend on whether ssh is enabled in the template you picked when creating the VM. Access the VM through the &PRODUCT; UI and enable ssh on the machine using the commands for the VMs operating system.</para></listitem>
<listitem><para>If the network has an external firewall device, you will need to create a firewall rule to allow access. See IP Forwarding and Firewalling.</para></listitem>
</orderedlist>
</section>

284
docs/en-US/autoscale.xml Normal file
View File

@ -0,0 +1,284 @@
<?xml version='1.0' encoding='utf-8' ?>
<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
%BOOK_ENTITIES;
]>
<!-- Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<section id="autoscale">
<title>Configuring AutoScale</title>
<para>AutoScaling allows you to scale your back-end services or application VMs up or down
seamlessly and automatically according to the conditions you define. With AutoScaling enabled,
you can ensure that the number of VMs you are using seamlessly scale up when demand increases,
and automatically decreases when demand subsides. Thus it helps you save compute costs by
terminating underused VMs automatically and launching new VMs when you need them, without the
need for manual intervention.</para>
<para>NetScaler AutoScaling is designed to seamlessly launch or terminate VMs based on
user-defined conditions. Conditions for triggering a scaleup or scaledown action can vary from a
simple use case like monitoring the CPU usage of a server to a complex use case of monitoring a
combination of server's responsiveness and its CPU usage. For example, you can configure
AutoScaling to launch an additional VM whenever CPU usage exceeds 80 percent for 15 minutes, or
to remove a VM whenever CPU usage is less than 20 percent for 30 minutes.</para>
<para>&PRODUCT; uses the NetScaler load balancer to monitor all aspects of a system's health and
work in unison with &PRODUCT; to initiate scale-up or scale-down actions. The supported
NetScaler version is 10.0.</para>
<formalpara>
<title>Prerequisites</title>
<para>Before you configure an AutoScale rule, consider the following:</para>
</formalpara>
<itemizedlist>
<listitem>
<para>Ensure that the necessary template is prepared before configuring AutoScale. When a VM
is deployed by using a template and when it comes up, the application should be up and
running.</para>
<note>
<para>If the application is not running, the NetScaler device considers the VM as
ineffective and continues provisioning the VMs unconditionally until the resource limit is
exhausted.</para>
</note>
</listitem>
<listitem>
<para>Deploy the templates you prepared. Ensure that the applications come up on the first
boot and are ready to take the traffic. Observe the time required to deploy the template.
Consider this time when you specify the quiet time while configuring AutoScale.</para>
</listitem>
<listitem>
<para>The AutoScale feature supports the SNMP counters that can be used to define conditions
for taking scale up or scale down actions. To monitor the SNMP-based counter, ensure that
the SNMP agent is installed in the template used for creating the AutoScale VMs, and the
SNMP operations work with the configured SNMP community and port by using standard SNMP
managers. For example, see <xref linkend="configure-snmp-rhel"/> to configure SNMP on a RHEL
machine.</para>
</listitem>
<listitem>
<para>Ensure that the endpointe.url parameter present in the Global Settings is set to the
Management Server API URL. For example, http://10.102.102.22:8080/client/api. In a
multi-node Management Server deployment, use the virtual IP address configured in the load
balancer for the management servers cluster. Additionally, ensure that the NetScaler device
has access to this IP address to provide AutoScale support.</para>
<para>If you update the endpointe.url, disable the AutoScale functionality of the load
balancer rules in the system, then enable them back to reflect the changes. For more
information see <xref linkend="update-autoscale"/></para>
</listitem>
<listitem>
<para>If the API Key and Secret Key are regenerated for an AutoScale user, ensure that the
AutoScale functionality of the load balancers that the user participates in are disabled and
then enabled to reflect the configuration changes in the NetScaler.</para>
</listitem>
<listitem>
<para>In an advanced Zone, ensure that at least one VM is present before configuring a
load balancer rule with AutoScale. Having one VM in the network ensures that the network is
in the implemented state for configuring AutoScale.</para>
</listitem>
</itemizedlist>
<formalpara>
<title>Configuration</title>
<para>Specify the following:</para>
</formalpara>
<mediaobject>
<imageobject>
<imagedata fileref="./images/autoscale-config.png"/>
</imageobject>
<textobject>
<phrase>autoscale-config.png: Configuring AutoScale</phrase>
</textobject>
</mediaobject>
<itemizedlist>
<listitem>
<para><emphasis role="bold">Template</emphasis>: A template consists of a base OS image and
application. A template is used to provision the new instance of an application on a scaleup
action. When a VM is deployed from a template, the VM can start taking the traffic from the
load balancer without any admin intervention. For example, if the VM is deployed for a Web
service, it should have the Web server running, the database connected, and so on.</para>
</listitem>
<listitem>
<para><emphasis role="bold">Compute offering</emphasis>: A predefined set of virtual hardware
attributes, including CPU speed, number of CPUs, and RAM size, that the user can select when
creating a new virtual machine instance. Choose one of the compute offerings to be used
while provisioning a VM instance as part of scaleup action.</para>
</listitem>
<listitem>
<para><emphasis role="bold">Min Instance</emphasis>: The minimum number of active VM instances
that is assigned to a load balancing rule. The active VM instances are the application
instances that are up and serving the traffic, and are being load balanced. This parameter
ensures that a load balancing rule has at least the configured number of active VM instances
are available to serve the traffic.</para>
<note>
<para>If an application, such as SAP, running on a VM instance is down for some reason, the
VM is then not counted as part of Min Instance parameter, and the AutoScale feature
initiates a scaleup action if the number of active VM instances is below the configured
value. Similarly, when an application instance comes up from its earlier down state, this
application instance is counted as part of the active instance count and the AutoScale
process initiates a scaledown action when the active instance count breaches the Max
instance value.</para>
</note>
</listitem>
<listitem>
<para><emphasis role="bold">Max Instance</emphasis>: Maximum number of active VM instances
that <emphasis role="bold">should be assigned to </emphasis>a load balancing rule. This
parameter defines the upper limit of active VM instances that can be assigned to a load
balancing rule.</para>
<para>Specifying a large value for the maximum instance parameter might result in provisioning
large number of VM instances, which in turn leads to a single load balancing rule exhausting
the VM instances limit specified at the account or domain level.</para>
<note>
<para>If an application, such as SAP, running on a VM instance is down for some reason, the
VM is not counted as part of Max Instance parameter. So there may be scenarios where the
number of VMs provisioned for a scaleup action might be more than the configured Max
Instance value. Once the application instances in the VMs are up from an earlier down
state, the AutoScale feature starts aligning to the configured Max Instance value.</para>
</note>
</listitem>
</itemizedlist>
<para>Specify the following scale-up and scale-down policies:</para>
<itemizedlist>
<listitem>
<para><emphasis role="bold">Duration</emphasis>: The duration, in seconds, for which the
conditions you specify must be true to trigger a scaleup action. The conditions defined
should hold true for the entire duration you specify for an AutoScale action to be invoked.
</para>
</listitem>
<listitem>
<para><emphasis role="bold">Counter</emphasis>: The performance counters expose the state of
the monitored instances. By default, &PRODUCT; offers four performance counters: three SNMP
counters and one NetScaler counter. The SNMP counters are Linux User CPU, Linux System CPU,
and Linux CPU Idle. The NetScaler counter is ResponseTime. The root administrator can add
additional counters into &PRODUCT; by using the &PRODUCT; API.</para>
</listitem>
<listitem>
<para><emphasis role="bold">Operator</emphasis>: The following five relational operators are
supported in AutoScale feature: Greater than, Less than, Less than or equal to, Greater than
or equal to, and Equal to.</para>
</listitem>
<listitem>
<para><emphasis role="bold">Threshold</emphasis>: Threshold value to be used for the counter.
Once the counter defined above breaches the threshold value, the AutoScale feature initiates
a scaleup or scaledown action.</para>
</listitem>
<listitem>
<para><emphasis role="bold">Add</emphasis>: Click Add to add the condition.</para>
</listitem>
</itemizedlist>
<para>Additionally, if you want to configure the advanced settings, click Show advanced settings,
and specify the following:</para>
<itemizedlist>
<listitem>
<para><emphasis role="bold">Polling interval</emphasis>: Frequency in which the conditions,
combination of counter, operator and threshold, are to be evaluated before taking a scale up
or down action. The default polling interval is 30 seconds.</para>
</listitem>
<listitem>
<para><emphasis role="bold">Quiet Time</emphasis>: This is the cool down period after an
AutoScale action is initiated. The time includes the time taken to complete provisioning a
VM instance from its template and the time taken by an application to be ready to serve
traffic. This quiet time allows the fleet to come up to a stable state before any action can
take place. The default is 300 seconds.</para>
</listitem>
<listitem>
<para><emphasis role="bold">Destroy VM Grace Period</emphasis>: The duration in seconds, after
a scaledown action is initiated, to wait before the VM is destroyed as part of scaledown
action. This is to ensure graceful close of any pending sessions or transactions being
served by the VM marked for destroy. The default is 120 seconds.</para>
</listitem>
<listitem>
<para><emphasis role="bold">Security Groups</emphasis>: Security groups provide a way to
isolate traffic to the VM instances. A security group is a group of VMs that filter their
incoming and outgoing traffic according to a set of rules, called ingress and egress rules.
These rules filter network traffic according to the IP address that is attempting to
communicate with the VM.</para>
</listitem>
<listitem>
<para><emphasis role="bold">Disk Offerings</emphasis>: A predefined set of disk sizes for
primary data storage.</para>
</listitem>
<listitem>
<para><emphasis role="bold">SNMP Community</emphasis>: The SNMP community string to be used by
the NetScaler device to query the configured counter value from the provisioned VM
instances. Default is public.</para>
</listitem>
<listitem>
<para><emphasis role="bold">SNMP Port</emphasis>: The port number on which the SNMP agent that
runs on the provisioned VMs is listening. The default port is 161.</para>
</listitem>
<listitem>
<para><emphasis role="bold">User</emphasis>: This is the user that the NetScaler device uses to
invoke scaleup and scaledown API calls to the cloud. If no option is specified, the user who
configures AutoScaling is applied. Specify another user name to override.</para>
</listitem>
<listitem>
<para><emphasis role="bold">Apply</emphasis>: Click Apply to create the AutoScale
configuration.</para>
</listitem>
</itemizedlist>
<formalpara>
<title>Disabling and Enabling an AutoScale Configuration</title>
<para>If you want to perform any maintenance operation on the AutoScale VM instances, disable
the AutoScale configuration. When the AutoScale configuration is disabled, no scaleup or
scaledown action is performed. You can use this downtime for the maintenance activities. To
disable the AutoScale configuration, click the Disable AutoScale<inlinemediaobject>
<imageobject>
<imagedata fileref="./images/enable-disable-autoscale.png"/>
</imageobject>
<textobject>
<phrase>EnableDisable.png: button to enable or disable AutoScale.</phrase>
</textobject>
</inlinemediaobject>button.</para>
</formalpara>
<para>The button toggles between enable and disable, depending on whether AutoScale is currently
enabled or not. After the maintenance operations are done, you can enable the AutoScale
configuration back. To enable, open the AutoScale configuration page again, then click the
Enable AutoScale<inlinemediaobject>
<imageobject>
<imagedata fileref="./images/enable-disable-autoscale.png"/>
</imageobject>
<textobject>
<phrase>EnableDisable.png: button to enable or disable AutoScale.</phrase>
</textobject>
</inlinemediaobject>button.</para>
<formalpara id="update-autoscale">
<title>Updating an AutoScale Configuration</title>
<para>You can update the various parameters and add or delete the conditions in a scaleup or
scaledown rule. Before you update an AutoScale configuration, ensure that you disable the
AutoScale load balancer rule by clicking the Disable AutoScale button.</para>
</formalpara>
<para>After you modify the required AutoScale parameters, click Apply. To apply the new AutoScale
policies, open the AutoScale configuration page again, then click the Enable AutoScale
button.</para>
<formalpara>
<title>Runtime Considerations</title>
<para/>
</formalpara>
<itemizedlist>
<listitem>
<para>An administrator should not assign a VM to a load balancing rule which is configured for
AutoScale.</para>
</listitem>
<listitem>
<para>If the NetScaler device is shut down or restarted before VM provisioning is completed,
the provisioned VM cannot become a part of the load balancing rule even though the intent was
to assign it to a load balancing rule. To work around this, rename the AutoScale provisioned
VMs based on the rule name or ID so that at any point in time the VMs can be reconciled to
their load balancing rule.</para>
</listitem>
<listitem>
<para>Making API calls outside the context of AutoScale, such as destroyVM, on an autoscaled
VM leaves the load balancing configuration in an inconsistent state. Though VM is destroyed
from the load balancer rule, NetScaler continues to show the VM as a service assigned to a
rule.</para>
</listitem>
</itemizedlist>
</section>

View File

@ -0,0 +1,145 @@
<?xml version='1.0' encoding='utf-8' ?>
<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
%BOOK_ENTITIES;
]>
<!-- Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<section id="aws-api-examples">
<title>Examples</title>
<para>There are many tools available to interface with an AWS-compatible API. In this section we provide
a few examples that users of &PRODUCT; can build upon.</para>
<section id="aws-api-boto-examples">
<title>Boto Examples</title>
<para>Boto is one of them. It is a Python package available at https://github.com/boto/boto.
In this section we provide two examples of Python scripts that use Boto and have been tested with the
&PRODUCT; AWS API Interface.</para>
<para>First is an EC2 example. Replace the Access and Secret Keys with your own and
update the endpoint.</para>
<para>
<example>
<title>An EC2 Boto example</title>
<programlisting>#!/usr/bin/env python
import sys
import os
import boto
import boto.ec2
region = boto.ec2.regioninfo.RegionInfo(name="ROOT",endpoint="localhost")
apikey='GwNnpUPrO6KgIdZu01z_ZhhZnKjtSdRwuYd4DvpzvFpyxGMvrzno2q05MB0ViBoFYtdqKd'
secretkey='t4eXLEYWw7chBhDlaKf38adCMSHx_wlds6JfSx3z9fSpSOm0AbP9Moj0oGIzy2LSC8iw'
def main():
'''Establish connection to EC2 cloud'''
conn =boto.connect_ec2(aws_access_key_id=apikey,
aws_secret_access_key=secretkey,
is_secure=False,
region=region,
port=7080,
path="/awsapi",
api_version="2010-11-15")
'''Get list of images that I own'''
images = conn.get_all_images()
print images
myimage = images[0]
'''Pick an instance type'''
vm_type='m1.small'
reservation = myimage.run(instance_type=vm_type,security_groups=['default'])
if __name__ == '__main__':
main()
</programlisting>
</example>
</para>
<para>Second is an S3 example. Replace the Access and Secret keys with your own,
as well as the endpoint of the service. Be sure to also update the file paths to something
that exists on your machine.</para>
<para>
<example>
<title>An S3 Boto Example</title>
<programlisting>#!/usr/bin/env python
import sys
import os
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
apikey='ChOw-pwdcCFy6fpeyv6kUaR0NnhzmG3tE7HLN2z3OB_s-ogF5HjZtN4rnzKnq2UjtnHeg_yLA5gOw'
secretkey='IMY8R7CJQiSGFk4cHwfXXN3DUFXz07cCiU80eM3MCmfLs7kusgyOfm0g9qzXRXhoAPCH-IRxXc3w'
cf=OrdinaryCallingFormat()
def main():
'''Establish connection to S3 service'''
conn =S3Connection(aws_access_key_id=apikey,aws_secret_access_key=secretkey, \
is_secure=False, \
host='localhost', \
port=7080, \
calling_format=cf, \
path="/awsapi/rest/AmazonS3")
try:
bucket=conn.create_bucket('cloudstack')
k = Key(bucket)
k.key = 'test'
try:
k.set_contents_from_filename('/Users/runseb/Desktop/s3cs.py')
except:
print 'could not write file'
pass
except:
bucket = conn.get_bucket('cloudstack')
k = Key(bucket)
k.key = 'test'
try:
k.get_contents_to_filename('/Users/runseb/Desktop/foobar')
except:
print 'Could not get file'
pass
try:
bucket1=conn.create_bucket('teststring')
k=Key(bucket1)
k.key('foobar')
k.set_contents_from_string('This is my silly test')
except:
bucket1=conn.get_bucket('teststring')
k = Key(bucket1)
k.key='foobar'
k.get_contents_as_string()
if __name__ == '__main__':
main()
</programlisting>
</example>
</para>
</section>
<section id="aws-api-jclouds-examples">
<title>JClouds Examples</title>
<para></para>
</section>
</section>

View File

@ -23,26 +23,88 @@
-->
<section id="aws-ec2-configuration">
<title>Enabling the AWS API Compatible Interface</title>
<para>
The software that provides AWS API compatibility is installed along with &PRODUCT;. However, you must enable the feature and perform some setup steps.
</para>
<orderedlist>
<listitem><para>Set the global configuration parameter enable.ec2.api to true. See <xref linkend="global-config" />.</para></listitem>
<listitem><para>Create a set of &PRODUCT; service offerings with names that match the Amazon service offerings.
You can do this through the &PRODUCT; UI as described in the Administration Guide.</para>
<warning><para>Be sure you have included the Amazon default service offering, m1.small.</para></warning></listitem>
<listitem><para>If you did not already do so when you set the configuration parameter in step 1, restart the Management Server.</para>
<programlisting># service cloud-management restart</programlisting></listitem>
<listitem><para>(Optional) The AWS API listens for requests on port 7080. If you prefer AWS API to listen on another port, you can change it as follows:</para>
<orderedlist numeration="loweralpha">
<listitem><para>Edit the files /etc/cloud/management/server.xml, /etc/cloud/management/server-nonssl.xml, and /etc/cloud/management/server-ssl.xml.</para></listitem>
<listitem><para>In each file, find the tag &lt;Service name="Catalina7080"&gt;. Under this tag, locate &lt;Connector executor="tomcatThreadPool-internal" port= ....&lt;.</para></listitem>
<listitem><para>Change the port to whatever port you want to use, then save the files.</para></listitem>
<listitem><para>Restart the Management Server.</para>
<note><para>If you re-install CloudStack, you will have to make these changes again.</para></note>
<title>Enabling the EC2 and S3 Compatible Interface</title>
<para>The software that provides AWS API compatibility is installed along with &PRODUCT;. You must enable the services and perform some setup steps prior to using it.
</para>
<orderedlist>
<listitem><para>Set the global configuration parameters for each service to true.
See <xref linkend="global-config" />.</para></listitem>
<listitem><para>Create a set of &PRODUCT; service offerings with names that match the Amazon service offerings.
You can do this through the &PRODUCT; UI as described in the Administration Guide.</para>
<warning><para>Be sure you have included the Amazon default service offering, m1.small. As well as any EC2 instance types that you will use.</para></warning>
</listitem>
</orderedlist>
</listitem>
</orderedlist>
<listitem><para>If you did not already do so when you set the configuration parameter in step 1,
restart the Management Server.</para>
<programlisting># service cloud-management restart</programlisting>
</listitem>
</orderedlist>
<para>The following sections provide details on how to perform these steps.</para>
<section id="aws-api-settings">
<title>Enabling the Services</title>
<para>To enable the EC2 and S3 compatible services you need to set the configuration variables <emphasis>enable.ec2.api</emphasis>
and <emphasis>enable.s3.api</emphasis> to true. You do not have to enable both at the same time. Enable the ones you need.
This can be done via the &PRODUCT; GUI by going in <emphasis>Global Settings</emphasis> or via the API.</para>
<para>The snapshot below shows you how to use the GUI to enable these services</para>
<para>
<mediaobject>
<imageobject>
<imagedata fileref="./images/ec2-s3-configuration.png"/>
</imageobject>
<textobject>
<phrase>Use the GUI to set the configuration variable to <emphasis>true</emphasis></phrase>
</textobject>
</mediaobject>
</para>
<para>Using the &PRODUCT; API, the easiest way is to use the so-called integration port, on which you can make
unauthenticated calls. In Global Settings set the port to 8096 and subsequently call the <emphasis>updateConfiguration</emphasis> method.
The following URLs show you how:</para>
<para>
<programlisting>
http://localhost:8096/client/api?command=updateConfiguration&amp;name=enable.ec2.api&amp;value=true
http://localhost:8096/client/api?command=updateConfiguration&amp;name=enable.s3.api&amp;value=true
</programlisting>
</para>
<para>Once you have enabled the services, restart the server.</para>
</section>
<section id="aws-ec2-service-offerings">
<title>Creating EC2 Compatible Service Offerings</title>
<para>You will also need to define compute service offerings with names compatible with the <ulink url="http://aws.amazon.com/ec2/instance-types/">
Amazon EC2 instance types</ulink> API names (e.g., m1.small, m1.large). This can be done via the &PRODUCT; GUI.
Go under <emphasis>Service Offerings</emphasis> select <emphasis>Compute offering</emphasis> and either create
a new compute offering or modify an existing one, ensuring that the name matches an EC2 instance type API name. The snapshot below shows you how:</para>
<para>
<mediaobject>
<imageobject>
<imagedata fileref="./images/compute-service-offerings.png"/>
</imageobject>
<textobject>
<phrase>Use the GUI to set the name of a compute service offering to an EC2 instance
type API name.</phrase>
</textobject>
</mediaobject>
</para>
</section>
<section id="aws-api-port-change">
<title>Modifying the AWS API Port</title>
<note>
<para>(Optional) The AWS API listens for requests on port 7080. If you prefer AWS API to listen on another port, you can change it as follows:</para>
<orderedlist numeration="loweralpha">
<listitem><para>Edit the files /etc/cloud/management/server.xml, /etc/cloud/management/server-nonssl.xml,
and /etc/cloud/management/server-ssl.xml.</para></listitem>
<listitem><para>In each file, find the tag &lt;Service name="Catalina7080"&gt;. Under this tag,
locate &lt;Connector executor="tomcatThreadPool-internal" port= ....&lt;.</para></listitem>
<listitem><para>Change the port to whatever port you want to use, then save the files.</para></listitem>
<listitem><para>Restart the Management Server.</para></listitem>
</orderedlist>
<para>If you re-install &PRODUCT;, you will have to re-enable the services and if need be update the port.</para>
</note>
</section>
</section>

View File

@ -23,16 +23,19 @@
-->
<section id="aws-ec2-introduction">
<title>Amazon Web Services EC2 Compatible Interface</title>
<title>Amazon Web Services Compatible Interface</title>
<para>&PRODUCT; can translate Amazon Web Services (AWS) API calls to native &PRODUCT; API calls
so that users can continue using existing AWS-compatible tools. This translation service runs as
a separate web application in the same tomcat server as the management server of &PRODUCT;,
listening on the same port. This Amazon EC2-compatible API is accessible through a SOAP web
service.</para>
listening on a different port. The Amazon Web Services (AWS) compatible interface provides the
EC2 SOAP and Query APIs as well as the S3 REST API.</para>
<note>
<para>This service was previously enabled by separate software called CloudBridge. It is now
fully integrated with the &PRODUCT; management server. </para>
</note>
<warning>
<para>The compatible interface for the EC2 Query API and the S3 API are Work In Progress. The S3 compatible API offers a way to store data on the management server file system, it is not an implementation of the S3 backend.</para>
</warning>
<para>Limitations</para>
<itemizedlist>
<listitem>
@ -42,7 +45,9 @@
<para>Available in fresh installations of &PRODUCT;. Not available through upgrade of previous versions.</para>
</listitem>
<listitem>
<para>If you need to support features such as elastic IP, set up a Citrix NetScaler to provide this service. The commands such as ec2-associate-address will not work without EIP setup. Users running VMs in this zone will be using the NetScaler-enabled network offering (DefaultSharedNetscalerEIP and ELBNetworkOffering).</para>
<para>Features such as Elastic IP (EIP) and Elastic Load Balancing (ELB) are only available in an infrastructure
with a Citrix NetScaler device. Users accessing a Zone with a NetScaler device will need to use a
NetScaler-enabled network offering (DefaultSharedNetscalerEIP and ELBNetworkOffering).</para>
</listitem>
</itemizedlist>
</section>

View File

@ -23,13 +23,14 @@
-->
<section id="aws-ec2-requirements">
<title>System Requirements</title>
<title>Supported API Version</title>
<itemizedlist>
<listitem><para>This interface complies with Amazon's WSDL version dated November 15, 2010, available at
<listitem><para>The EC2 interface complies with Amazon's WSDL version dated November 15, 2010, available at
<ulink url="http://ec2.amazonaws.com/doc/2010-11-15/">http://ec2.amazonaws.com/doc/2010-11-15/</ulink>.</para></listitem>
<listitem><para>Compatible with the EC2 command-line
<listitem><para>The interface is compatible with the EC2 command-line
tools <emphasis>EC2 tools v. 1.3.6230</emphasis>, which can be downloaded at <ulink
url="http://s3.amazonaws.com/ec2-downloads/ec2-api-tools-1.3-62308.zip">http://s3.amazonaws.com/ec2-downloads/ec2-api-tools-1.3-62308.zip</ulink>.</para>
</listitem>
</itemizedlist>
</section>
<note><para>Work is underway to support a more recent version of the EC2 API</para></note>
</section>

View File

@ -24,7 +24,7 @@
<section id="aws-ec2-supported-commands">
<title>Supported AWS API Calls</title>
<para>The following Amazon EC2 commands are supported by &PRODUCT; when the AWS API compatibility feature is enabled.
<para>The following Amazon EC2 commands are supported by &PRODUCT; when the AWS API compatible interface is enabled.
For a few commands, there are differences between the &PRODUCT; and Amazon EC2 versions, and these differences are noted. The underlying SOAP call for each command is also given, for those who have built tools using those calls.
</para>
<table frame='all'>

View File

@ -24,7 +24,7 @@
<section id="aws-ec2-timeouts">
<title>Using Timeouts to Ensure AWS API Command Completion</title>
<para>The Amazon EC2 command-line tools have a default connection timeout. When used with &PRODUCT;, a longer timeout might be needed for some commands. If you find that commands are not completing due to timeouts, you can gain more time for commands to finish by overriding the default timeouts on individual commands. You can add the following optional command-line parameters to any &PRODUCT;-supported EC2 command:</para>
<para>The Amazon EC2 command-line tools have a default connection timeout. When used with &PRODUCT;, a longer timeout might be needed for some commands. If you find that commands are not completing due to timeouts, you can specify custom timeouts. You can add the following optional command-line parameters to any &PRODUCT;-supported EC2 command:</para>
<informaltable frame="all">
<tgroup cols="2" align="left" colsep="1" rowsep="1">
<colspec colname="c1" />
@ -47,4 +47,5 @@
</informaltable>
<para>Example:</para>
<programlisting>ec2-run-instances 2 z us-test1 n 1-3 --connection-timeout 120 --request-timeout 120</programlisting>
</section>
<note><para>The optional timeout arguments are not specific to &PRODUCT;.</para></note>
</section>

View File

@ -22,76 +22,84 @@
under the License.
-->
<section id="aws-ec2-user-setup">
<title>AWS API User Setup Steps</title>
<title>AWS API User Setup</title>
<para>In general, users need not be aware that they are using a translation service provided by &PRODUCT;.
They need only send AWS API calls to &PRODUCT;'s endpoint, and it will translate the calls to the native API.
Users of the Amazon EC2 compatible interface will be able to keep their existing EC2 tools
They only need to send AWS API calls to &PRODUCT;'s endpoint, and it will translate the calls to the native &PRODUCT; API. Users of the Amazon EC2 compatible interface will be able to keep their existing EC2 tools
and scripts and use them with their &PRODUCT; deployment, by specifying the endpoint of the
management server and using the proper user credentials. In order to do this, each user must
perform the following configuration steps: </para>
<para>
<itemizedlist>
<listitem>
<para>Generate user credentials and register with the service.</para>
<para>Generate user credentials.</para>
</listitem>
<listitem>
<para>Set up the environment variables for the EC2 command-line tools.</para>
<para>Register with the service.</para>
</listitem>
<listitem>
<para>For SOAP access, use the endpoint http://<replaceable>&PRODUCT;-management-server</replaceable>:7080/awsapi.
The <replaceable>&PRODUCT;-management-server</replaceable> can be specified by a fully-qualified domain name or IP address.</para>
<para>For convenience, set up environment variables for the EC2 SOAP command-line tools.</para>
</listitem>
</itemizedlist>
</para>
<section id="aws-ec2-user-registration">
<title>AWS API User Registration</title>
<para>Each user must perform a one-time registration. The user follows these steps:</para>
<orderedlist>
<listitem>
<para>Obtain the following by looking in the &PRODUCT; UI, using the API, or asking the cloud administrator:</para>
<itemizedlist>
<listitem><para>The &PRODUCT; server's publicly available DNS name or IP address</para></listitem>
<listitem><para>The user account's API key and Secret key</para></listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
Generate a private key and a self-signed X.509 certificate. The user substitutes their own desired storage location for /path/to/… below.
</para>
<para><programlisting>$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /path/to/private_key.pem -out /path/to/cert.pem</programlisting>
</para>
</listitem>
<listitem>
<para>
Register the mapping from the X.509 certificate to the API/Secret keys.
Download the following script from http://download.cloud.com/releases/3.0.3/cloudstack-aws-api-register and run it.
Substitute the values that were obtained in step 1 in the URL below.
</para>
<para>
<programlisting>$ cloudstack-aws-api-register --apikey=<replaceable>User's &PRODUCT; API key</replaceable> --secretkey=<replaceable>User's &PRODUCT; Secret key</replaceable> --cert=<replaceable>/path/to/cert.pem</replaceable> --url=http://<replaceable>&PRODUCT;.server</replaceable>:7080/awsapi</programlisting>
</para>
</listitem>
</orderedlist>
<para>Each user must perform a one-time registration. The user follows these steps:</para>
<orderedlist>
<listitem>
<para>Obtain the following by looking in the &PRODUCT; UI, using the API, or asking the cloud administrator:
</para>
<itemizedlist>
<listitem><para>The &PRODUCT; server's publicly available DNS name or IP address</para></listitem>
<listitem><para>The user account's Access key and Secret key</para></listitem>
</itemizedlist>
</listitem>
<listitem>
<para>Generate a private key and a self-signed X.509 certificate. The user substitutes their own desired storage location for /path/to/… below.
</para>
<para>
<programlisting>$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /path/to/private_key.pem -out /path/to/cert.pem</programlisting>
</para>
</listitem>
<listitem>
<para>Register the user X.509 certificate and Access/Secret keys with the AWS compatible service.
If you have the source code of &PRODUCT; go to the awsapi-setup/setup directory and use the Python script
cloudstack-aws-api-register. If you do not have the source then download the script using the following command.
</para>
<para>
<programlisting>wget -O cloudstack-aws-api-register <ulink url="https://git-wip-us.apache.org/repos/asf?p=incubator-cloudstack.git;a=blob_plain;f=awsapi-setup/setup/cloudstack-aws-api-register;hb=HEAD">"https://git-wip-us.apache.org/repos/asf?p=incubator-cloudstack.git;a=blob_plain;f=awsapi-setup/setup/cloudstack-aws-api-register;hb=HEAD"</ulink>
</programlisting>
</para>
<para> Then execute it, using the parameter values that were obtained in step 1. An example is shown below.</para>
<para>
<programlisting>$ cloudstack-aws-api-register --apikey=<replaceable>User's &PRODUCT; API key</replaceable> --secretkey=<replaceable>User's &PRODUCT; Secret key</replaceable> --cert=<replaceable>/path/to/cert.pem</replaceable> --url=http://<replaceable>&PRODUCT;.server</replaceable>:7080/awsapi</programlisting>
</para>
</listitem>
</orderedlist>
<note>
<para>
A user with an existing AWS certificate could choose to use the same certificate with &PRODUCT;, but the public key would be uploaded to the &PRODUCT; management server database.
A user with an existing AWS certificate could choose to use the same certificate with &PRODUCT;, but note that the certificate would be uploaded to the &PRODUCT; management server database.
</para>
</note>
</section>
<section id="aws-api-tools-setup">
<title>AWS API Command-Line Tools Setup</title>
<para>To use the EC2 command-line tools, the user must perform these steps:</para>
<orderedlist>
<listitem><para>Be sure you have the right version of EC2 Tools.
The supported version is available at <ulink url="http://s3.amazonaws.com/ec2-downloads/ec2-api-tools-1.3-62308.zip">http://s3.amazonaws.com/ec2-downloads/ec2-api-tools-1.3-62308.zip</ulink>.</para>
</listitem>
<listitem>
<para>Set up the environment variables that will direct the tools to the server. As a best practice, you may wish to place these commands in a script that may be sourced before using the AWS API translation feature.</para>
<programlisting>$ export EC2_CERT=/path/to/cert.pem
$ export EC2_PRIVATE_KEY=/path/to/private_key.pem
$ export EC2_URL=http://<replaceable>&PRODUCT;.server</replaceable>:7080/awsapi
$ export EC2_HOME=/path/to/EC2_tools_directory</programlisting>
</listitem>
</orderedlist>
<title>AWS API Command-Line Tools Setup</title>
<para>To use the EC2 command-line tools, the user must perform these steps:</para>
<orderedlist>
<listitem>
<para>Be sure you have the right version of EC2 Tools.
The supported version is available at <ulink url="http://s3.amazonaws.com/ec2-downloads/ec2-api-tools-1.3-62308.zip">http://s3.amazonaws.com/ec2-downloads/ec2-api-tools-1.3-62308.zip</ulink>.
</para>
</listitem>
<listitem>
<para>Set up the EC2 environment variables. This can be done every time you use the service or you can set them up in the proper shell profile. Replace the endpoint (i.e., EC2_URL) with the proper address of your &PRODUCT; management server and port. In a bash shell do the following.
</para>
<programlisting>
$ export EC2_CERT=/path/to/cert.pem
$ export EC2_PRIVATE_KEY=/path/to/private_key.pem
$ export EC2_URL=http://localhost:7080/awsapi
$ export EC2_HOME=/path/to/EC2_tools_directory
</programlisting>
</listitem>
</orderedlist>
</section>
</section>
</section>

View File

@ -23,11 +23,12 @@
-->
<chapter id="aws-interface-compatibility">
<title>Amazon Web Service Interface Compatibility</title>
<title>Amazon Web Services Compatible Interface</title>
<xi:include href="aws-ec2-introduction.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="aws-ec2-requirements.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="aws-ec2-configuration.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="aws-ec2-user-setup.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="aws-ec2-timeouts.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="aws-ec2-supported-commands.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="aws-api-examples.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
</chapter>

View File

@ -0,0 +1,46 @@
<?xml version='1.0' encoding='utf-8' ?>
<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
%BOOK_ENTITIES;
]>
<!-- Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<section id="building-marvin">
<title>Building and Installing Marvin</title>
<para>Marvin is built with Maven and is dependent on APIdoc. To build it do the following in the root tree of &PRODUCT;:</para>
<programlisting>mvn -P developer -pl :cloud-apidoc</programlisting>
<programlisting>mvn -P developer -pl :cloud-marvin</programlisting>
<para>If successful, the build will have created the cloudstackAPI Python package under tools/marvin/marvin/cloudstackAPI as well as a gzipped Marvin package under tools/marvin/dist. To install the Python Marvin module do the following in tools/marvin:</para>
<programlisting>sudo python ./setup.py install</programlisting>
<para>The dependencies will be downloaded, the Python module installed, and you should be able to use Marvin in Python. Check that you can import the module before starting to use it.</para>
<programlisting>$ python
Python 2.7.3 (default, Nov 17 2012, 19:54:34)
[GCC 4.2.1 Compatible Apple Clang 4.1 ((tags/Apple/clang-421.11.66))] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import marvin
>>> from marvin.cloudstackAPI import *
>>> </programlisting>
<para>You could also install it using <emphasis>pip</emphasis> or <emphasis>easy_install</emphasis> using the local distribution package in tools/marvin/dist :</para>
<programlisting>pip install tools/marvin/dist/Marvin-0.1.0.tar.gz</programlisting>
<para>Or:</para>
<programlisting>easy_install tools/marvin/dist/Marvin-0.1.0.tar.gz</programlisting>
</section>

View File

@ -0,0 +1,86 @@
<?xml version='1.0' encoding='utf-8' ?>
<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
%BOOK_ENTITIES;
]>
<!-- Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<section id="configure-snmp-rhel">
<title>Configuring SNMP Community String on a RHEL Server</title>
<para>The SNMP Community string is similar to a user id or password that provides access to a
network device, such as router. This string is sent along with all SNMP requests. If the
community string is correct, the device responds with the requested information. If the
community string is incorrect, the device discards the request and does not respond.</para>
<para>The NetScaler device uses SNMP to communicate with the VMs. You must install SNMP and
configure SNMP Community string for a secure communication between the NetScaler device and the
RHEL machine.</para>
<orderedlist>
<listitem>
<para>Ensure that you installed SNMP on RedHat. If not, run the following command:</para>
<screen>yum install net-snmp-utils</screen>
</listitem>
<listitem>
<para>Edit the /etc/snmp/snmpd.conf file to allow the SNMP polling from the NetScaler
device.</para>
<orderedlist>
<listitem>
<para>Map the community name into a security name (local and mynetwork, depending on where
the request is coming from):</para>
<note>
<para>Use a strong password instead of public when you edit the following table.</para>
</note>
<screen># sec.name source community
com2sec local localhost public
com2sec mynetwork 0.0.0.0 public</screen>
<note>
<para>Setting to 0.0.0.0 allows all IPs to poll the NetScaler server.</para>
</note>
</listitem>
<listitem>
<para>Map the security names into group names: </para>
<screen># group.name sec.model sec.name
group MyRWGroup v1 local
group MyRWGroup v2c local
group MyROGroup v1 mynetwork
group MyROGroup v2c mynetwork</screen>
</listitem>
<listitem>
<para>Create a view to allow the groups to have the permission to:</para>
<screen>incl/excl subtree mask view all included .1 </screen>
</listitem>
<listitem>
<para>Grant access with different write permissions to the two groups to the view you
created.</para>
<screen># context sec.model sec.level prefix read write notif
access MyROGroup "" any noauth exact all none none
access MyRWGroup "" any noauth exact all all all </screen>
</listitem>
</orderedlist>
</listitem>
<listitem>
<para>Unblock SNMP in iptables.</para>
<screen>iptables -A INPUT -p udp --dport 161 -j ACCEPT</screen>
</listitem>
<listitem>
<para>Start the SNMP service:</para>
<screen>service snmpd start</screen>
</listitem>
<listitem>
<para>Ensure that the SNMP service is started automatically during the system startup:</para>
<screen>chkconfig snmpd on</screen>
</listitem>
</orderedlist>
</section>

View File

@ -3,30 +3,31 @@
<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
%BOOK_ENTITIES;
]>
<!-- Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<section id="external-firewalls-and-load-balancers">
<title>External Firewalls and Load Balancers</title>
<para>&PRODUCT; is capable of replacing its Virtual Router with an external Juniper SRX device and an optional external NetScaler or F5 load balancer for gateway and load balancing services. In this case, the VMs use the SRX as their gateway.</para>
<xi:include href="using-netscaler-load-balancers.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="configure-snmp-rhel.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="initial-setup-of-external-firewalls-loadbalancers.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="ongoing-configuration-of-external-firewalls-loadbalancer.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="autoscale.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<title>External Firewalls and Load Balancers</title>
<para>&PRODUCT; is capable of replacing its Virtual Router with an external Juniper SRX device and
an optional external NetScaler or F5 load balancer for gateway and load balancing services. In
this case, the VMs use the SRX as their gateway.</para>
<xi:include href="using-netscaler-load-balancers.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="configure-snmp-rhel.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="initial-setup-of-external-firewalls-loadbalancers.xml"
xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="ongoing-configuration-of-external-firewalls-loadbalancer.xml"
xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="autoscale.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
</section>

Some files were not shown because too many files have changed in this diff Show More