mirror of https://github.com/apache/cloudstack.git
Merge branch '2.2.y' of ssh://git.cloud.com/var/lib/git/cloudstack-oss into 2.2.y
This commit is contained in:
commit
f733d38b7a
|
|
@ -263,6 +263,12 @@ public class MockVmManagerImpl implements MockVmManager {
|
|||
|
||||
@Override
|
||||
public NetworkUsageAnswer getNetworkUsage(NetworkUsageCommand cmd) {
|
||||
try {
|
||||
Thread.sleep(500);
|
||||
s_logger.debug("get network usage for router: " + cmd.getDomRName());
|
||||
} catch (InterruptedException e) {
|
||||
|
||||
}
|
||||
return new NetworkUsageAnswer(cmd, null, 100L, 100L);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -45,6 +45,7 @@ import com.cloud.agent.api.Command;
|
|||
import com.cloud.agent.api.CronCommand;
|
||||
import com.cloud.agent.api.ModifySshKeysCommand;
|
||||
import com.cloud.agent.api.PingCommand;
|
||||
import com.cloud.agent.api.ReadyCommand;
|
||||
import com.cloud.agent.api.ShutdownCommand;
|
||||
import com.cloud.agent.api.StartupAnswer;
|
||||
import com.cloud.agent.api.StartupCommand;
|
||||
|
|
@ -116,6 +117,8 @@ public class Agent implements HandlerFactory, IAgentControl {
|
|||
AtomicInteger _inProgress = new AtomicInteger();
|
||||
|
||||
StartupTask _startup = null;
|
||||
long _startupWaitDefault = 180000;
|
||||
long _startupWait = _startupWaitDefault;
|
||||
boolean _reconnectAllowed = true;
|
||||
//For time sentitive task, e.g. PingTask
|
||||
private ThreadPoolExecutor _ugentTaskPool;
|
||||
|
|
@ -281,7 +284,7 @@ public class Agent implements HandlerFactory, IAgentControl {
|
|||
s_logger.debug("Adding a watch list");
|
||||
}
|
||||
final WatchTask task = new WatchTask(link, request, this);
|
||||
_timer.schedule(task, delay, period);
|
||||
_timer.schedule(task, 0, period);
|
||||
_watchList.add(task);
|
||||
}
|
||||
}
|
||||
|
|
@ -314,7 +317,7 @@ public class Agent implements HandlerFactory, IAgentControl {
|
|||
}
|
||||
synchronized (this) {
|
||||
_startup = new StartupTask(link);
|
||||
_timer.schedule(_startup, 180000);
|
||||
_timer.schedule(_startup, _startupWait);
|
||||
}
|
||||
try {
|
||||
link.send(request.toBytes());
|
||||
|
|
@ -476,9 +479,28 @@ public class Agent implements HandlerFactory, IAgentControl {
|
|||
cancelTasks();
|
||||
_reconnectAllowed = false;
|
||||
answer = new Answer(cmd, true, null);
|
||||
} else if (cmd instanceof ReadyCommand) {
|
||||
ReadyCommand ready = (ReadyCommand)cmd;
|
||||
s_logger.debug("Received shutdownCommand, due to: " + ready.getDetails());
|
||||
if (ready.getDetails() != null) {
|
||||
cancelTasks();
|
||||
_reconnectAllowed = false;
|
||||
answer = new Answer(cmd, true, null);
|
||||
} else {
|
||||
_inProgress.incrementAndGet();
|
||||
try {
|
||||
answer = _resource.executeRequest(cmd);
|
||||
} finally {
|
||||
_inProgress.decrementAndGet();
|
||||
}
|
||||
if (answer == null) {
|
||||
s_logger.debug("Response: unsupported command" + cmd.toString());
|
||||
answer = Answer.createUnsupportedCommandAnswer(cmd);
|
||||
}
|
||||
}
|
||||
} else if (cmd instanceof AgentControlCommand) {
|
||||
answer = null;
|
||||
synchronized (_controlListeners) {
|
||||
answer = null;
|
||||
synchronized (_controlListeners) {
|
||||
for (IAgentControlListener listener : _controlListeners) {
|
||||
answer = listener.processControlRequest(request, (AgentControlCommand) cmd);
|
||||
if (answer != null) {
|
||||
|
|
@ -773,6 +795,7 @@ public class Agent implements HandlerFactory, IAgentControl {
|
|||
// TimerTask.cancel may fail depends on the calling context
|
||||
if (!cancelled) {
|
||||
cancelled = true;
|
||||
_startupWait = _startupWaitDefault;
|
||||
s_logger.debug("Startup task cancelled");
|
||||
return super.cancel();
|
||||
}
|
||||
|
|
@ -787,6 +810,7 @@ public class Agent implements HandlerFactory, IAgentControl {
|
|||
}
|
||||
cancelled = true;
|
||||
_startup = null;
|
||||
_startupWait = _startupWaitDefault *2;
|
||||
reconnect(_link);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -35,6 +35,7 @@ import java.util.HashMap;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.UUID;
|
||||
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
|
|
@ -351,7 +352,7 @@ public class AgentShell implements IAgentShell {
|
|||
if (!developer) {
|
||||
throw new ConfigurationException("Unable to find the guid");
|
||||
}
|
||||
_guid = MacAddress.getMacAddress().toString(":");
|
||||
_guid = UUID.randomUUID().toString();
|
||||
}
|
||||
|
||||
return true;
|
||||
|
|
@ -529,8 +530,12 @@ public class AgentShell implements IAgentShell {
|
|||
init(args);
|
||||
|
||||
String instance = getProperty(null, "instance");
|
||||
if (instance == null) {
|
||||
instance = "";
|
||||
if (instance == null) {
|
||||
if (Boolean.parseBoolean(getProperty(null, "developer"))) {
|
||||
instance = UUID.randomUUID().toString();
|
||||
} else {
|
||||
instance = "";
|
||||
}
|
||||
} else {
|
||||
instance += ".";
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,7 +17,11 @@
|
|||
*/
|
||||
package com.cloud.agent.resource;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
import javax.ejb.Local;
|
||||
|
||||
|
|
@ -26,9 +30,17 @@ import com.cloud.agent.api.Answer;
|
|||
import com.cloud.agent.api.Command;
|
||||
import com.cloud.agent.api.PingCommand;
|
||||
import com.cloud.agent.api.StartupCommand;
|
||||
import com.cloud.agent.api.StartupRoutingCommand;
|
||||
import com.cloud.agent.api.StartupStorageCommand;
|
||||
import com.cloud.agent.api.StoragePoolInfo;
|
||||
import com.cloud.agent.api.StartupRoutingCommand.VmState;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.host.Host.Type;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.network.Networks.RouterPrivateIpStrategy;
|
||||
import com.cloud.resource.ServerResource;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.Storage.StoragePoolType;
|
||||
|
||||
@Local(value={ServerResource.class})
|
||||
public class DummyResource implements ServerResource {
|
||||
|
|
@ -36,6 +48,7 @@ public class DummyResource implements ServerResource {
|
|||
Host.Type _type;
|
||||
boolean _negative;
|
||||
IAgentControl _agentControl;
|
||||
private Map<String, Object> _params;
|
||||
|
||||
@Override
|
||||
public void disconnected() {
|
||||
|
|
@ -58,10 +71,98 @@ public class DummyResource implements ServerResource {
|
|||
public Type getType() {
|
||||
return _type;
|
||||
}
|
||||
|
||||
protected String getConfiguredProperty(String key, String defaultValue) {
|
||||
String val = (String)_params.get(key);
|
||||
return val==null?defaultValue:val;
|
||||
}
|
||||
|
||||
protected Long getConfiguredProperty(String key, Long defaultValue) {
|
||||
String val = (String)_params.get(key);
|
||||
|
||||
if (val != null) {
|
||||
Long result = Long.parseLong(val);
|
||||
return result;
|
||||
}
|
||||
return defaultValue;
|
||||
}
|
||||
|
||||
protected List<Object> getHostInfo() {
|
||||
final ArrayList<Object> info = new ArrayList<Object>();
|
||||
long speed = getConfiguredProperty("cpuspeed", 4000L) ;
|
||||
long cpus = getConfiguredProperty("cpus", 4L);
|
||||
long ram = getConfiguredProperty("memory", 16000L*1024L*1024L);
|
||||
long dom0ram = Math.min(ram/10, 768*1024*1024L);
|
||||
|
||||
|
||||
String cap = getConfiguredProperty("capabilities", "hvm");
|
||||
info.add((int)cpus);
|
||||
info.add(speed);
|
||||
info.add(ram);
|
||||
info.add(cap);
|
||||
info.add(dom0ram);
|
||||
return info;
|
||||
|
||||
}
|
||||
|
||||
protected void fillNetworkInformation(final StartupCommand cmd) {
|
||||
|
||||
cmd.setPrivateIpAddress((String)getConfiguredProperty("private.ip.address", "127.0.0.1"));
|
||||
cmd.setPrivateMacAddress((String)getConfiguredProperty("private.mac.address", "8A:D2:54:3F:7C:C3"));
|
||||
cmd.setPrivateNetmask((String)getConfiguredProperty("private.ip.netmask", "255.255.255.0"));
|
||||
|
||||
cmd.setStorageIpAddress((String)getConfiguredProperty("private.ip.address", "127.0.0.1"));
|
||||
cmd.setStorageMacAddress((String)getConfiguredProperty("private.mac.address", "8A:D2:54:3F:7C:C3"));
|
||||
cmd.setStorageNetmask((String)getConfiguredProperty("private.ip.netmask", "255.255.255.0"));
|
||||
cmd.setGatewayIpAddress((String)getConfiguredProperty("gateway.ip.address", "127.0.0.1"));
|
||||
|
||||
}
|
||||
|
||||
private Map<String, String> getVersionStrings() {
|
||||
Map<String, String> result = new HashMap<String, String>();
|
||||
String hostOs = (String) _params.get("Host.OS");
|
||||
String hostOsVer = (String) _params.get("Host.OS.Version");
|
||||
String hostOsKernVer = (String) _params.get("Host.OS.Kernel.Version");
|
||||
result.put("Host.OS", hostOs==null?"Fedora":hostOs);
|
||||
result.put("Host.OS.Version", hostOsVer==null?"14":hostOsVer);
|
||||
result.put("Host.OS.Kernel.Version", hostOsKernVer==null?"2.6.35.6-45.fc14.x86_64":hostOsKernVer);
|
||||
return result;
|
||||
}
|
||||
|
||||
protected StoragePoolInfo initializeLocalStorage() {
|
||||
String hostIp = (String)getConfiguredProperty("private.ip.address", "127.0.0.1");
|
||||
String localStoragePath = (String)getConfiguredProperty("local.storage.path", "/mnt");
|
||||
String lh = hostIp + localStoragePath;
|
||||
String uuid = UUID.nameUUIDFromBytes(lh.getBytes()).toString();
|
||||
|
||||
String capacity = (String)getConfiguredProperty("local.storage.capacity", "1000000000");
|
||||
String available = (String)getConfiguredProperty("local.storage.avail", "10000000");
|
||||
|
||||
return new StoragePoolInfo(uuid, hostIp, localStoragePath,
|
||||
localStoragePath, StoragePoolType.Filesystem,
|
||||
Long.parseLong(capacity), Long.parseLong(available));
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public StartupCommand[] initialize() {
|
||||
return new StartupCommand[] {new StartupCommand(Host.Type.Storage)};
|
||||
Map<String, VmState> changes = null;
|
||||
|
||||
|
||||
final List<Object> info = getHostInfo();
|
||||
|
||||
final StartupRoutingCommand cmd = new StartupRoutingCommand((Integer)info.get(0), (Long)info.get(1), (Long)info.get(2), (Long)info.get(4), (String)info.get(3), HypervisorType.KVM, RouterPrivateIpStrategy.HostLocal, changes);
|
||||
fillNetworkInformation(cmd);
|
||||
cmd.getHostDetails().putAll(getVersionStrings());
|
||||
cmd.setCluster(getConfiguredProperty("cluster", "1"));
|
||||
StoragePoolInfo pi = initializeLocalStorage();
|
||||
StartupStorageCommand sscmd = new StartupStorageCommand();
|
||||
sscmd.setPoolInfo(pi);
|
||||
sscmd.setGuid(pi.getUuid());
|
||||
sscmd.setDataCenter((String)_params.get("zone"));
|
||||
sscmd.setResourceType(Storage.StorageResourceType.STORAGE_POOL);
|
||||
|
||||
return new StartupCommand[]{cmd, sscmd};
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -73,9 +174,13 @@ public class DummyResource implements ServerResource {
|
|||
|
||||
value = (String)params.get("negative.reply");
|
||||
_negative = Boolean.parseBoolean(value);
|
||||
|
||||
setParams(params);
|
||||
return true;
|
||||
}
|
||||
|
||||
public void setParams(Map<String, Object> _params) {
|
||||
this._params = _params;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
/* Copyright (C) 2010 Cloud.com, Inc. All rights reserved.
|
||||
/* Copyright (C) 2012 Citrix.com, Inc. All rights reserved.
|
||||
*
|
||||
* This software is licensed under the GNU General Public License v3 or later.
|
||||
*
|
||||
|
|
@ -27,21 +27,27 @@ public class ClusterSyncAnswer extends Answer {
|
|||
private HashMap<String, Pair<String, State>> _newStates;
|
||||
private HashMap<String, Pair<String, State>> _allStates;
|
||||
private int _type = -1; // 0 for full, 1 for delta
|
||||
private boolean _isExecuted=false;
|
||||
|
||||
public static final int FULL_SYNC=0;
|
||||
public static final int DELTA_SYNC=1;
|
||||
|
||||
// this is here because a cron command answer is being sent twice
|
||||
// AgentAttache.processAnswers
|
||||
// AgentManagerImpl.notifyAnswersToMonitors
|
||||
public boolean isExceuted(){
|
||||
return _isExecuted;
|
||||
}
|
||||
|
||||
public ClusterSyncAnswer(long clusterId) {
|
||||
_clusterId = clusterId;
|
||||
result = false;
|
||||
this.details = "Ignore sync as this is not a pool master";
|
||||
_type = -1;
|
||||
public void setExecuted(){
|
||||
_isExecuted = true;
|
||||
}
|
||||
|
||||
|
||||
public ClusterSyncAnswer(long clusterId, HashMap<String, Pair<String, State>> newStates){
|
||||
_clusterId = clusterId;
|
||||
_newStates = newStates;
|
||||
_allStates = null;
|
||||
_type = DELTA_SYNC;
|
||||
result = true;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
/* Copyright (C) 2010 Cloud.com, Inc. All rights reserved.
|
||||
/* Copyright (C) 2012 Citrix.com, Inc. All rights reserved.
|
||||
*
|
||||
* This software is licensed under the GNU General Public License v3 or later.
|
||||
*
|
||||
|
|
@ -46,11 +46,11 @@ public class ClusterSyncCommand extends Command implements CronCommand {
|
|||
|
||||
public void incrStep(){
|
||||
_steps++;
|
||||
if (_steps>_skipSteps)_steps=0;
|
||||
if (_steps>=_skipSteps)_steps=0;
|
||||
}
|
||||
|
||||
public boolean isRightStep(){
|
||||
return (_steps==_skipSteps);
|
||||
public boolean isRightStep(){
|
||||
return (_steps==0);
|
||||
}
|
||||
|
||||
public long getClusterId() {
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@
|
|||
package com.cloud.agent.api;
|
||||
|
||||
public class ReadyCommand extends Command {
|
||||
|
||||
private String details;
|
||||
public ReadyCommand() {
|
||||
super();
|
||||
}
|
||||
|
|
@ -38,5 +38,13 @@ public class ReadyCommand extends Command {
|
|||
public boolean executeInSequence() {
|
||||
return true;
|
||||
}
|
||||
|
||||
public void setDetails(String details) {
|
||||
this.details = details;
|
||||
}
|
||||
|
||||
public String getDetails() {
|
||||
return this.details;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@ import java.util.Map;
|
|||
import com.cloud.host.Host;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.network.Networks.RouterPrivateIpStrategy;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.vm.VirtualMachine.State;
|
||||
|
||||
public class StartupRoutingCommand extends StartupCommand {
|
||||
|
|
@ -48,6 +49,7 @@ public class StartupRoutingCommand extends StartupCommand {
|
|||
long dom0MinMemory;
|
||||
boolean poolSync;
|
||||
Map<String, VmState> vms;
|
||||
HashMap<String, Pair<String, State>> _allStates;
|
||||
String caps;
|
||||
String pool;
|
||||
HypervisorType hypervisorType;
|
||||
|
|
@ -121,6 +123,10 @@ getHostDetails().put(RouterPrivateIpStrategy.class.getCanonicalName(), privIpStr
|
|||
this.vms.put(vm_name, new VmState(vms.get(vm_name), null));
|
||||
}
|
||||
}
|
||||
|
||||
public void setClusterVMStateChanges(HashMap<String, Pair<String, State>> allStates) {
|
||||
_allStates = allStates;
|
||||
}
|
||||
|
||||
public int getCpus() {
|
||||
return cpus;
|
||||
|
|
@ -145,6 +151,10 @@ getHostDetails().put(RouterPrivateIpStrategy.class.getCanonicalName(), privIpStr
|
|||
public Map<String, VmState> getVmStates() {
|
||||
return vms;
|
||||
}
|
||||
|
||||
public HashMap<String, Pair<String, State>> getClusterVMStateChanges() {
|
||||
return _allStates;
|
||||
}
|
||||
|
||||
public void setSpeed(long speed) {
|
||||
this.speed = speed;
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ public enum Status {
|
|||
Maintenance(false, false, false),
|
||||
Alert(true, true, true),
|
||||
Removed(true, false, true),
|
||||
Rebalancing(false, false, false);
|
||||
Rebalancing(true, false, true);
|
||||
|
||||
private final boolean updateManagementServer;
|
||||
private final boolean checkManagementServer;
|
||||
|
|
@ -194,6 +194,7 @@ public enum Status {
|
|||
s_fsm.addTransition(Status.Alert, Event.AgentDisconnected, Status.Alert);
|
||||
s_fsm.addTransition(Status.Rebalancing, Event.RebalanceFailed, Status.Disconnected);
|
||||
s_fsm.addTransition(Status.Rebalancing, Event.RebalanceCompleted, Status.Connecting);
|
||||
s_fsm.addTransition(Status.Rebalancing, Event.ManagementServerDown, Status.Disconnected);
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
|
|
|
|||
|
|
@ -97,7 +97,8 @@ public class Storage {
|
|||
SharedMountPoint(true),
|
||||
VMFS(true), // VMware VMFS storage
|
||||
PreSetup(true), // for XenServer, Storage Pool is set up by customers.
|
||||
OCFS2(true);
|
||||
OCFS2(true),
|
||||
EXT(false); // XenServer local EXT SR
|
||||
|
||||
|
||||
boolean shared;
|
||||
|
|
|
|||
|
|
@ -103,8 +103,8 @@
|
|||
<jvmarg value="-Dcom.sun.management.jmxremote.port=${jmxport}"/>
|
||||
<jvmarg value="-Dcom.sun.management.jmxremote.authenticate=false"/>
|
||||
<jvmarg value="-Dcom.sun.management.jmxremote.ssl=false"/>
|
||||
<jvmarg value="-Xms256m"/>
|
||||
<jvmarg value="-Xmx384m"/>
|
||||
<jvmarg value="-Xms${JAVAHEAPMEMMIN}"/>
|
||||
<jvmarg value="-Xmx${JAVAHEAPMEMMAX}"/>
|
||||
<jvmarg value="-ea"/>
|
||||
<arg value="start"/>
|
||||
</java>
|
||||
|
|
@ -119,8 +119,8 @@
|
|||
<jvmarg value="-Xdebug"/>
|
||||
<jvmarg value="${debug.jvmarg}"/>
|
||||
<jvmarg value="-ea"/>
|
||||
<jvmarg value="-Xms256m"/>
|
||||
<jvmarg value="-Xmx384m"/>
|
||||
<jvmarg value="-Xms${JAVAHEAPMEMMIN}"/>
|
||||
<jvmarg value="-Xmx${JAVAHEAPMEMMAX}"/>
|
||||
</java>
|
||||
</target>
|
||||
|
||||
|
|
|
|||
|
|
@ -7,3 +7,5 @@ AGENTLOGDIR=logs
|
|||
AGENTLOG=logs/agent.log
|
||||
MSMNTDIR=/mnt
|
||||
COMPONENTS-SPEC=components-premium.xml
|
||||
JAVAHEAPMEMMIN=256M
|
||||
JAVAHEAPMEMMAX=384M
|
||||
|
|
|
|||
|
|
@ -466,6 +466,12 @@ fi
|
|||
%files deps
|
||||
%defattr(0644,root,root,0755)
|
||||
%{_javadir}/%{name}-commons-codec-1.4.jar
|
||||
%{_javadir}/%{name}-commons-dbcp-1.4.jar
|
||||
%{_javadir}/%{name}-commons-pool-1.5.6.jar
|
||||
%{_javadir}/%{name}-commons-httpclient-3.1.jar
|
||||
%{_javadir}/%{name}-google-gson-1.7.1.jar
|
||||
%{_javadir}/%{name}-netscaler.jar
|
||||
%{_javadir}/%{name}-netscaler-sdx.jar
|
||||
%{_javadir}/%{name}-log4j-extras.jar
|
||||
%{_javadir}/%{name}-backport-util-concurrent-3.0.jar
|
||||
%{_javadir}/%{name}-ehcache.jar
|
||||
|
|
|
|||
|
|
@ -102,79 +102,82 @@ function JsX11KeyboardMapper() {
|
|||
KeyboardMapper.apply(this, arguments);
|
||||
|
||||
this.jsX11KeysymMap = [];
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_CAPSLOCK] = 0xffe5;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_BACKSPACE] = 0xff08;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_TAB] = 0xff09;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_ENTER] = 0xff0d;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_ESCAPE] = 0xff1b;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_INSERT] = 0xff63;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_DELETE] = 0xffff;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_HOME] = 0xff50;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_END] = 0xff57;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_PAGEUP] = 0xff55;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_PAGEDOWN] = 0xff56;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_LEFT] = 0xff51;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_UP] = 0xff52;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_RIGHT] = 0xff53;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_DOWN] = 0xff54;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F1] = 0xffbe;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F2] = 0xffbf;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F3] = 0xffc0;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F4] = 0xffc1;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F5] = 0xffc2;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F6] = 0xffc3;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F7] = 0xffc4;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F8] = 0xffc5;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F9] = 0xffc6;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F10] = 0xffc7;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F11] = 0xffc8;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F12] = 0xffc9;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_SHIFT] = 0xffe1;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_CTRL] = 0xffe3;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_ALT] = 0xffe9;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_GRAVE_ACCENT] = 0x60;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_SUBSTRACT] = 0x2d;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_ADD] = 0x2b;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_OPEN_BRACKET] = 0x5b;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_CLOSE_BRACKET] = 0x5d;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_BACK_SLASH] = 0x7c;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_SINGLE_QUOTE] = 0x22;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_COMMA] = 0x3c;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_PERIOD] = 0x3e;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_FORWARD_SLASH] = 0x3f;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_DASH] = 0x2d;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_SEMI_COLON] = 0x3b;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_CAPSLOCK] = AjaxViewer.X11_KEY_CAPSLOCK;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_BACKSPACE] = AjaxViewer.X11_KEY_BACKSPACE;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_TAB] = AjaxViewer.X11_KEY_TAB;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_ENTER] = AjaxViewer.X11_KEY_ENTER;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_ESCAPE] = AjaxViewer.X11_KEY_ESCAPE;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_INSERT] = AjaxViewer.X11_KEY_INSERT;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_DELETE] = AjaxViewer.X11_KEY_DELETE;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_HOME] = AjaxViewer.X11_KEY_HOME;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_END] = AjaxViewer.X11_KEY_END;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_PAGEUP] = AjaxViewer.X11_KEY_PAGEUP;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_PAGEDOWN] = AjaxViewer.X11_KEY_PAGEDOWN;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_LEFT] = AjaxViewer.X11_KEY_LEFT;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_UP] = AjaxViewer.X11_KEY_UP;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_RIGHT] = AjaxViewer.X11_KEY_RIGHT;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_DOWN] = AjaxViewer.X11_KEY_DOWN;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F1] = AjaxViewer.X11_KEY_F1;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F2] = AjaxViewer.X11_KEY_F2;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F3] = AjaxViewer.X11_KEY_F3;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F4] = AjaxViewer.X11_KEY_F4;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F5] = AjaxViewer.X11_KEY_F5;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F6] = AjaxViewer.X11_KEY_F6;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F7] = AjaxViewer.X11_KEY_F7;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F8] = AjaxViewer.X11_KEY_F8;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F9] = AjaxViewer.X11_KEY_F9;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F10] = AjaxViewer.X11_KEY_F10;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F11] = AjaxViewer.X11_KEY_F11;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_F12] = AjaxViewer.X11_KEY_F12;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_SHIFT] = AjaxViewer.X11_KEY_SHIFT;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_CTRL] = AjaxViewer.X11_KEY_CTRL;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_ALT] = AjaxViewer.X11_KEY_ALT;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_GRAVE_ACCENT] = AjaxViewer.X11_KEY_GRAVE_ACCENT;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_SUBSTRACT] = AjaxViewer.X11_KEY_SUBSTRACT;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_ADD] = AjaxViewer.X11_KEY_ADD;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_OPEN_BRACKET] = AjaxViewer.X11_KEY_OPEN_BRACKET;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_CLOSE_BRACKET] = AjaxViewer.X11_KEY_CLOSE_BRACKET;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_BACK_SLASH] = AjaxViewer.X11_KEY_BACK_SLASH;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_SINGLE_QUOTE] = AjaxViewer.X11_KEY_SINGLE_QUOTE;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_COMMA] = AjaxViewer.X11_KEY_COMMA;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_PERIOD] = AjaxViewer.X11_KEY_PERIOD;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_FORWARD_SLASH] = AjaxViewer.X11_KEY_FORWARD_SLASH;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_DASH] = AjaxViewer.X11_KEY_DASH;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_SEMI_COLON] = AjaxViewer.X11_KEY_SEMI_COLON;
|
||||
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD0] = 0x30;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD1] = 0x31;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD2] = 0x32;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD3] = 0x33;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD4] = 0x34;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD5] = 0x35;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD6] = 0x36;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD7] = 0x37;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD8] = 0x38;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD9] = 0x39;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_DECIMAL_POINT] = 0x2e;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_DIVIDE] = 0x3f;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD0] = AjaxViewer.X11_KEY_NUMPAD0;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD1] = AjaxViewer.X11_KEY_NUMPAD1;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD2] = AjaxViewer.X11_KEY_NUMPAD2;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD3] = AjaxViewer.X11_KEY_NUMPAD3;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD4] = AjaxViewer.X11_KEY_NUMPAD4;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD5] = AjaxViewer.X11_KEY_NUMPAD5;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD6] = AjaxViewer.X11_KEY_NUMPAD6;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD7] = AjaxViewer.X11_KEY_NUMPAD7;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD8] = AjaxViewer.X11_KEY_NUMPAD8;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_NUMPAD9] = AjaxViewer.X11_KEY_NUMPAD9;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_DECIMAL_POINT] = AjaxViewer.X11_KEY_DECIMAL_POINT;
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_DIVIDE] = AjaxViewer.X11_KEY_DIVIDE;
|
||||
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_MULTIPLY] = [
|
||||
{type: AjaxViewer.KEY_DOWN, code: 0xffe1, modifiers: 0 },
|
||||
{type: AjaxViewer.KEY_DOWN, code: 0x2a, modifiers: 0 },
|
||||
{type: AjaxViewer.KEY_UP, code: 0x2a, modifiers: 0 },
|
||||
{type: AjaxViewer.KEY_UP, code: 0xffe1, modifiers: 0 }
|
||||
{type: AjaxViewer.KEY_DOWN, code: AjaxViewer.X11_KEY_SHIFT, modifiers: 0 },
|
||||
{type: AjaxViewer.KEY_DOWN, code: AjaxViewer.X11_KEY_ASTERISK, modifiers: 0 },
|
||||
{type: AjaxViewer.KEY_UP, code: AjaxViewer.X11_KEY_ASTERISK, modifiers: 0 },
|
||||
{type: AjaxViewer.KEY_UP, code: AjaxViewer.X11_KEY_SHIFT, modifiers: 0 }
|
||||
];
|
||||
|
||||
this.jsX11KeysymMap[AjaxViewer.JS_KEY_ADD] = false;
|
||||
this.jsKeyPressX11KeysymMap = [];
|
||||
this.jsKeyPressX11KeysymMap[61] = [
|
||||
{type: AjaxViewer.KEY_DOWN, code: 0x2b, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_UP, code: 0x2b, modifiers: 0, shift: false }
|
||||
{type: AjaxViewer.KEY_DOWN, code: AjaxViewer.X11_KEY_ADD, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_UP, code: AjaxViewer.X11_KEY_ADD, modifiers: 0, shift: false }
|
||||
];
|
||||
this.jsKeyPressX11KeysymMap[43] = [
|
||||
{type: AjaxViewer.KEY_DOWN, code: 0xffe1, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_DOWN, code: 0x2b, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_UP, code: 0x2b, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_UP, code: 0xffe1, modifiers: 0, shift: false }
|
||||
{type: AjaxViewer.KEY_DOWN, code: AjaxViewer.X11_KEY_SHIFT, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_DOWN, code: AjaxViewer.X11_KEY_ADD, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_UP, code: AjaxViewer.X11_KEY_ADD, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_UP, code: AjaxViewer.X11_KEY_SHIFT, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_DOWN, code: AjaxViewer.X11_KEY_ADD, modifiers: 0, shift: true },
|
||||
{type: AjaxViewer.KEY_UP, code: AjaxViewer.X11_KEY_ADD, modifiers: 0, shift: true }
|
||||
];
|
||||
}
|
||||
|
||||
|
|
@ -358,30 +361,93 @@ AjaxViewer.JS_KEY_F11 = 122;
|
|||
AjaxViewer.JS_KEY_F12 = 123;
|
||||
AjaxViewer.JS_KEY_NUMLOCK = 144;
|
||||
AjaxViewer.JS_KEY_SCROLLLOCK = 145;
|
||||
AjaxViewer.JS_KEY_SEMI_COLON = 186;
|
||||
AjaxViewer.JS_KEY_EQUAL_SIGN = 187;
|
||||
AjaxViewer.JS_KEY_COMMA = 188;
|
||||
AjaxViewer.JS_KEY_DASH = 189;
|
||||
AjaxViewer.JS_KEY_PERIOD = 190;
|
||||
AjaxViewer.JS_KEY_FORWARD_SLASH = 191;
|
||||
AjaxViewer.JS_KEY_GRAVE_ACCENT = 192;
|
||||
AjaxViewer.JS_KEY_OPEN_BRACKET = 219;
|
||||
AjaxViewer.JS_KEY_BACK_SLASH = 220;
|
||||
AjaxViewer.JS_KEY_CLOSE_BRACKET = 221;
|
||||
AjaxViewer.JS_KEY_SINGLE_QUOTE = 222;
|
||||
AjaxViewer.JS_KEY_SEMI_COLON = 186; // ;
|
||||
AjaxViewer.JS_KEY_EQUAL_SIGN = 187; // =
|
||||
AjaxViewer.JS_KEY_COMMA = 188; // ,
|
||||
AjaxViewer.JS_KEY_DASH = 189; // -
|
||||
AjaxViewer.JS_KEY_PERIOD = 190; // .
|
||||
AjaxViewer.JS_KEY_FORWARD_SLASH = 191; // /
|
||||
AjaxViewer.JS_KEY_GRAVE_ACCENT = 192; // `
|
||||
AjaxViewer.JS_KEY_OPEN_BRACKET = 219; // [
|
||||
AjaxViewer.JS_KEY_BACK_SLASH = 220; // \
|
||||
AjaxViewer.JS_KEY_CLOSE_BRACKET = 221; // ]
|
||||
AjaxViewer.JS_KEY_SINGLE_QUOTE = 222; // '
|
||||
AjaxViewer.JS_NUMPAD_PLUS = 43;
|
||||
AjaxViewer.JS_KEY_NUM8 = 56;
|
||||
|
||||
// keycode from Japanese keyboard
|
||||
AjaxViewer.JS_KEY_JP_COLON = 222;
|
||||
AjaxViewer.JS_KEY_JP_CLOSE_BRACKET = 220;
|
||||
AjaxViewer.JS_KEY_JP_AT_SIGN = 219;
|
||||
AjaxViewer.JS_KEY_JP_OPEN_BRACKET = 221;
|
||||
AjaxViewer.JS_KEY_JP_BACK_SLASH = 193;
|
||||
AjaxViewer.JS_KEY_JP_COLON = 222; // :* on JP keyboard
|
||||
AjaxViewer.JS_KEY_JP_CLOSE_BRACKET = 220; // [{ on JP keyboard
|
||||
AjaxViewer.JS_KEY_JP_AT_SIGN = 219; // @` on JP keyboard
|
||||
AjaxViewer.JS_KEY_JP_OPEN_BRACKET = 221; // [{ on JP keyboard
|
||||
AjaxViewer.JS_KEY_JP_BACK_SLASH = 193; // \| on JP keyboard
|
||||
AjaxViewer.JS_KEY_JP_YEN_MARK = 255;
|
||||
AjaxViewer.JS_KEY_JP_EQUAL = 109;
|
||||
AjaxViewer.JS_KEY_JP_ACUTE = 107;
|
||||
|
||||
|
||||
AjaxViewer.JS_KEY_JP_EQUAL = 109; // -= ON JP keyboard
|
||||
AjaxViewer.JS_KEY_JP_ACUTE = 107; // ^~ on JP keyboard
|
||||
|
||||
// X11 keysym definitions
|
||||
AjaxViewer.X11_KEY_CAPSLOCK = 0xffe5;
|
||||
AjaxViewer.X11_KEY_BACKSPACE = 0xff08;
|
||||
AjaxViewer.X11_KEY_TAB = 0xff09;
|
||||
AjaxViewer.X11_KEY_ENTER = 0xff0d;
|
||||
AjaxViewer.X11_KEY_ESCAPE = 0xff1b;
|
||||
AjaxViewer.X11_KEY_INSERT = 0xff63;
|
||||
AjaxViewer.X11_KEY_DELETE = 0xffff;
|
||||
AjaxViewer.X11_KEY_HOME = 0xff50;
|
||||
AjaxViewer.X11_KEY_END = 0xff57;
|
||||
AjaxViewer.X11_KEY_PAGEUP = 0xff55;
|
||||
AjaxViewer.X11_KEY_PAGEDOWN = 0xff56;
|
||||
AjaxViewer.X11_KEY_LEFT = 0xff51;
|
||||
AjaxViewer.X11_KEY_UP = 0xff52;
|
||||
AjaxViewer.X11_KEY_RIGHT = 0xff53;
|
||||
AjaxViewer.X11_KEY_DOWN = 0xff54;
|
||||
AjaxViewer.X11_KEY_F1 = 0xffbe;
|
||||
AjaxViewer.X11_KEY_F2 = 0xffbf;
|
||||
AjaxViewer.X11_KEY_F3 = 0xffc0;
|
||||
AjaxViewer.X11_KEY_F4 = 0xffc1;
|
||||
AjaxViewer.X11_KEY_F5 = 0xffc2;
|
||||
AjaxViewer.X11_KEY_F6 = 0xffc3;
|
||||
AjaxViewer.X11_KEY_F7 = 0xffc4;
|
||||
AjaxViewer.X11_KEY_F8 = 0xffc5;
|
||||
AjaxViewer.X11_KEY_F9 = 0xffc6;
|
||||
AjaxViewer.X11_KEY_F10 = 0xffc7;
|
||||
AjaxViewer.X11_KEY_F11 = 0xffc8;
|
||||
AjaxViewer.X11_KEY_F12 = 0xffc9;
|
||||
AjaxViewer.X11_KEY_SHIFT = 0xffe1;
|
||||
AjaxViewer.X11_KEY_CTRL = 0xffe3;
|
||||
AjaxViewer.X11_KEY_ALT = 0xffe9;
|
||||
AjaxViewer.X11_KEY_GRAVE_ACCENT = 0x60;
|
||||
AjaxViewer.X11_KEY_SUBSTRACT = 0x2d;
|
||||
AjaxViewer.X11_KEY_ADD = 0x2b;
|
||||
AjaxViewer.X11_KEY_OPEN_BRACKET = 0x5b;
|
||||
AjaxViewer.X11_KEY_CLOSE_BRACKET = 0x5d;
|
||||
AjaxViewer.X11_KEY_BACK_SLASH = 0x7c;
|
||||
AjaxViewer.X11_KEY_REVERSE_SOLIUS = 0x5c; // another back slash (back slash on JP keyboard)
|
||||
AjaxViewer.X11_KEY_SINGLE_QUOTE = 0x22;
|
||||
AjaxViewer.X11_KEY_COMMA = 0x3c;
|
||||
AjaxViewer.X11_KEY_PERIOD = 0x3e;
|
||||
AjaxViewer.X11_KEY_FORWARD_SLASH = 0x3f;
|
||||
AjaxViewer.X11_KEY_DASH = 0x2d;
|
||||
AjaxViewer.X11_KEY_COLON = 0x3a;
|
||||
AjaxViewer.X11_KEY_SEMI_COLON = 0x3b;
|
||||
AjaxViewer.X11_KEY_NUMPAD0 = 0x30;
|
||||
AjaxViewer.X11_KEY_NUMPAD1 = 0x31;
|
||||
AjaxViewer.X11_KEY_NUMPAD2 = 0x32;
|
||||
AjaxViewer.X11_KEY_NUMPAD3 = 0x33;
|
||||
AjaxViewer.X11_KEY_NUMPAD4 = 0x34;
|
||||
AjaxViewer.X11_KEY_NUMPAD5 = 0x35;
|
||||
AjaxViewer.X11_KEY_NUMPAD6 = 0x36;
|
||||
AjaxViewer.X11_KEY_NUMPAD7 = 0x37;
|
||||
AjaxViewer.X11_KEY_NUMPAD8 = 0x38;
|
||||
AjaxViewer.X11_KEY_NUMPAD9 = 0x39;
|
||||
AjaxViewer.X11_KEY_DECIMAL_POINT = 0x2e;
|
||||
AjaxViewer.X11_KEY_DIVIDE = 0x3f;
|
||||
AjaxViewer.X11_KEY_TILDE = 0x7e; // ~
|
||||
AjaxViewer.X11_KEY_CIRCUMFLEX_ACCENT = 0x5e; // ^
|
||||
AjaxViewer.X11_KEY_YEN_MARK = 0xa5; // Japanese YEN mark
|
||||
AjaxViewer.X11_KEY_ASTERISK = 0x2a;
|
||||
|
||||
AjaxViewer.getEventName = function(type) {
|
||||
switch(type) {
|
||||
case AjaxViewer.MOUSE_MOVE :
|
||||
|
|
@ -500,25 +566,58 @@ AjaxViewer.prototype = {
|
|||
// setup Japanese keyboard translation table
|
||||
var mapper = new JsX11KeyboardMapper();
|
||||
this.keyboardMappers[AjaxViewer.KEYBOARD_TYPE_JAPANESE] = mapper;
|
||||
mapper.jsX11KeysymMap[AjaxViewer.JS_KEY_JP_COLON] = 0x3a;
|
||||
mapper.jsX11KeysymMap[AjaxViewer.JS_KEY_JP_CLOSE_BRACKET] = 0x5d;
|
||||
mapper.jsX11KeysymMap[AjaxViewer.JS_KEY_JP_AT_SIGN] = 0x60;
|
||||
mapper.jsX11KeysymMap[AjaxViewer.JS_KEY_JP_OPEN_BRACKET] = 0x5b;
|
||||
mapper.jsX11KeysymMap[AjaxViewer.JS_KEY_JP_BACK_SLASH] = 0x5c; // X11 REVERSE SOLIDUS
|
||||
mapper.jsX11KeysymMap[AjaxViewer.JS_KEY_JP_YEN_MARK] = 0xa5; // X11 YEN SIGN
|
||||
|
||||
// JP keyboard plugged in a English host OS
|
||||
/*
|
||||
mapper.jsX11KeysymMap[AjaxViewer.JS_KEY_JP_COLON] = AjaxViewer.X11_KEY_COLON;
|
||||
mapper.jsX11KeysymMap[AjaxViewer.JS_KEY_JP_CLOSE_BRACKET] = AjaxViewer.X11_KEY_CLOSE_BRACKET;
|
||||
mapper.jsX11KeysymMap[AjaxViewer.JS_KEY_JP_AT_SIGN] = AjaxViewer.X11_KEY_GRAVE_ACCENT;
|
||||
mapper.jsX11KeysymMap[AjaxViewer.JS_KEY_JP_OPEN_BRACKET] = AjaxViewer.X11_KEY_OPEN_BRACKET;
|
||||
mapper.jsX11KeysymMap[AjaxViewer.JS_KEY_JP_BACK_SLASH] = AjaxViewer.X11_KEY_REVERSE_SOLIUS; // X11 REVERSE SOLIDUS
|
||||
mapper.jsX11KeysymMap[AjaxViewer.JS_KEY_JP_YEN_MARK] = AjaxViewer.X11_KEY_YEN_MARK; // X11 YEN SIGN
|
||||
mapper.jsKeyPressX11KeysymMap[61] = [
|
||||
{type: AjaxViewer.KEY_DOWN, code: 0x5e, modifiers: 0 },
|
||||
{type: AjaxViewer.KEY_UP, code: 0x5e, modifiers: 0 },
|
||||
];
|
||||
{type: AjaxViewer.KEY_DOWN, code: AjaxViewer.X11_KEY_CIRCUMFLEX_ACCENT, modifiers: 0 },
|
||||
{type: AjaxViewer.KEY_UP, code: AjaxViewer.X11_KEY_CIRCUMFLEX_ACCENT, modifiers: 0 },
|
||||
];
|
||||
|
||||
mapper.jsKeyPressX11KeysymMap[43] = [
|
||||
{type: AjaxViewer.KEY_DOWN, code: 0xffe1, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_DOWN, code: 0x2b, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_UP, code: 0x2b, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_UP, code: 0xffe1, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_DOWN, code: 0x7e, modifiers: 0, shift: true },
|
||||
{type: AjaxViewer.KEY_UP, code: 0x7e, modifiers: 0, shift: true }
|
||||
];
|
||||
{type: AjaxViewer.KEY_DOWN, code: AjaxViewer.X11_KEY_SHIFT, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_DOWN, code: AjaxViewer.X11_KEY_ADD, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_UP, code: AjaxViewer.X11_KEY_ADD, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_UP, code: AjaxViewer.X11_KEY_SHIFT, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_DOWN, code: AjaxViewer.X11_KEY_TILDE, modifiers: 0, shift: true },
|
||||
{type: AjaxViewer.KEY_UP, code: AjaxViewer.X11_KEY_TILDE, modifiers: 0, shift: true }
|
||||
];
|
||||
*/
|
||||
|
||||
// JP keyboard plugged in a Japanese host OS
|
||||
mapper.jsX11KeysymMap[222] = AjaxViewer.X11_KEY_CIRCUMFLEX_ACCENT;
|
||||
mapper.jsX11KeysymMap[220] = AjaxViewer.X11_KEY_YEN_MARK;
|
||||
mapper.jsX11KeysymMap[219] = AjaxViewer.X11_KEY_OPEN_BRACKET;
|
||||
mapper.jsX11KeysymMap[221] = AjaxViewer.X11_KEY_CLOSE_BRACKET;
|
||||
mapper.jsX11KeysymMap[59] = AjaxViewer.X11_KEY_COLON; // Firefox
|
||||
mapper.jsX11KeysymMap[186] = AjaxViewer.X11_KEY_COLON; // Chrome
|
||||
mapper.jsX11KeysymMap[226] = AjaxViewer.X11_KEY_REVERSE_SOLIUS; // \| key left to right SHIFT on JP keyboard
|
||||
mapper.jsX11KeysymMap[240] = [
|
||||
{type: AjaxViewer.KEY_DOWN, code: AjaxViewer.X11_KEY_CAPSLOCK, modifiers: 0 },
|
||||
{type: AjaxViewer.KEY_UP, code: AjaxViewer.X11_KEY_CAPSLOCK, modifiers: 0 },
|
||||
];
|
||||
|
||||
// for keycode 107, keypress 59
|
||||
mapper.jsKeyPressX11KeysymMap[59] = [
|
||||
{type: AjaxViewer.KEY_DOWN, code: AjaxViewer.X11_KEY_SEMI_COLON, modifiers: 0 },
|
||||
{type: AjaxViewer.KEY_UP, code: AjaxViewer.X11_KEY_SEMI_COLON, modifiers: 0 },
|
||||
];
|
||||
|
||||
// for keycode 107, keypress 43
|
||||
mapper.jsKeyPressX11KeysymMap[43] = [
|
||||
{type: AjaxViewer.KEY_DOWN, code: AjaxViewer.X11_KEY_SHIFT, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_DOWN, code: AjaxViewer.X11_KEY_ADD, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_UP, code: AjaxViewer.X11_KEY_ADD, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_UP, code: AjaxViewer.X11_KEY_SHIFT, modifiers: 0, shift: false },
|
||||
{type: AjaxViewer.KEY_DOWN, code: AjaxViewer.X11_KEY_ADD, modifiers: 0, shift: true },
|
||||
{type: AjaxViewer.KEY_UP, code: AjaxViewer.X11_KEY_ADD, modifiers: 0, shift: true },
|
||||
];
|
||||
},
|
||||
|
||||
getCurrentKeyboardMapper : function() {
|
||||
|
|
|
|||
|
|
@ -17,24 +17,21 @@
|
|||
*/
|
||||
package com.cloud.hypervisor.xen.resource;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
/**
|
||||
* Reduce bloat inside CitrixResourceBase
|
||||
*
|
||||
*
|
||||
*/
|
||||
public class CitrixHelper {
|
||||
private static final Logger s_logger = Logger.getLogger(CitrixHelper.class);
|
||||
private static final HashMap<String, String> _xcp100GuestOsMap = new HashMap<String, String>(70);
|
||||
private static final HashMap<String, String> _xenServerGuestOsMap = new HashMap<String, String>(70);
|
||||
private static final HashMap<String, String> _xenServer56FP1GuestOsMap = new HashMap<String, String>(70);
|
||||
private static final HashMap<String, String> _xenServer56FP2GuestOsMap = new HashMap<String, String>(70);
|
||||
|
||||
private static final Logger s_logger = Logger.getLogger(CitrixHelper.class);
|
||||
private static final HashMap<String, String> _xcp100GuestOsMap = new HashMap<String, String>(70);
|
||||
private static final HashMap<String, String> _xenServerGuestOsMap = new HashMap<String, String>(70);
|
||||
private static final HashMap<String, String> _xenServer56FP1GuestOsMap = new HashMap<String, String>(70);
|
||||
private static final HashMap<String, String> _xenServer56FP2GuestOsMap = new HashMap<String, String>(70);
|
||||
private static final HashMap<String, String> _xenServer600GuestOsMap = new HashMap<String, String>(70);
|
||||
|
||||
static {
|
||||
_xcp100GuestOsMap.put("CentOS 4.5 (32-bit)", "CentOS 4.5 (32-bit)");
|
||||
|
|
@ -112,79 +109,78 @@ public class CitrixHelper {
|
|||
_xcp100GuestOsMap.put("Other PV (32-bit)", "CentOS 5 (32-bit)");
|
||||
_xcp100GuestOsMap.put("Other PV (64-bit)", "CentOS 5 (64-bit)");
|
||||
}
|
||||
|
||||
|
||||
static {
|
||||
_xenServerGuestOsMap.put("CentOS 4.5 (32-bit)", "CentOS 4.5 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 4.6 (32-bit)", "CentOS 4.6 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 4.7 (32-bit)", "CentOS 4.7 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 4.8 (32-bit)", "CentOS 4.8 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.0 (32-bit)", "CentOS 5.0 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.0 (64-bit)", "CentOS 5.0 (64-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.1 (32-bit)", "CentOS 5.1 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.1 (64-bit)", "CentOS 5.1 (64-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.2 (32-bit)", "CentOS 5.2 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.2 (64-bit)", "CentOS 5.2 (64-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.3 (32-bit)", "CentOS 5.3 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.3 (64-bit)", "CentOS 5.3 (64-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.4 (32-bit)", "CentOS 5.4 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.4 (64-bit)", "CentOS 5.4 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Debian GNU/Linux 5.0 (32-bit)", "Debian Lenny 5.0 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.0 (32-bit)", "Oracle Enterprise Linux 5.0 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.0 (64-bit)", "Oracle Enterprise Linux 5.0 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.1 (32-bit)", "Oracle Enterprise Linux 5.1 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.1 (64-bit)", "Oracle Enterprise Linux 5.1 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.2 (32-bit)", "Oracle Enterprise Linux 5.2 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.2 (64-bit)", "Oracle Enterprise Linux 5.2 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.3 (32-bit)", "Oracle Enterprise Linux 5.3 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.3 (64-bit)", "Oracle Enterprise Linux 5.3 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.4 (32-bit)", "Oracle Enterprise Linux 5.4 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.4 (64-bit)", "Oracle Enterprise Linux 5.4 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 4.5 (32-bit)", "Red Hat Enterprise Linux 4.5 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 4.6 (32-bit)", "Red Hat Enterprise Linux 4.6 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 4.7 (32-bit)", "Red Hat Enterprise Linux 4.7 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 4.8 (32-bit)", "Red Hat Enterprise Linux 4.8 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.0 (32-bit)", "Red Hat Enterprise Linux 5.0 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.0 (64-bit)", "Red Hat Enterprise Linux 5.0 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.1 (32-bit)", "Red Hat Enterprise Linux 5.1 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.1 (64-bit)", "Red Hat Enterprise Linux 5.1 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.2 (32-bit)", "Red Hat Enterprise Linux 5.2 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.2 (64-bit)", "Red Hat Enterprise Linux 5.2 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.3 (32-bit)", "Red Hat Enterprise Linux 5.3 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.3 (64-bit)", "Red Hat Enterprise Linux 5.3 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.4 (32-bit)", "Red Hat Enterprise Linux 5.4 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.4 (64-bit)", "Red Hat Enterprise Linux 5.4 (64-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 9 SP4 (32-bit)", "SUSE Linux Enterprise Server 9 SP4 (32-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 10 SP1 (32-bit)", "SUSE Linux Enterprise Server 10 SP1 (32-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 10 SP1 (64-bit)", "SUSE Linux Enterprise Server 10 SP1 (64-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 10 SP2 (32-bit)", "SUSE Linux Enterprise Server 10 SP2 (32-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 10 SP2 (64-bit)", "SUSE Linux Enterprise Server 10 SP2 (64-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 10 SP3 (64-bit)", "SUSE Linux Enterprise Server 10 SP3 (64-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 11 (32-bit)", "SUSE Linux Enterprise Server 11 (32-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 11 (64-bit)", "SUSE Linux Enterprise Server 11 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Windows 7 (32-bit)", "Windows 7 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Windows 7 (64-bit)", "Windows 7 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2003 (32-bit)", "Windows Server 2003 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2003 (64-bit)", "Windows Server 2003 (64-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 4.5 (32-bit)", "CentOS 4.5 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 4.6 (32-bit)", "CentOS 4.6 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 4.7 (32-bit)", "CentOS 4.7 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 4.8 (32-bit)", "CentOS 4.8 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.0 (32-bit)", "CentOS 5.0 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.0 (64-bit)", "CentOS 5.0 (64-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.1 (32-bit)", "CentOS 5.1 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.1 (64-bit)", "CentOS 5.1 (64-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.2 (32-bit)", "CentOS 5.2 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.2 (64-bit)", "CentOS 5.2 (64-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.3 (32-bit)", "CentOS 5.3 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.3 (64-bit)", "CentOS 5.3 (64-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.4 (32-bit)", "CentOS 5.4 (32-bit)");
|
||||
_xenServerGuestOsMap.put("CentOS 5.4 (64-bit)", "CentOS 5.4 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Debian GNU/Linux 5.0 (32-bit)", "Debian Lenny 5.0 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.0 (32-bit)", "Oracle Enterprise Linux 5.0 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.0 (64-bit)", "Oracle Enterprise Linux 5.0 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.1 (32-bit)", "Oracle Enterprise Linux 5.1 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.1 (64-bit)", "Oracle Enterprise Linux 5.1 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.2 (32-bit)", "Oracle Enterprise Linux 5.2 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.2 (64-bit)", "Oracle Enterprise Linux 5.2 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.3 (32-bit)", "Oracle Enterprise Linux 5.3 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.3 (64-bit)", "Oracle Enterprise Linux 5.3 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.4 (32-bit)", "Oracle Enterprise Linux 5.4 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Oracle Enterprise Linux 5.4 (64-bit)", "Oracle Enterprise Linux 5.4 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 4.5 (32-bit)", "Red Hat Enterprise Linux 4.5 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 4.6 (32-bit)", "Red Hat Enterprise Linux 4.6 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 4.7 (32-bit)", "Red Hat Enterprise Linux 4.7 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 4.8 (32-bit)", "Red Hat Enterprise Linux 4.8 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.0 (32-bit)", "Red Hat Enterprise Linux 5.0 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.0 (64-bit)", "Red Hat Enterprise Linux 5.0 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.1 (32-bit)", "Red Hat Enterprise Linux 5.1 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.1 (64-bit)", "Red Hat Enterprise Linux 5.1 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.2 (32-bit)", "Red Hat Enterprise Linux 5.2 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.2 (64-bit)", "Red Hat Enterprise Linux 5.2 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.3 (32-bit)", "Red Hat Enterprise Linux 5.3 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.3 (64-bit)", "Red Hat Enterprise Linux 5.3 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.4 (32-bit)", "Red Hat Enterprise Linux 5.4 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Red Hat Enterprise Linux 5.4 (64-bit)", "Red Hat Enterprise Linux 5.4 (64-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 9 SP4 (32-bit)", "SUSE Linux Enterprise Server 9 SP4 (32-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 10 SP1 (32-bit)", "SUSE Linux Enterprise Server 10 SP1 (32-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 10 SP1 (64-bit)", "SUSE Linux Enterprise Server 10 SP1 (64-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 10 SP2 (32-bit)", "SUSE Linux Enterprise Server 10 SP2 (32-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 10 SP2 (64-bit)", "SUSE Linux Enterprise Server 10 SP2 (64-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 10 SP3 (64-bit)", "SUSE Linux Enterprise Server 10 SP3 (64-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 11 (32-bit)", "SUSE Linux Enterprise Server 11 (32-bit)");
|
||||
_xenServerGuestOsMap.put("SUSE Linux Enterprise Server 11 (64-bit)", "SUSE Linux Enterprise Server 11 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Windows 7 (32-bit)", "Windows 7 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Windows 7 (64-bit)", "Windows 7 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2003 (32-bit)", "Windows Server 2003 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2003 (64-bit)", "Windows Server 2003 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2003 DataCenter Edition(32-bit)", "Windows Server 2003 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2003 DataCenter Edition(64-bit)", "Windows Server 2003 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2003 Enterprise Edition(32-bit)", "Windows Server 2003 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2003 Enterprise Edition(64-bit)", "Windows Server 2003 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2003 Standard Edition(32-bit)", "Windows Server 2003 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2003 Standard Edition(64-bit)", "Windows Server 2003 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2008 (32-bit)", "Windows Server 2008 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2008 (64-bit)", "Windows Server 2008 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2008 R2 (64-bit)", "Windows Server 2008 R2 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Windows 2000 SP4 (32-bit)", "Windows 2000 SP4 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Vista (32-bit)", "Windows Vista (32-bit)");
|
||||
_xenServerGuestOsMap.put("Windows XP SP2 (32-bit)", "Windows XP SP2 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Windows XP SP3 (32-bit)", "Windows XP SP3 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Other Linux (32-bit)", "Other install media");
|
||||
_xenServerGuestOsMap.put("Other Linux (64-bit)", "Other install media");
|
||||
_xenServerGuestOsMap.put("Other PV (32-bit)", "CentOS 5.4 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Other PV (64-bit)", "CentOS 5.4 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2008 (32-bit)", "Windows Server 2008 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2008 (64-bit)", "Windows Server 2008 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Server 2008 R2 (64-bit)", "Windows Server 2008 R2 (64-bit)");
|
||||
_xenServerGuestOsMap.put("Windows 2000 SP4 (32-bit)", "Windows 2000 SP4 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Windows Vista (32-bit)", "Windows Vista (32-bit)");
|
||||
_xenServerGuestOsMap.put("Windows XP SP2 (32-bit)", "Windows XP SP2 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Windows XP SP3 (32-bit)", "Windows XP SP3 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Other Linux (32-bit)", "Other install media");
|
||||
_xenServerGuestOsMap.put("Other Linux (64-bit)", "Other install media");
|
||||
_xenServerGuestOsMap.put("Other PV (32-bit)", "CentOS 5.4 (32-bit)");
|
||||
_xenServerGuestOsMap.put("Other PV (64-bit)", "CentOS 5.4 (64-bit)");
|
||||
}
|
||||
|
||||
|
||||
static {
|
||||
_xenServer56FP1GuestOsMap.put("CentOS 4.5 (32-bit)", "CentOS 4.5 (32-bit)");
|
||||
_xenServer56FP1GuestOsMap.put("CentOS 4.6 (32-bit)", "CentOS 4.6 (32-bit)");
|
||||
|
|
@ -266,9 +262,7 @@ public class CitrixHelper {
|
|||
_xenServer56FP1GuestOsMap.put("Other PV (32-bit)", "CentOS 5 (32-bit)");
|
||||
_xenServer56FP1GuestOsMap.put("Other PV (64-bit)", "CentOS 5 (64-bit)");
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
static {
|
||||
_xenServer56FP2GuestOsMap.put("CentOS 4.5 (32-bit)", "CentOS 4.5 (32-bit)");
|
||||
_xenServer56FP2GuestOsMap.put("CentOS 4.6 (32-bit)", "CentOS 4.6 (32-bit)");
|
||||
|
|
@ -349,24 +343,113 @@ public class CitrixHelper {
|
|||
_xenServer56FP2GuestOsMap.put("Other PV (32-bit)", "CentOS 5 (32-bit)");
|
||||
_xenServer56FP2GuestOsMap.put("Other PV (64-bit)", "CentOS 5 (64-bit)");
|
||||
}
|
||||
|
||||
|
||||
|
||||
static {
|
||||
_xenServer600GuestOsMap.put("CentOS 4.5 (32-bit)", "CentOS 4.5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 4.6 (32-bit)", "CentOS 4.6 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 4.7 (32-bit)", "CentOS 4.7 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 4.8 (32-bit)", "CentOS 4.8 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 5.0 (32-bit)", "CentOS 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 5.0 (64-bit)", "CentOS 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 5.1 (32-bit)", "CentOS 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 5.1 (64-bit)", "CentOS 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 5.2 (32-bit)", "CentOS 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 5.2 (64-bit)", "CentOS 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 5.3 (32-bit)", "CentOS 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 5.3 (64-bit)", "CentOS 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 5.4 (32-bit)", "CentOS 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 5.4 (64-bit)", "CentOS 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 5.5 (32-bit)", "CentOS 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 5.5 (64-bit)", "CentOS 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 5.6 (32-bit)", "CentOS 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 5.6 (64-bit)", "CentOS 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("CentOS 6.0 (32-bit)", "CentOS 6 (32-bit) (experimental)");
|
||||
_xenServer600GuestOsMap.put("CentOS 6.0 (64-bit)", "CentOS 6 (64-bit) (experimental)");
|
||||
_xenServer600GuestOsMap.put("Debian GNU/Linux 5.0 (32-bit)", "Debian Lenny 5.0 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Debian GNU/Linux 6(32-bit)", "Debian Squeeze 6.0 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Debian GNU/Linux 6(64-bit)", "Debian Squeeze 6.0 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 5.0 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 5.0 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 5.1 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 5.1 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 5.2 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 5.2 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 5.3 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 5.3 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 5.4 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 5.4 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 5.5 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 5.5 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 5.6 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 5.6 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 6.0 (32-bit)", "Oracle Enterprise Linux 6.0 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Oracle Enterprise Linux 6.0 (64-bit)", "Oracle Enterprise Linux 6.0 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 4.5 (32-bit)", "Red Hat Enterprise Linux 4.5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 4.6 (32-bit)", "Red Hat Enterprise Linux 4.6 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 4.7 (32-bit)", "Red Hat Enterprise Linux 4.7 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 4.8 (32-bit)", "Red Hat Enterprise Linux 4.8 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 5.0 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 5.0 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 5.1 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 5.1 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 5.2 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 5.2 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 5.3 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 5.3 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 5.4 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 5.4 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 5.5 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 5.5 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 5.6 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 5.6 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Red Hat Enterprise Linux 6.0 (32-bit)", "Red Hat Enterprise Linux 6.0 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("SUSE Linux Enterprise Server 9 SP4 (32-bit)", "SUSE Linux Enterprise Server 9 SP4 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("SUSE Linux Enterprise Server 10 SP1 (32-bit)", "SUSE Linux Enterprise Server 10 SP1 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("SUSE Linux Enterprise Server 10 SP1 (64-bit)", "SUSE Linux Enterprise Server 10 SP1 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("SUSE Linux Enterprise Server 10 SP2 (32-bit)", "SUSE Linux Enterprise Server 10 SP2 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("SUSE Linux Enterprise Server 10 SP2 (64-bit)", "SUSE Linux Enterprise Server 10 SP2 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("SUSE Linux Enterprise Server 10 SP3 (32-bit)", "SUSE Linux Enterprise Server 10 SP3 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("SUSE Linux Enterprise Server 10 SP3 (64-bit)", "SUSE Linux Enterprise Server 10 SP3 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("SUSE Linux Enterprise Server 10 SP4 (32-bit)", "SUSE Linux Enterprise Server 10 SP4 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("SUSE Linux Enterprise Server 10 SP4 (64-bit)", "SUSE Linux Enterprise Server 10 SP4 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("SUSE Linux Enterprise Server 11 (32-bit)", "SUSE Linux Enterprise Server 11 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("SUSE Linux Enterprise Server 11 (64-bit)", "SUSE Linux Enterprise Server 11 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("SUSE Linux Enterprise Server 11 SP1 (32-bit)", "SUSE Linux Enterprise Server 11 SP1 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("SUSE Linux Enterprise Server 11 SP1 (64-bit)", "SUSE Linux Enterprise Server 11 SP1 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Windows 7 (32-bit)", "Windows 7 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Windows 7 (64-bit)", "Windows 7 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Windows Server 2003 (32-bit)", "Windows Server 2003 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Windows Server 2003 (64-bit)", "Windows Server 2003 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Windows Server 2008 (32-bit)", "Windows Server 2008 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Windows Server 2008 (64-bit)", "Windows Server 2008 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Windows Server 2008 R2 (64-bit)", "Windows Server 2008 R2 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Windows Vista (32-bit)", "Windows Vista (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Windows XP SP3 (32-bit)", "Windows XP SP3 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Ubuntu 10.04 (32-bit)", "Ubuntu Lucid Lynx 10.04 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Ubuntu 10.04 (64-bit)", "Ubuntu Lucid Lynx 10.04 (64-bit)");
|
||||
_xenServer600GuestOsMap.put("Ubuntu 10.10 (32-bit)", "Ubuntu Maverick Meerkat 10.10 (32-bit) (experimental)");
|
||||
_xenServer600GuestOsMap.put("Ubuntu 10.10 (64-bit)", "Ubuntu Maverick Meerkat 10.10 (64-bit) (experimental)");
|
||||
_xenServer600GuestOsMap.put("Other Linux (32-bit)", "Other install media");
|
||||
_xenServer600GuestOsMap.put("Other Linux (64-bit)", "Other install media");
|
||||
_xenServer600GuestOsMap.put("Other PV (32-bit)", "CentOS 5 (32-bit)");
|
||||
_xenServer600GuestOsMap.put("Other PV (64-bit)", "CentOS 5 (64-bit)");
|
||||
}
|
||||
|
||||
public static String getXcpGuestOsType(String stdType) {
|
||||
String guestOS = _xcp100GuestOsMap.get(stdType);
|
||||
String guestOS = _xcp100GuestOsMap.get(stdType);
|
||||
if (guestOS == null) {
|
||||
s_logger.debug("Can't find the guest os: " + stdType + " mapping into XCP's guestOS type, start it as HVM guest");
|
||||
guestOS = "Other install media";
|
||||
s_logger.debug("Can't find the guest os: " + stdType + " mapping into XCP's guestOS type, start it as HVM guest");
|
||||
guestOS = "Other install media";
|
||||
}
|
||||
return guestOS;
|
||||
}
|
||||
|
||||
|
||||
|
||||
public static String getXenServerGuestOsType(String stdType, boolean bootFromCD) {
|
||||
String guestOS = _xenServerGuestOsMap.get(stdType);
|
||||
String guestOS = _xenServerGuestOsMap.get(stdType);
|
||||
if (guestOS == null) {
|
||||
if ( !bootFromCD ) {
|
||||
s_logger.debug("Can't find the guest os: " + stdType + " mapping into XenServer 5.6 guestOS type, start it as HVM guest");
|
||||
guestOS = "Other install media";
|
||||
if (!bootFromCD) {
|
||||
s_logger.debug("Can't find the guest os: " + stdType + " mapping into XenServer 5.6 guestOS type, start it as HVM guest");
|
||||
guestOS = "Other install media";
|
||||
} else {
|
||||
String msg = "XenServer 5.6 doesn't support Guest OS type " + stdType;
|
||||
s_logger.warn(msg);
|
||||
|
|
@ -374,11 +457,11 @@ public class CitrixHelper {
|
|||
}
|
||||
return guestOS;
|
||||
}
|
||||
|
||||
public static String getXenServer56FP1GuestOsType(String stdType, boolean bootFromCD) {
|
||||
String guestOS = _xenServer56FP1GuestOsMap.get(stdType);
|
||||
|
||||
public static String getXenServer56FP1GuestOsType(String stdType, boolean bootFromCD) {
|
||||
String guestOS = _xenServer56FP1GuestOsMap.get(stdType);
|
||||
if (guestOS == null) {
|
||||
if ( !bootFromCD ) {
|
||||
if (!bootFromCD) {
|
||||
s_logger.debug("Can't find the guest os: " + stdType + " mapping into XenServer 5.6 FP1 guestOS type, start it as HVM guest");
|
||||
guestOS = "Other install media";
|
||||
} else {
|
||||
|
|
@ -388,11 +471,11 @@ public class CitrixHelper {
|
|||
}
|
||||
return guestOS;
|
||||
}
|
||||
|
||||
|
||||
public static String getXenServer56SP2GuestOsType(String stdType, boolean bootFromCD) {
|
||||
String guestOS = _xenServer56FP2GuestOsMap.get(stdType);
|
||||
String guestOS = _xenServer56FP2GuestOsMap.get(stdType);
|
||||
if (guestOS == null) {
|
||||
if ( !bootFromCD ) {
|
||||
if (!bootFromCD) {
|
||||
s_logger.debug("Can't find the guest os: " + stdType + " mapping into XenServer 5.6 SP2 guestOS type, start it as HVM guest");
|
||||
guestOS = "Other install media";
|
||||
} else {
|
||||
|
|
@ -402,4 +485,19 @@ public class CitrixHelper {
|
|||
}
|
||||
return guestOS;
|
||||
}
|
||||
|
||||
public static String getXenServer600GuestOsType(String stdType, boolean bootFromCD) {
|
||||
String guestOS = _xenServer600GuestOsMap.get(stdType);
|
||||
if (guestOS == null) {
|
||||
if (!bootFromCD) {
|
||||
s_logger.debug("Can't find the guest os: " + stdType + " mapping into XenServer 6.0 guestOS type, start it as HVM guest");
|
||||
guestOS = "Other install media";
|
||||
} else {
|
||||
String msg = "XenServer 6.0 DOES NOT support Guest OS type " + stdType;
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
|
||||
}
|
||||
return guestOS;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -281,7 +281,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
protected String _localGateway;
|
||||
|
||||
public enum SRType {
|
||||
NFS, LVM, ISCSI, ISO, LVMOISCSI, LVMOHBA;
|
||||
NFS, LVM, ISCSI, ISO, LVMOISCSI, LVMOHBA, EXT;
|
||||
|
||||
String _str;
|
||||
|
||||
|
|
@ -1074,9 +1074,12 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
s_vms.put(_cluster, _name, vmName, State.Starting);
|
||||
|
||||
|
||||
synchronized (s_vms) {
|
||||
s_logger.debug("1. The VM " + vmName + " is in Starting state.");
|
||||
s_vms.put(_cluster, _name, vmName, State.Starting);
|
||||
}
|
||||
|
||||
Host host = Host.getByUuid(conn, _host.uuid);
|
||||
vm = createVmFromTemplate(conn, vmSpec, host);
|
||||
|
||||
|
|
@ -1156,11 +1159,13 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
return new StartAnswer(cmd, msg);
|
||||
} finally {
|
||||
synchronized (s_vms) {
|
||||
if (state != State.Stopped) {
|
||||
s_vms.put(_cluster, _name, vmName, state);
|
||||
} else {
|
||||
s_vms.remove(_cluster, _name, vmName);
|
||||
}
|
||||
if (state != State.Stopped) {
|
||||
s_logger.debug("2. The VM " + vmName + " is in " + state + " state.");
|
||||
s_vms.put(_cluster, _name, vmName, state);
|
||||
} else {
|
||||
s_logger.debug("The VM is in stopped state, detected problem during startup : " + vmName);
|
||||
s_vms.remove(_cluster, _name, vmName);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -2134,6 +2139,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
Integer vncPort = null;
|
||||
if (state == State.Running) {
|
||||
synchronized (s_vms) {
|
||||
s_logger.debug("3. The VM " + vmName + " is in " + State.Running + " state");
|
||||
s_vms.put(_cluster, _name, vmName, State.Running);
|
||||
}
|
||||
}
|
||||
|
|
@ -2156,7 +2162,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
for (NicTO nic : nics) {
|
||||
getNetwork(conn, nic);
|
||||
}
|
||||
s_vms.put(_cluster, _name, vm.getName(), State.Migrating);
|
||||
synchronized (s_vms) {
|
||||
s_logger.debug("4. The VM " + vm.getName() + " is in " + State.Migrating + " state");
|
||||
s_vms.put(_cluster, _name, vm.getName(), State.Migrating);
|
||||
}
|
||||
|
||||
return new PrepareForMigrationAnswer(cmd);
|
||||
} catch (Exception e) {
|
||||
|
|
@ -2391,7 +2400,11 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
State state = null;
|
||||
|
||||
state = s_vms.getState(_cluster, vmName);
|
||||
s_vms.put(_cluster, _name, vmName, State.Stopping);
|
||||
|
||||
synchronized (s_vms) {
|
||||
s_logger.debug("5. The VM " + vmName + " is in " + State.Stopping + " state");
|
||||
s_vms.put(_cluster, _name, vmName, State.Stopping);
|
||||
}
|
||||
try {
|
||||
Set<VM> vms = VM.getByNameLabel(conn, vmName);
|
||||
|
||||
|
|
@ -2454,7 +2467,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
s_logger.warn(msg, e);
|
||||
return new MigrateAnswer(cmd, false, msg, null);
|
||||
} finally {
|
||||
s_vms.put(_cluster, _name, vmName, state);
|
||||
synchronized (s_vms) {
|
||||
s_logger.debug("6. The VM " + vmName + " is in " + State.Stopping + " state");
|
||||
s_vms.put(_cluster, _name, vmName, state);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -2564,7 +2580,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
hvm = "false";
|
||||
}
|
||||
|
||||
String vncport = callHostPlugin(conn, "vmops", "getvncport", "domID", record.domid.toString(), "hvm", hvm);
|
||||
String vncport = callHostPlugin(conn, "vmops", "getvncport", "domID", record.domid.toString(), "hvm", hvm, "version", _host.product_version);
|
||||
if (vncport == null || vncport.isEmpty()) {
|
||||
return -1;
|
||||
}
|
||||
|
|
@ -2576,7 +2592,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
@Override
|
||||
public RebootAnswer execute(RebootCommand cmd) {
|
||||
Connection conn = getConnection();
|
||||
s_vms.put(_cluster, _name, cmd.getVmName(), State.Starting);
|
||||
synchronized (s_vms) {
|
||||
s_logger.debug("7. The VM " + cmd.getVmName() + " is in " + State.Starting + " state");
|
||||
s_vms.put(_cluster, _name, cmd.getVmName(), State.Starting);
|
||||
}
|
||||
try {
|
||||
Set<VM> vms = null;
|
||||
try {
|
||||
|
|
@ -2599,7 +2618,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
}
|
||||
return new RebootAnswer(cmd, "reboot succeeded", null, null);
|
||||
} finally {
|
||||
s_vms.put(_cluster, _name, cmd.getVmName(), State.Running);
|
||||
synchronized (s_vms) {
|
||||
s_logger.debug("8. The VM " + cmd.getVmName() + " is in " + State.Running + " state");
|
||||
s_vms.put(_cluster, _name, cmd.getVmName(), State.Running);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -3060,8 +3082,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
}
|
||||
|
||||
if (vms.size() == 0) {
|
||||
s_logger.warn("VM does not exist on XenServer" + _host.uuid);
|
||||
s_vms.remove(_cluster, _name, vmName);
|
||||
synchronized (s_vms) {
|
||||
s_logger.info("VM does not exist on XenServer" + _host.uuid);
|
||||
s_vms.remove(_cluster, _name, vmName);
|
||||
}
|
||||
return new StopAnswer(cmd, "VM does not exist", 0 , 0L, 0L);
|
||||
}
|
||||
Long bytesSent = 0L;
|
||||
|
|
@ -3082,7 +3106,11 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
}
|
||||
|
||||
State state = s_vms.getState(_cluster, vmName);
|
||||
s_vms.put(_cluster, _name, vmName, State.Stopping);
|
||||
|
||||
synchronized (s_vms) {
|
||||
s_logger.debug("9. The VM " + vmName + " is in " + State.Stopping + " state");
|
||||
s_vms.put(_cluster, _name, vmName, State.Stopping);
|
||||
}
|
||||
|
||||
try {
|
||||
if (vmr.powerState == VmPowerState.RUNNING) {
|
||||
|
|
@ -3143,7 +3171,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
String msg = "VM destroy failed in Stop " + vmName + " Command due to " + e.getMessage();
|
||||
s_logger.warn(msg, e);
|
||||
} finally {
|
||||
s_vms.put(_cluster, _name, vmName, state);
|
||||
synchronized (s_vms) {
|
||||
s_logger.debug("10. The VM " + vmName + " is in " + state + " state");
|
||||
s_vms.put(_cluster, _name, vmName, state);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -3605,43 +3636,100 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
s_logger.warn(msg);
|
||||
}
|
||||
return null;
|
||||
|
||||
}
|
||||
|
||||
protected StartupStorageCommand initializeLocalSR(Connection conn) {
|
||||
|
||||
SR lvmsr = getLocalLVMSR(conn);
|
||||
if (lvmsr == null) {
|
||||
return null;
|
||||
}
|
||||
protected SR getLocalEXTSR(Connection conn) {
|
||||
try {
|
||||
String lvmuuid = lvmsr.getUuid(conn);
|
||||
long cap = lvmsr.getPhysicalSize(conn);
|
||||
if (cap < 0) {
|
||||
return null;
|
||||
Map<SR, SR.Record> map = SR.getAllRecords(conn);
|
||||
for (Map.Entry<SR, SR.Record> entry : map.entrySet()) {
|
||||
SR.Record srRec = entry.getValue();
|
||||
if (SRType.EXT.equals(srRec.type)) {
|
||||
Set<PBD> pbds = srRec.PBDs;
|
||||
if (pbds == null) {
|
||||
continue;
|
||||
}
|
||||
for (PBD pbd : pbds) {
|
||||
Host host = pbd.getHost(conn);
|
||||
if (!isRefNull(host) && host.getUuid(conn).equals(_host.uuid)) {
|
||||
if (!pbd.getCurrentlyAttached(conn)) {
|
||||
pbd.plug(conn);
|
||||
}
|
||||
SR sr = entry.getKey();
|
||||
sr.scan(conn);
|
||||
return sr;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
long avail = cap - lvmsr.getPhysicalUtilisation(conn);
|
||||
lvmsr.setNameLabel(conn, lvmuuid);
|
||||
String name = "Cloud Stack Local Storage Pool for " + _host.uuid;
|
||||
lvmsr.setNameDescription(conn, name);
|
||||
Host host = Host.getByUuid(conn, _host.uuid);
|
||||
String address = host.getAddress(conn);
|
||||
StoragePoolInfo pInfo = new StoragePoolInfo(lvmuuid, address, SRType.LVM.toString(), SRType.LVM.toString(), StoragePoolType.LVM, cap, avail);
|
||||
StartupStorageCommand cmd = new StartupStorageCommand();
|
||||
cmd.setPoolInfo(pInfo);
|
||||
cmd.setGuid(_host.uuid);
|
||||
cmd.setDataCenter(Long.toString(_dcId));
|
||||
cmd.setResourceType(Storage.StorageResourceType.STORAGE_POOL);
|
||||
return cmd;
|
||||
} catch (XenAPIException e) {
|
||||
String msg = "build startupstoragecommand err in host:" + _host.uuid + e.toString();
|
||||
String msg = "Unable to get local EXTSR in host:" + _host.uuid + e.toString();
|
||||
s_logger.warn(msg);
|
||||
} catch (XmlRpcException e) {
|
||||
String msg = "build startupstoragecommand err in host:" + _host.uuid + e.getMessage();
|
||||
String msg = "Unable to get local EXTSR in host:" + _host.uuid + e.getCause();
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
protected StartupStorageCommand initializeLocalSR(Connection conn) {
|
||||
SR lvmsr = getLocalLVMSR(conn);
|
||||
if (lvmsr != null) {
|
||||
try {
|
||||
String lvmuuid = lvmsr.getUuid(conn);
|
||||
long cap = lvmsr.getPhysicalSize(conn);
|
||||
if (cap > 0) {
|
||||
long avail = cap - lvmsr.getPhysicalUtilisation(conn);
|
||||
lvmsr.setNameLabel(conn, lvmuuid);
|
||||
String name = "Cloud Stack Local LVM Storage Pool for " + _host.uuid;
|
||||
lvmsr.setNameDescription(conn, name);
|
||||
Host host = Host.getByUuid(conn, _host.uuid);
|
||||
String address = host.getAddress(conn);
|
||||
StoragePoolInfo pInfo = new StoragePoolInfo(lvmuuid, address, SRType.LVM.toString(), SRType.LVM.toString(), StoragePoolType.LVM, cap, avail);
|
||||
StartupStorageCommand cmd = new StartupStorageCommand();
|
||||
cmd.setPoolInfo(pInfo);
|
||||
cmd.setGuid(_host.uuid);
|
||||
cmd.setDataCenter(Long.toString(_dcId));
|
||||
cmd.setResourceType(Storage.StorageResourceType.STORAGE_POOL);
|
||||
return cmd;
|
||||
}
|
||||
} catch (XenAPIException e) {
|
||||
String msg = "build local LVM info err in host:" + _host.uuid + e.toString();
|
||||
s_logger.warn(msg);
|
||||
} catch (XmlRpcException e) {
|
||||
String msg = "build local LVM info err in host:" + _host.uuid + e.getMessage();
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
}
|
||||
|
||||
SR extsr = getLocalEXTSR(conn);
|
||||
if (extsr != null) {
|
||||
try {
|
||||
String extuuid = extsr.getUuid(conn);
|
||||
long cap = extsr.getPhysicalSize(conn);
|
||||
if (cap > 0) {
|
||||
long avail = cap - extsr.getPhysicalUtilisation(conn);
|
||||
extsr.setNameLabel(conn, extuuid);
|
||||
String name = "Cloud Stack Local EXT Storage Pool for " + _host.uuid;
|
||||
extsr.setNameDescription(conn, name);
|
||||
Host host = Host.getByUuid(conn, _host.uuid);
|
||||
String address = host.getAddress(conn);
|
||||
StoragePoolInfo pInfo = new StoragePoolInfo(extuuid, address, SRType.EXT.toString(), SRType.EXT.toString(), StoragePoolType.EXT, cap, avail);
|
||||
StartupStorageCommand cmd = new StartupStorageCommand();
|
||||
cmd.setPoolInfo(pInfo);
|
||||
cmd.setGuid(_host.uuid);
|
||||
cmd.setDataCenter(Long.toString(_dcId));
|
||||
cmd.setResourceType(Storage.StorageResourceType.STORAGE_POOL);
|
||||
return cmd;
|
||||
}
|
||||
} catch (XenAPIException e) {
|
||||
String msg = "build local EXT info err in host:" + _host.uuid + e.toString();
|
||||
s_logger.warn(msg);
|
||||
} catch (XmlRpcException e) {
|
||||
String msg = "build local EXT info err in host:" + _host.uuid + e.getMessage();
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -3712,6 +3800,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
_host.speed = hc.getSpeed(conn).intValue();
|
||||
break;
|
||||
}
|
||||
Host.Record hr = myself.getRecord(conn);
|
||||
_host.product_version = hr.softwareVersion.get("product_version").trim();
|
||||
|
||||
XsLocalNetwork privateNic = getManagementNetwork(conn);
|
||||
_privateNetworkName = privateNic.getNetworkRecord(conn).nameLabel;
|
||||
_host.privatePif = privateNic.getPifRecord(conn).uuid;
|
||||
|
|
@ -3898,6 +3989,20 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
cmd.setHypervisorType(HypervisorType.XenServer);
|
||||
cmd.setCluster(_cluster);
|
||||
cmd.setPoolSync(false);
|
||||
|
||||
Pool pool;
|
||||
try {
|
||||
pool = Pool.getByUuid(conn, _host.pool);
|
||||
Pool.Record poolr = pool.getRecord(conn);
|
||||
|
||||
Host.Record hostr = poolr.master.getRecord(conn);
|
||||
if (_host.uuid.equals(hostr.uuid)) {
|
||||
HashMap<String, Pair<String, State>> allStates=fullClusterSync(conn);
|
||||
cmd.setClusterVMStateChanges(allStates);
|
||||
}
|
||||
} catch (Throwable e) {
|
||||
s_logger.warn("Check for master failed, failing the FULL Cluster sync command");
|
||||
}
|
||||
|
||||
StartupStorageCommand sscmd = initializeLocalSR(conn);
|
||||
if (sscmd != null) {
|
||||
|
|
@ -6391,7 +6496,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
public String pool;
|
||||
public int speed;
|
||||
public int cpus;
|
||||
|
||||
public String product_version;
|
||||
@Override
|
||||
public String toString() {
|
||||
return new StringBuilder("XS[").append(uuid).append("-").append(ip).append("]").toString();
|
||||
|
|
@ -6462,29 +6567,28 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
|
||||
Host.Record hostr = poolr.master.getRecord(conn);
|
||||
if (!_host.uuid.equals(hostr.uuid)) {
|
||||
return new ClusterSyncAnswer(cmd.getClusterId());
|
||||
return new Answer(cmd);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
} catch (Throwable e) {
|
||||
s_logger.warn("Check for master failed, failing the Cluster sync command");
|
||||
return new ClusterSyncAnswer(cmd.getClusterId());
|
||||
}
|
||||
return new Answer(cmd);
|
||||
}
|
||||
HashMap<String, Pair<String, State>> newStates = deltaClusterSync(conn);
|
||||
cmd.incrStep();
|
||||
if (cmd.isRightStep()){
|
||||
// do full sync
|
||||
HashMap<String, Pair<String, State>> allStates=fullClusterSync(conn);
|
||||
return new ClusterSyncAnswer(cmd.getClusterId(), newStates, allStates);
|
||||
HashMap<String, Pair<String, State>> allStates=fullClusterSync(conn);
|
||||
return new ClusterSyncAnswer(cmd.getClusterId(), newStates, allStates);
|
||||
}
|
||||
else {
|
||||
cmd.incrStep();
|
||||
return new ClusterSyncAnswer(cmd.getClusterId(), newStates);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
protected HashMap<String, Pair<String, State>> fullClusterSync(Connection conn) {
|
||||
s_vms.clear(_cluster);
|
||||
XenServerPoolVms vms = new XenServerPoolVms();
|
||||
try {
|
||||
Host lhost = Host.getByUuid(conn, _host.uuid);
|
||||
Map<VM, VM.Record> vm_map = VM.getAllRecords(conn); //USE THIS TO GET ALL VMS FROM A CLUSTER
|
||||
for (VM.Record record: vm_map.values()) {
|
||||
if (record.isControlDomain || record.isASnapshot || record.isATemplate) {
|
||||
|
|
@ -6497,35 +6601,32 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
String host_uuid = null;
|
||||
if( ! isRefNull(host) ) {
|
||||
host_uuid = host.getUuid(conn);
|
||||
s_vms.put(_cluster, host_uuid, vm_name, state);
|
||||
vms.put(_cluster, host_uuid, vm_name, state);
|
||||
}
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("VM " + vm_name + ": powerstate = " + ps + "; vm state=" + state.toString());
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (final Throwable e) {
|
||||
String msg = "Unable to get vms through host " + _host.uuid + " due to to " + e.toString();
|
||||
String msg = "Unable to get vms through host " + _host.uuid + " due to to " + e.toString();
|
||||
s_logger.warn(msg, e);
|
||||
throw new CloudRuntimeException(msg);
|
||||
}
|
||||
return s_vms.getClusterVmState(_cluster);
|
||||
return vms.getClusterVmState(_cluster);
|
||||
}
|
||||
|
||||
|
||||
|
||||
protected HashMap<String, Pair<String, State>> deltaClusterSync(Connection conn) {
|
||||
HashMap<String, Pair<String, State>> newStates;
|
||||
HashMap<String, Pair<String, State>> oldStates = null;
|
||||
|
||||
final HashMap<String, Pair<String, State>> changes = new HashMap<String, Pair<String, State>>();
|
||||
|
||||
newStates = getAllVms(conn);
|
||||
if (newStates == null) {
|
||||
s_logger.warn("Unable to get the vm states so no state sync at this point.");
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
synchronized (s_vms) {
|
||||
oldStates = new HashMap<String, Pair<String, State>>(s_vms.size(_cluster));
|
||||
HashMap<String, Pair<String, State>> newStates = getAllVms(conn);
|
||||
if (newStates == null) {
|
||||
s_logger.warn("Unable to get the vm states so no state sync at this point.");
|
||||
return null;
|
||||
}
|
||||
|
||||
HashMap<String, Pair<String, State>> oldStates = new HashMap<String, Pair<String, State>>(s_vms.size(_cluster));
|
||||
oldStates.putAll(s_vms.getClusterVmState(_cluster));
|
||||
|
||||
for (final Map.Entry<String, Pair<String, State>> entry : newStates.entrySet()) {
|
||||
|
|
@ -6608,6 +6709,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
s_logger.warn("Ignoring VM " + vm + " in migrating state.");
|
||||
} else {
|
||||
State newState = State.Stopped;
|
||||
s_logger.warn("The VM is now missing marking it as Stopped " + vm);
|
||||
changes.put(vm, new Pair<String, State>(host_uuid, newState));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,56 @@
|
|||
/**
|
||||
*
|
||||
* This software is licensed under the GNU General Public License v3 or later.
|
||||
*
|
||||
* It is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or any later version.
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
*/
|
||||
package com.cloud.hypervisor.xen.resource;
|
||||
|
||||
import java.io.File;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import javax.ejb.Local;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.resource.ServerResource;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.script.Script;
|
||||
|
||||
@Local(value = ServerResource.class)
|
||||
public class XenServer600Resource extends XenServer56Resource {
|
||||
private static final Logger s_logger = Logger.getLogger(XenServer600Resource.class);
|
||||
|
||||
public XenServer600Resource() {
|
||||
super();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String getGuestOsType(String stdType, boolean bootFromCD) {
|
||||
return CitrixHelper.getXenServer600GuestOsType(stdType, bootFromCD);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected List<File> getPatchFiles() {
|
||||
List<File> files = new ArrayList<File>();
|
||||
String patch = "scripts/vm/hypervisor/xenserver/xenserver60/patch";
|
||||
String patchfilePath = Script.findScript("", patch);
|
||||
if (patchfilePath == null) {
|
||||
throw new CloudRuntimeException("Unable to find patch file " + patch);
|
||||
}
|
||||
File file = new File(patchfilePath);
|
||||
files.add(file);
|
||||
return files;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -1,9 +1,22 @@
|
|||
/* Copyright (C) 2012 Citrix.com, Inc. All rights reserved.
|
||||
*
|
||||
* This software is licensed under the GNU General Public License v3 or later.
|
||||
*
|
||||
* It is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or any later version.
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
*/
|
||||
package com.cloud.hypervisor.xen.resource;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
|
|
@ -13,7 +26,7 @@ import com.cloud.vm.VirtualMachine.State;
|
|||
|
||||
public class XenServerPoolVms {
|
||||
private static final Logger s_logger = Logger.getLogger(XenServerPoolVms.class);
|
||||
HashMap<String/* clusterId */, HashMap<String/* vm name */, Pair<String/* host uuid */, State/* vm state */>>> _cluster_vms =
|
||||
private HashMap<String/* clusterId */, HashMap<String/* vm name */, Pair<String/* host uuid */, State/* vm state */>>> _cluster_vms =
|
||||
new HashMap<String, HashMap<String, Pair<String, State>>>();
|
||||
|
||||
public HashMap<String, Pair<String, State>> getClusterVmState(String clusterId){
|
||||
|
|
@ -28,9 +41,7 @@ public class XenServerPoolVms {
|
|||
|
||||
public void clear(String clusterId){
|
||||
HashMap<String, Pair<String, State>> _vms= getClusterVmState(clusterId);
|
||||
synchronized (_vms) {
|
||||
_vms.clear();
|
||||
}
|
||||
_vms.clear();
|
||||
}
|
||||
|
||||
public State getState(String clusterId, String name){
|
||||
|
|
@ -41,34 +52,23 @@ public class XenServerPoolVms {
|
|||
|
||||
public void put(String clusterId, String hostUuid, String name, State state){
|
||||
HashMap<String, Pair<String, State>> vms= getClusterVmState(clusterId);
|
||||
synchronized (vms) {
|
||||
vms.put(name, new Pair<String, State>(hostUuid, state));
|
||||
}
|
||||
vms.put(name, new Pair<String, State>(hostUuid, state));
|
||||
}
|
||||
|
||||
public void remove(String clusterId, String hostUuid, String name){
|
||||
HashMap<String, Pair<String, State>> vms= getClusterVmState(clusterId);
|
||||
synchronized (vms) {
|
||||
vms.remove(name);
|
||||
}
|
||||
vms.remove(name);
|
||||
}
|
||||
|
||||
public void putAll(String clusterId, HashMap<String, Pair<String, State>> new_vms){
|
||||
HashMap<String, Pair<String, State>> vms= getClusterVmState(clusterId);
|
||||
synchronized (vms) {
|
||||
vms.putAll(new_vms);
|
||||
}
|
||||
vms.putAll(new_vms);
|
||||
}
|
||||
|
||||
public int size(String clusterId){
|
||||
HashMap<String, Pair<String, State>> vms= getClusterVmState(clusterId);
|
||||
return vms.size();
|
||||
}
|
||||
|
||||
|
||||
public static void main(String args[]){
|
||||
XenServerPoolVms vms = new XenServerPoolVms();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString(){
|
||||
|
|
|
|||
|
|
@ -85,6 +85,8 @@ public class JuniperSrxResource implements ServerResource {
|
|||
private String _objectNameWordSep;
|
||||
private PrintWriter _toSrx;
|
||||
private BufferedReader _fromSrx;
|
||||
private PrintWriter _toUsageSrx;
|
||||
private BufferedReader _fromUsageSrx;
|
||||
private static Integer _numRetries;
|
||||
private static Integer _timeoutInSeconds;
|
||||
private static String _publicZone;
|
||||
|
|
@ -423,7 +425,7 @@ public class JuniperSrxResource implements ServerResource {
|
|||
return new MaintainAnswer(cmd);
|
||||
}
|
||||
|
||||
private synchronized ExternalNetworkResourceUsageAnswer execute(ExternalNetworkResourceUsageCommand cmd) {
|
||||
private ExternalNetworkResourceUsageAnswer execute(ExternalNetworkResourceUsageCommand cmd) {
|
||||
try {
|
||||
return getUsageAnswer(cmd);
|
||||
} catch (ExecutionException e) {
|
||||
|
|
@ -485,6 +487,48 @@ public class JuniperSrxResource implements ServerResource {
|
|||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
private boolean usageLogin() throws ExecutionException {
|
||||
String xml = SrxXml.LOGIN.getXml();
|
||||
xml = replaceXmlValue(xml, "username", _username);
|
||||
xml = replaceXmlValue(xml, "password", _password);
|
||||
return sendUsageRequestAndCheckResponse(SrxCommand.LOGIN, xml);
|
||||
}
|
||||
|
||||
private boolean openUsageSocket() {
|
||||
try {
|
||||
Socket s = new Socket(_ip, 3221);
|
||||
s.setKeepAlive(true);
|
||||
s.setSoTimeout(_timeoutInSeconds * 1000);
|
||||
_toUsageSrx = new PrintWriter(s.getOutputStream(), true);
|
||||
_fromUsageSrx = new BufferedReader(new InputStreamReader(s.getInputStream()));
|
||||
|
||||
// return the result of login
|
||||
return usageLogin();
|
||||
} catch (Exception e) {
|
||||
s_logger.error(e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
private boolean closeUsageSocket() {
|
||||
try {
|
||||
if (_toUsageSrx != null) {
|
||||
_toUsageSrx.close();
|
||||
}
|
||||
|
||||
if (_fromUsageSrx != null) {
|
||||
_fromUsageSrx.close();
|
||||
}
|
||||
|
||||
return true;
|
||||
} catch (IOException e) {
|
||||
s_logger.error(e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* Commit/rollback
|
||||
|
|
@ -2729,10 +2773,17 @@ public class JuniperSrxResource implements ServerResource {
|
|||
|
||||
private ExternalNetworkResourceUsageAnswer getUsageAnswer(ExternalNetworkResourceUsageCommand cmd) throws ExecutionException {
|
||||
try {
|
||||
String socOpenException = "Failed to open a connection for Usage data.";
|
||||
String socCloseException = "Unable to close connection for Usage data.";
|
||||
|
||||
if (!openUsageSocket()) {
|
||||
throw new ExecutionException(socOpenException);
|
||||
}
|
||||
|
||||
ExternalNetworkResourceUsageAnswer answer = new ExternalNetworkResourceUsageAnswer(cmd);
|
||||
|
||||
String xml = SrxXml.FIREWALL_FILTER_BYTES_GETALL.getXml();
|
||||
String rawUsageData = sendRequest(xml);
|
||||
String rawUsageData = sendUsageRequest(xml);
|
||||
Document doc = getDocument(rawUsageData);
|
||||
|
||||
NodeList counters = doc.getElementsByTagName("counter");
|
||||
|
|
@ -2763,6 +2814,10 @@ public class JuniperSrxResource implements ServerResource {
|
|||
}
|
||||
}
|
||||
|
||||
if (!closeUsageSocket()) {
|
||||
throw new ExecutionException(socCloseException);
|
||||
}
|
||||
|
||||
return answer;
|
||||
} catch (Exception e) {
|
||||
throw new ExecutionException(e.getMessage());
|
||||
|
|
@ -2900,6 +2955,56 @@ public class JuniperSrxResource implements ServerResource {
|
|||
}
|
||||
}
|
||||
|
||||
private String sendUsageRequest(String xmlRequest) throws ExecutionException {
|
||||
if (!xmlRequest.contains("request-login")) {
|
||||
s_logger.debug("Sending request: " + xmlRequest);
|
||||
} else {
|
||||
s_logger.debug("Sending login request");
|
||||
}
|
||||
|
||||
boolean timedOut = false;
|
||||
StringBuffer xmlResponseBuffer = new StringBuffer("");
|
||||
try {
|
||||
_toUsageSrx.write(xmlRequest);
|
||||
_toUsageSrx.flush();
|
||||
|
||||
String line = "";
|
||||
while ((line = _fromUsageSrx.readLine()) != null) {
|
||||
xmlResponseBuffer.append(line);
|
||||
if (line.contains("</rpc-reply>")) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
} catch (SocketTimeoutException e) {
|
||||
s_logger.debug(e);
|
||||
timedOut = true;
|
||||
} catch (IOException e) {
|
||||
s_logger.debug(e);
|
||||
return null;
|
||||
}
|
||||
|
||||
String xmlResponse = xmlResponseBuffer.toString();
|
||||
String errorMsg = null;
|
||||
|
||||
if (timedOut) {
|
||||
errorMsg = "Timed out on XML request: " + xmlRequest;
|
||||
} else if (xmlResponse.isEmpty()) {
|
||||
errorMsg = "Received an empty XML response.";
|
||||
} else if (xmlResponse.contains("Unexpected XML tag type")) {
|
||||
errorMsg = "Sent a command without being logged in.";
|
||||
} else if (!xmlResponse.contains("</rpc-reply>")) {
|
||||
errorMsg = "Didn't find the rpc-reply tag in the XML response.";
|
||||
}
|
||||
|
||||
if (errorMsg == null) {
|
||||
return xmlResponse;
|
||||
} else {
|
||||
s_logger.error(errorMsg);
|
||||
throw new ExecutionException(errorMsg);
|
||||
}
|
||||
}
|
||||
|
||||
private boolean checkResponse(String xmlResponse, boolean errorKeyAndValue, String key, String value) {
|
||||
if (!xmlResponse.contains("authentication-response")) {
|
||||
s_logger.debug("Checking response: " + xmlResponse);
|
||||
|
|
@ -2975,6 +3080,50 @@ public class JuniperSrxResource implements ServerResource {
|
|||
return checkResponse(xmlResponse, errorKeyAndValue, key, value);
|
||||
}
|
||||
|
||||
private boolean sendUsageRequestAndCheckResponse(SrxCommand command, String xmlRequest, String... keyAndValue) throws ExecutionException {
|
||||
boolean errorKeyAndValue = false;
|
||||
String key;
|
||||
String value;
|
||||
|
||||
switch (command) {
|
||||
|
||||
case LOGIN:
|
||||
key = "status";
|
||||
value = "success";
|
||||
break;
|
||||
|
||||
case OPEN_CONFIGURATION:
|
||||
case CLOSE_CONFIGURATION:
|
||||
errorKeyAndValue = true;
|
||||
key = "error";
|
||||
value = null;
|
||||
break;
|
||||
|
||||
case COMMIT:
|
||||
key = "commit-success";
|
||||
value = null;
|
||||
break;
|
||||
|
||||
case CHECK_IF_EXISTS:
|
||||
case CHECK_IF_IN_USE:
|
||||
assert (keyAndValue != null && keyAndValue.length == 2) : "If the SrxCommand is " + command + ", both a key and value must be specified.";
|
||||
|
||||
key = keyAndValue[0];
|
||||
value = keyAndValue[1];
|
||||
break;
|
||||
|
||||
default:
|
||||
key = "load-success";
|
||||
value = null;
|
||||
break;
|
||||
|
||||
}
|
||||
|
||||
String xmlResponse = sendUsageRequest(xmlRequest);
|
||||
return checkResponse(xmlResponse, errorKeyAndValue, key, value);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* XML utils
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -667,7 +667,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
command.add("-t", "nfs");
|
||||
if (_inSystemVM) {
|
||||
//Fedora Core 12 errors out with any -o option executed from java
|
||||
command.add("-o", "soft,timeo=133,retrans=2147483647,tcp,acdirmax=0,acdirmin=0");
|
||||
command.add("-o", "soft,timeo=133,retrans=1,tcp,acdirmax=0,acdirmin=0");
|
||||
}
|
||||
command.add(nfsPath);
|
||||
command.add(root);
|
||||
|
|
|
|||
|
|
@ -421,5 +421,32 @@ public class VMInstanceVO implements VirtualMachine, FiniteStateObject<State, Vi
|
|||
toString = new StringBuilder("VM[").append(type.toString()).append("|").append(instanceName).append("]").toString();
|
||||
}
|
||||
return toString;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
final int prime = 31;
|
||||
int result = 1;
|
||||
result = prime * result + (int) (id ^ (id >>> 32));
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (this == obj)
|
||||
return true;
|
||||
if (obj == null)
|
||||
return false;
|
||||
if (getClass() != obj.getClass())
|
||||
return false;
|
||||
VMInstanceVO other = (VMInstanceVO) obj;
|
||||
if (id != other.id)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -42,9 +42,7 @@
|
|||
<classpathentry exported="true" kind="lib" path="vmware-lib-jaxen-jdom.jar"/>
|
||||
<classpathentry exported="true" kind="lib" path="vmware-lib-jaxrpc.jar"/>
|
||||
<classpathentry exported="true" kind="lib" path="vmware-lib-jdom.jar"/>
|
||||
<classpathentry exported="true" kind="lib" path="vmware-lib-mailapi.jar"/>
|
||||
<classpathentry exported="true" kind="lib" path="vmware-lib-saxpath.jar"/>
|
||||
<classpathentry exported="true" kind="lib" path="vmware-lib-smtp.jar"/>
|
||||
<classpathentry exported="true" kind="lib" path="vmware-lib-wbem.jar"/>
|
||||
<classpathentry exported="true" kind="lib" path="vmware-lib-xalan.jar"/>
|
||||
<classpathentry exported="true" kind="lib" path="vmware-lib-xerces.jar"/>
|
||||
|
|
|
|||
Binary file not shown.
Binary file not shown.
|
|
@ -48,7 +48,7 @@ if [ $? -ne 0 ]; then
|
|||
exit 0
|
||||
fi
|
||||
|
||||
mount -o tcp,ro $mountpoint $localmp
|
||||
mount -o tcp,soft,ro,timeo=133,retrans=1 $mountpoint $localmp
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "6#can't mount $mountpoint to $localmp"
|
||||
exit 0
|
||||
|
|
@ -87,7 +87,7 @@ copyvhd()
|
|||
cleanup
|
||||
exit 0
|
||||
fi
|
||||
if [ $type != "nfs" ]; then
|
||||
if [ $type != "nfs" -a $type != "ext" ]; then
|
||||
dd if=$srcvhd of=$desvhd bs=512 seek=$(($(($vsize/512))-1)) count=1
|
||||
$VHDUTIL modify -s $vsize -n $desvhd
|
||||
if [ $? -ne 0 ]; then
|
||||
|
|
@ -107,7 +107,7 @@ copyvhd()
|
|||
fi
|
||||
}
|
||||
|
||||
if [ $type == "nfs" ]; then
|
||||
if [ $type == "nfs" -o $type == "ext" ]; then
|
||||
uuid=$(uuidgen -r)
|
||||
desvhd=/var/run/sr-mount/$sruuid/$uuid
|
||||
copyvhd $desvhd $vhdfile 0 $type
|
||||
|
|
|
|||
|
|
@ -56,7 +56,7 @@ if [ $? -ne 0 ]; then
|
|||
exit 0
|
||||
fi
|
||||
|
||||
mount -o tcp $mountpoint $localmp
|
||||
mount -o tcp,soft,timeo=133,retrans=1 $mountpoint $localmp
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "6#can't mount $mountpoint to $localmp"
|
||||
exit 0
|
||||
|
|
@ -64,7 +64,7 @@ fi
|
|||
|
||||
vhdfile=$localmp/${vdiuuid}.vhd
|
||||
|
||||
if [ $type == "nfs" ]; then
|
||||
if [ $type == "nfs" -o $type == "ext" ]; then
|
||||
dd if=/var/run/sr-mount/$sruuid/${vdiuuid}.vhd of=$vhdfile bs=2M
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "8#failed to copy /var/run/sr-mount/$sruuid/${vdiuuid}.vhd to secondarystorage"
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
usage() {
|
||||
printf "Usage: %s [vhd file in secondary storage] [template directory in secondary storage] \n" $(basename $0)
|
||||
}
|
||||
|
||||
options='tcp,soft,timeo=133,retrans=1'
|
||||
cleanup()
|
||||
{
|
||||
if [ ! -z $snapshotdir ]; then
|
||||
|
|
@ -47,7 +47,7 @@ if [ $? -ne 0 ]; then
|
|||
exit 0
|
||||
fi
|
||||
|
||||
mount -o tcp $snapshoturl $snapshotdir
|
||||
mount -o $options $snapshoturl $snapshotdir
|
||||
if [ $? -ne 0 ]; then
|
||||
rmdir $snapshotdir
|
||||
echo "5#can not mount $snapshoturl to $snapshotdir"
|
||||
|
|
@ -63,7 +63,7 @@ if [ $? -ne 0 ]; then
|
|||
exit 0
|
||||
fi
|
||||
|
||||
mount -o tcp $templateurl $templatedir
|
||||
mount -o $options $templateurl $templatedir
|
||||
if [ $? -ne 0 ]; then
|
||||
rmdir $templatedir
|
||||
templatedir=""
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
#!/usr/bin/python
|
||||
# Version 2.2.13.20111117130644
|
||||
# Version @VERSION@
|
||||
#
|
||||
# A plugin for executing script needed by vmops cloud
|
||||
|
||||
|
|
@ -47,12 +47,17 @@ def setup_iscsi(session, args):
|
|||
def getvncport(session, args):
|
||||
domid = args['domID']
|
||||
hvm = args['hvm']
|
||||
version = args['version']
|
||||
if hvm == 'true':
|
||||
path1 = "/local/domain/" + domid + "/qemu-pid"
|
||||
path2 = "/local/domain/" + domid + "/console/vnc-port"
|
||||
else:
|
||||
path1 = "/local/domain/" + domid + "/serial/0/vncterm-pid"
|
||||
path2 = "/local/domain/" + domid + "/serial/0/vnc-port"
|
||||
if version == '6.0.0':
|
||||
path1 = "/local/domain/" + domid + "/vncterm-pid"
|
||||
path2 = "/local/domain/" + domid + "/console/vnc-port"
|
||||
else:
|
||||
path1 = "/local/domain/" + domid + "/serial/0/vncterm-pid"
|
||||
path2 = "/local/domain/" + domid + "/serial/0/vnc-port"
|
||||
try:
|
||||
cmd = ["xenstore-read", path1]
|
||||
pid = util.pread2(cmd)
|
||||
|
|
@ -381,7 +386,6 @@ def can_bridge_firewall(session, args):
|
|||
util.pread2(['iptables', '-D', 'FORWARD', '-j', 'RH-Firewall-1-INPUT'])
|
||||
except:
|
||||
util.SMlog('Chain BRIDGE-FIREWALL already exists')
|
||||
default_ebtables_rules()
|
||||
privnic = get_private_nic(session, args)
|
||||
result = 'true'
|
||||
try:
|
||||
|
|
@ -392,7 +396,8 @@ def can_bridge_firewall(session, args):
|
|||
util.pread2(['iptables', '-A', 'FORWARD', '-m', 'physdev', '--physdev-is-bridged', '--physdev-out', privnic, '-j', 'ACCEPT'])
|
||||
util.pread2(['iptables', '-A', 'FORWARD', '-j', 'DROP'])
|
||||
except:
|
||||
result = 'false'
|
||||
return 'false'
|
||||
default_ebtables_rules()
|
||||
allow_egress_traffic(session)
|
||||
if not os.path.exists('/var/run/cloud'):
|
||||
os.makedirs('/var/run/cloud')
|
||||
|
|
@ -421,11 +426,20 @@ def default_ebtables_rules():
|
|||
util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', 'IPv6', '-j', 'DROP'])
|
||||
# deny vlan
|
||||
util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', '802_1Q', '-j', 'DROP'])
|
||||
# deny all other 802. frames
|
||||
util.pread2(['ebtables', '-A', 'FORWARD', '-j', 'DROP'])
|
||||
# deny all others (e.g., 802.1d, CDP)
|
||||
util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-j', 'DROP'])
|
||||
except:
|
||||
util.SMlog('Chain DEFAULT_EBTABLES already exists')
|
||||
|
||||
#deny traffic from vms into hypervisor. Note: does not protect from vms in other pods
|
||||
try:
|
||||
util.pread2(['ebtables', '-D', 'INPUT', '-s', '6:0:0:0:0:0/ff:0:0:0:0:0', '-j', 'DROP'])
|
||||
except:
|
||||
pass
|
||||
|
||||
util.pread2(['ebtables', '-A', 'INPUT', '-s', '6:0:0:0:0:0/ff:0:0:0:0:0', '-j', 'DROP'])
|
||||
|
||||
|
||||
@echo
|
||||
def allow_egress_traffic(session):
|
||||
devs = []
|
||||
|
|
@ -595,10 +609,12 @@ def default_ebtables_antispoof_rules(vm_chain, vifs, vm_ip, vm_mac):
|
|||
util.SMlog("Failed to create ebtables antispoof chain, skipping")
|
||||
return 'true'
|
||||
|
||||
# note all rules for packets into the bridge (-i) precede all output rules (-o)
|
||||
# always start after the first rule in the FORWARD chain that jumps to DEFAULT_EBTABLES chain
|
||||
try:
|
||||
for vif in vifs:
|
||||
util.pread2(['ebtables', '-I', 'FORWARD', '2', '-i', vif, '-j', vm_chain])
|
||||
util.pread2(['ebtables', '-I', 'FORWARD', '2', '-o', vif, '-j', vm_chain])
|
||||
util.pread2(['ebtables', '-A', 'FORWARD', '-o', vif, '-j', vm_chain])
|
||||
except:
|
||||
util.SMlog("Failed to program default ebtables FORWARD rules for %s" % vm_chain)
|
||||
return 'false'
|
||||
|
|
@ -632,9 +648,10 @@ def default_arp_antispoof(vm_chain, vifs, vm_ip, vm_mac):
|
|||
util.SMlog("Failed to create arptables rule, skipping")
|
||||
return 'true'
|
||||
|
||||
# note all rules for packets into the bridge (-i) precede all output rules (-o)
|
||||
try:
|
||||
for vif in vifs:
|
||||
util.pread2(['arptables', '-A', 'FORWARD', '-i', vif, '-j', vm_chain])
|
||||
util.pread2(['arptables', '-I', 'FORWARD', '-i', vif, '-j', vm_chain])
|
||||
util.pread2(['arptables', '-A', 'FORWARD', '-o', vif, '-j', vm_chain])
|
||||
except:
|
||||
util.SMlog("Failed to program default arptables rules in FORWARD chain vm=" + vm_chain)
|
||||
|
|
@ -645,7 +662,8 @@ def default_arp_antispoof(vm_chain, vifs, vm_ip, vm_mac):
|
|||
#accept arp replies into the bridge as long as the source mac and ips match the vm
|
||||
util.pread2(['arptables', '-A', vm_chain, '-i', vif, '--opcode', 'Reply', '--source-mac', vm_mac, '--source-ip', vm_ip, '-j', 'ACCEPT'])
|
||||
#accept any arp requests from this vm. In the future this can be restricted to deny attacks on hosts
|
||||
util.pread2(['arptables', '-A', vm_chain, '-i', vif, '--opcode', 'Request', '-j', 'ACCEPT'])
|
||||
#also important to restrict source ip and src mac in these requests as they can be used to update arp tables on destination
|
||||
util.pread2(['arptables', '-A', vm_chain, '-i', vif, '--opcode', 'Request', '--source-mac', vm_mac, '--source-ip', vm_ip, '-j', 'RETURN'])
|
||||
#accept any arp requests to this vm as long as the request is for this vm's ip
|
||||
util.pread2(['arptables', '-A', vm_chain, '-o', vif, '--opcode', 'Request', '--destination-ip', vm_ip, '-j', 'ACCEPT'])
|
||||
#accept any arp replies to this vm as long as the mac and ip matches
|
||||
|
|
@ -770,6 +788,8 @@ def default_network_rules(session, args):
|
|||
#don't let vm spoof its ip address
|
||||
for v in vifs:
|
||||
util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', v, '--source', vm_ip, '-j', 'RETURN'])
|
||||
util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', v, '--source', '!', vm_ip, '-j', 'DROP'])
|
||||
util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-out', v, '--destination', '!', vm_ip, '-j', 'DROP'])
|
||||
util.pread2(['iptables', '-A', vmchain_default, '-j', vmchain])
|
||||
except:
|
||||
util.SMlog("Failed to program default rules for vm " + vm_name)
|
||||
|
|
|
|||
|
|
@ -284,9 +284,9 @@ def makedirs(path):
|
|||
|
||||
def mount(remoteDir, localDir):
|
||||
makedirs(localDir)
|
||||
|
||||
options = "soft,tcp,timeo=133,retrans=1"
|
||||
try:
|
||||
cmd = ['mount', '-o', 'tcp', remoteDir, localDir]
|
||||
cmd = ['mount', '-o', options, remoteDir, localDir]
|
||||
txt = util.pread2(cmd)
|
||||
except:
|
||||
txt = ''
|
||||
|
|
|
|||
|
|
@ -1,145 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# Copyright (C) 2006-2007 XenSource Ltd.
|
||||
# Copyright (C) 2008-2009 Citrix Ltd.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License as published
|
||||
# by the Free Software Foundation; version 2.1 only.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# nfs.py: NFS related utility functions
|
||||
|
||||
import util, errno, os, xml.dom.minidom
|
||||
|
||||
# The algorithm for tcp and udp (at least in the linux kernel) for
|
||||
# NFS timeout on softmounts is as follows:
|
||||
#
|
||||
# UDP:
|
||||
# As long as the request wasn't started more than timeo * (2 ^ retrans)
|
||||
# in the past, keep doubling the timeout.
|
||||
#
|
||||
# TCP:
|
||||
# As long as the request wasn't started more than timeo * (1 + retrans)
|
||||
# in the past, keep increaing the timeout by timeo.
|
||||
#
|
||||
# The time when the retrans may retry has been made will be:
|
||||
# For udp: timeo * (2 ^ retrans * 2 - 1)
|
||||
# For tcp: timeo * n! where n is the smallest n for which n! > 1 + retrans
|
||||
#
|
||||
# thus for retrans=1, timeo can be the same for both tcp and udp,
|
||||
# because the first doubling (timeo*2) is the same as the first increment
|
||||
# (timeo+timeo).
|
||||
|
||||
SOFTMOUNT_TIMEOUT = int((40.0/3.0) * 10.0) # 1/10 s
|
||||
SOFTMOUNT_RETRANS = 10
|
||||
RPCINFO_BIN = "/usr/sbin/rpcinfo"
|
||||
SHOWMOUNT_BIN = "/usr/sbin/showmount"
|
||||
|
||||
|
||||
class NfsException(Exception):
|
||||
def __init__(self, errstr):
|
||||
self.errstr = errstr
|
||||
|
||||
|
||||
def check_server_tcp(server):
|
||||
"""Make sure that NFS over TCP/IP V3 is supported on the server.
|
||||
Returns True if everything is OK, False otherwise."""
|
||||
try:
|
||||
util.ioretry(lambda: util.pread([RPCINFO_BIN,"-t",
|
||||
"%s" % server, "nfs","3"]),
|
||||
errlist=[errno.EPERM], maxretry=2, nofail=True)
|
||||
except util.CommandException, inst:
|
||||
raise NfsException("rpcinfo failed or timed out: return code %d" %
|
||||
inst.code)
|
||||
|
||||
|
||||
def soft_mount(mountpoint, remoteserver, remotepath, transport):
|
||||
"""Mount the remote NFS export at 'mountpoint'"""
|
||||
try:
|
||||
if not util.ioretry(lambda: util.isdir(mountpoint)):
|
||||
util.ioretry(lambda: util.makedirs(mountpoint))
|
||||
except util.CommandException, inst:
|
||||
raise NfsException("Failed to make directory: code is %d" %
|
||||
inst.code)
|
||||
|
||||
options = "soft,timeo=%d,retrans=%d,%s,noac" % (SOFTMOUNT_TIMEOUT,
|
||||
SOFTMOUNT_RETRANS,
|
||||
transport)
|
||||
try:
|
||||
util.ioretry(lambda:
|
||||
util.pread(["mount.nfs", "%s:%s"
|
||||
% (remoteserver, remotepath),
|
||||
mountpoint, "-o", options]),
|
||||
errlist=[errno.EPIPE, errno.EIO],
|
||||
maxretry=2, nofail=True)
|
||||
except util.CommandException, inst:
|
||||
raise NfsException("mount failed with return code %d" % inst.code)
|
||||
|
||||
|
||||
def unmount(mountpoint, rmmountpoint):
|
||||
"""Unmount the mounted mountpoint"""
|
||||
try:
|
||||
util.pread(["umount", mountpoint])
|
||||
except util.CommandException, inst:
|
||||
raise NfsException("umount failed with return code %d" % inst.code)
|
||||
|
||||
if rmmountpoint:
|
||||
try:
|
||||
os.rmdir(mountpoint)
|
||||
except OSError, inst:
|
||||
raise NfsException("rmdir failed with error '%s'" % inst.strerror)
|
||||
|
||||
|
||||
def scan_exports(target):
|
||||
util.SMlog("scanning")
|
||||
cmd = [SHOWMOUNT_BIN, "--no-headers", "-e", target]
|
||||
dom = xml.dom.minidom.Document()
|
||||
element = dom.createElement("nfs-exports")
|
||||
dom.appendChild(element)
|
||||
for val in util.pread2(cmd).split('\n'):
|
||||
if not len(val):
|
||||
continue
|
||||
entry = dom.createElement('Export')
|
||||
element.appendChild(entry)
|
||||
|
||||
subentry = dom.createElement("Target")
|
||||
entry.appendChild(subentry)
|
||||
textnode = dom.createTextNode(target)
|
||||
subentry.appendChild(textnode)
|
||||
|
||||
(path, access) = val.split()
|
||||
subentry = dom.createElement("Path")
|
||||
entry.appendChild(subentry)
|
||||
textnode = dom.createTextNode(path)
|
||||
subentry.appendChild(textnode)
|
||||
|
||||
subentry = dom.createElement("Accesslist")
|
||||
entry.appendChild(subentry)
|
||||
textnode = dom.createTextNode(access)
|
||||
subentry.appendChild(textnode)
|
||||
|
||||
return dom
|
||||
|
||||
def scan_srlist(path):
|
||||
dom = xml.dom.minidom.Document()
|
||||
element = dom.createElement("SRlist")
|
||||
dom.appendChild(element)
|
||||
for val in filter(util.match_uuid, util.ioretry( \
|
||||
lambda: util.listdir(path))):
|
||||
fullpath = os.path.join(path, val)
|
||||
if not util.ioretry(lambda: util.isdir(fullpath)):
|
||||
continue
|
||||
|
||||
entry = dom.createElement('SR')
|
||||
element.appendChild(entry)
|
||||
|
||||
subentry = dom.createElement("UUID")
|
||||
entry.appendChild(subentry)
|
||||
textnode = dom.createTextNode(val)
|
||||
subentry.appendChild(textnode)
|
||||
|
||||
return dom.toprettyxml()
|
||||
|
|
@ -10,7 +10,6 @@
|
|||
# If [source path] starts with '~', then it is path relative to management server home directory.
|
||||
# If [source path] does not start with '/' or '~', then it is relative path to the location of the patch file.
|
||||
NFSSR.py=/opt/xensource/sm
|
||||
nfs.py=/opt/xensource/sm
|
||||
vmops=..,0755,/etc/xapi.d/plugins
|
||||
vmopsSnapshot=..,0755,/etc/xapi.d/plugins
|
||||
hostvmstats.py=..,0755,/opt/xensource/sm
|
||||
|
|
|
|||
|
|
@ -1,145 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
# Copyright (C) 2006-2007 XenSource Ltd.
|
||||
# Copyright (C) 2008-2009 Citrix Ltd.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License as published
|
||||
# by the Free Software Foundation; version 2.1 only.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# nfs.py: NFS related utility functions
|
||||
|
||||
import util, errno, os, xml.dom.minidom
|
||||
|
||||
# The algorithm for tcp and udp (at least in the linux kernel) for
|
||||
# NFS timeout on softmounts is as follows:
|
||||
#
|
||||
# UDP:
|
||||
# As long as the request wasn't started more than timeo * (2 ^ retrans)
|
||||
# in the past, keep doubling the timeout.
|
||||
#
|
||||
# TCP:
|
||||
# As long as the request wasn't started more than timeo * (1 + retrans)
|
||||
# in the past, keep increaing the timeout by timeo.
|
||||
#
|
||||
# The time when the retrans may retry has been made will be:
|
||||
# For udp: timeo * (2 ^ retrans * 2 - 1)
|
||||
# For tcp: timeo * n! where n is the smallest n for which n! > 1 + retrans
|
||||
#
|
||||
# thus for retrans=1, timeo can be the same for both tcp and udp,
|
||||
# because the first doubling (timeo*2) is the same as the first increment
|
||||
# (timeo+timeo).
|
||||
|
||||
SOFTMOUNT_TIMEOUT = int((40.0/3.0) * 10.0) # 1/10 s
|
||||
SOFTMOUNT_RETRANS = 10
|
||||
RPCINFO_BIN = "/usr/sbin/rpcinfo"
|
||||
SHOWMOUNT_BIN = "/usr/sbin/showmount"
|
||||
|
||||
|
||||
class NfsException(Exception):
|
||||
def __init__(self, errstr):
|
||||
self.errstr = errstr
|
||||
|
||||
|
||||
def check_server_tcp(server):
|
||||
"""Make sure that NFS over TCP/IP V3 is supported on the server.
|
||||
Returns True if everything is OK, False otherwise."""
|
||||
try:
|
||||
util.ioretry(lambda: util.pread([RPCINFO_BIN,"-t",
|
||||
"%s" % server, "nfs","3"]),
|
||||
errlist=[errno.EPERM], maxretry=2, nofail=True)
|
||||
except util.CommandException, inst:
|
||||
raise NfsException("rpcinfo failed or timed out: return code %d" %
|
||||
inst.code)
|
||||
|
||||
|
||||
def soft_mount(mountpoint, remoteserver, remotepath, transport):
|
||||
"""Mount the remote NFS export at 'mountpoint'"""
|
||||
try:
|
||||
if not util.ioretry(lambda: util.isdir(mountpoint)):
|
||||
util.ioretry(lambda: util.makedirs(mountpoint))
|
||||
except util.CommandException, inst:
|
||||
raise NfsException("Failed to make directory: code is %d" %
|
||||
inst.code)
|
||||
|
||||
options = "soft,timeo=%d,retrans=%d,%s,noac" % (SOFTMOUNT_TIMEOUT,
|
||||
SOFTMOUNT_RETRANS,
|
||||
transport)
|
||||
try:
|
||||
util.ioretry(lambda:
|
||||
util.pread(["mount.nfs", "%s:%s"
|
||||
% (remoteserver, remotepath),
|
||||
mountpoint, "-o", options]),
|
||||
errlist=[errno.EPIPE, errno.EIO],
|
||||
maxretry=2, nofail=True)
|
||||
except util.CommandException, inst:
|
||||
raise NfsException("mount failed with return code %d" % inst.code)
|
||||
|
||||
|
||||
def unmount(mountpoint, rmmountpoint):
|
||||
"""Unmount the mounted mountpoint"""
|
||||
try:
|
||||
util.pread(["umount", mountpoint])
|
||||
except util.CommandException, inst:
|
||||
raise NfsException("umount failed with return code %d" % inst.code)
|
||||
|
||||
if rmmountpoint:
|
||||
try:
|
||||
os.rmdir(mountpoint)
|
||||
except OSError, inst:
|
||||
raise NfsException("rmdir failed with error '%s'" % inst.strerror)
|
||||
|
||||
|
||||
def scan_exports(target):
|
||||
util.SMlog("scanning")
|
||||
cmd = [SHOWMOUNT_BIN, "--no-headers", "-e", target]
|
||||
dom = xml.dom.minidom.Document()
|
||||
element = dom.createElement("nfs-exports")
|
||||
dom.appendChild(element)
|
||||
for val in util.pread2(cmd).split('\n'):
|
||||
if not len(val):
|
||||
continue
|
||||
entry = dom.createElement('Export')
|
||||
element.appendChild(entry)
|
||||
|
||||
subentry = dom.createElement("Target")
|
||||
entry.appendChild(subentry)
|
||||
textnode = dom.createTextNode(target)
|
||||
subentry.appendChild(textnode)
|
||||
|
||||
(path, access) = val.split()
|
||||
subentry = dom.createElement("Path")
|
||||
entry.appendChild(subentry)
|
||||
textnode = dom.createTextNode(path)
|
||||
subentry.appendChild(textnode)
|
||||
|
||||
subentry = dom.createElement("Accesslist")
|
||||
entry.appendChild(subentry)
|
||||
textnode = dom.createTextNode(access)
|
||||
subentry.appendChild(textnode)
|
||||
|
||||
return dom
|
||||
|
||||
def scan_srlist(path):
|
||||
dom = xml.dom.minidom.Document()
|
||||
element = dom.createElement("SRlist")
|
||||
dom.appendChild(element)
|
||||
for val in filter(util.match_uuid, util.ioretry( \
|
||||
lambda: util.listdir(path))):
|
||||
fullpath = os.path.join(path, val)
|
||||
if not util.ioretry(lambda: util.isdir(fullpath)):
|
||||
continue
|
||||
|
||||
entry = dom.createElement('SR')
|
||||
element.appendChild(entry)
|
||||
|
||||
subentry = dom.createElement("UUID")
|
||||
entry.appendChild(subentry)
|
||||
textnode = dom.createTextNode(val)
|
||||
subentry.appendChild(textnode)
|
||||
|
||||
return dom.toprettyxml()
|
||||
|
|
@ -10,7 +10,6 @@
|
|||
# If [source path] starts with '~', then it is path relative to management server home directory.
|
||||
# If [source path] does not start with '/' or '~', then it is relative path to the location of the patch file.
|
||||
NFSSR.py=/opt/xensource/sm
|
||||
nfs.py=/opt/xensource/sm
|
||||
vmops=..,0755,/etc/xapi.d/plugins
|
||||
ovsgre=..,0755,/etc/xapi.d/plugins
|
||||
ovstunnel=..,0755,/etc/xapi.d/plugins
|
||||
|
|
|
|||
|
|
@ -0,0 +1,277 @@
|
|||
#!/usr/bin/python
|
||||
# Copyright (C) 2006-2007 XenSource Ltd.
|
||||
# Copyright (C) 2008-2009 Citrix Ltd.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License as published
|
||||
# by the Free Software Foundation; version 2.1 only.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# FileSR: local-file storage repository
|
||||
|
||||
import SR, VDI, SRCommand, FileSR, util
|
||||
import errno
|
||||
import os, re, sys
|
||||
import xml.dom.minidom
|
||||
import xmlrpclib
|
||||
import xs_errors
|
||||
import nfs
|
||||
import vhdutil
|
||||
from lock import Lock
|
||||
import cleanup
|
||||
|
||||
CAPABILITIES = ["SR_PROBE","SR_UPDATE", "SR_CACHING",
|
||||
"VDI_CREATE","VDI_DELETE","VDI_ATTACH","VDI_DETACH",
|
||||
"VDI_UPDATE", "VDI_CLONE","VDI_SNAPSHOT","VDI_RESIZE",
|
||||
"VDI_GENERATE_CONFIG",
|
||||
"VDI_RESET_ON_BOOT", "ATOMIC_PAUSE"]
|
||||
|
||||
CONFIGURATION = [ [ 'server', 'hostname or IP address of NFS server (required)' ], \
|
||||
[ 'serverpath', 'path on remote server (required)' ] ]
|
||||
|
||||
|
||||
DRIVER_INFO = {
|
||||
'name': 'NFS VHD',
|
||||
'description': 'SR plugin which stores disks as VHD files on a remote NFS filesystem',
|
||||
'vendor': 'Citrix Systems Inc',
|
||||
'copyright': '(C) 2008 Citrix Systems Inc',
|
||||
'driver_version': '1.0',
|
||||
'required_api_version': '1.0',
|
||||
'capabilities': CAPABILITIES,
|
||||
'configuration': CONFIGURATION
|
||||
}
|
||||
|
||||
DRIVER_CONFIG = {"ATTACH_FROM_CONFIG_WITH_TAPDISK": True}
|
||||
|
||||
|
||||
# The mountpoint for the directory when performing an sr_probe. All probes
|
||||
# are guaranteed to be serialised by xapi, so this single mountpoint is fine.
|
||||
PROBE_MOUNTPOINT = "probe"
|
||||
NFSPORT = 2049
|
||||
DEFAULT_TRANSPORT = "tcp"
|
||||
|
||||
|
||||
class NFSSR(FileSR.FileSR):
|
||||
"""NFS file-based storage repository"""
|
||||
def handles(type):
|
||||
return type == 'nfs'
|
||||
handles = staticmethod(handles)
|
||||
|
||||
|
||||
def load(self, sr_uuid):
|
||||
self.ops_exclusive = FileSR.OPS_EXCLUSIVE
|
||||
self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
|
||||
self.sr_vditype = SR.DEFAULT_TAP
|
||||
self.driver_config = DRIVER_CONFIG
|
||||
if not self.dconf.has_key('server'):
|
||||
raise xs_errors.XenError('ConfigServerMissing')
|
||||
self.remoteserver = self.dconf['server']
|
||||
self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
|
||||
|
||||
# Test for the optional 'nfsoptions' dconf attribute
|
||||
self.transport = DEFAULT_TRANSPORT
|
||||
if self.dconf.has_key('useUDP') and self.dconf['useUDP'] == 'true':
|
||||
self.transport = "udp"
|
||||
|
||||
|
||||
def validate_remotepath(self, scan):
|
||||
if not self.dconf.has_key('serverpath'):
|
||||
if scan:
|
||||
try:
|
||||
self.scan_exports(self.dconf['server'])
|
||||
except:
|
||||
pass
|
||||
raise xs_errors.XenError('ConfigServerPathMissing')
|
||||
if not self._isvalidpathstring(self.dconf['serverpath']):
|
||||
raise xs_errors.XenError('ConfigServerPathBad', \
|
||||
opterr='serverpath is %s' % self.dconf['serverpath'])
|
||||
|
||||
def check_server(self):
|
||||
try:
|
||||
nfs.check_server_tcp(self.remoteserver)
|
||||
except nfs.NfsException, exc:
|
||||
raise xs_errors.XenError('NFSVersion',
|
||||
opterr=exc.errstr)
|
||||
|
||||
|
||||
def mount(self, mountpoint, remotepath):
|
||||
try:
|
||||
nfs.soft_mount(mountpoint, self.remoteserver, remotepath, self.transport)
|
||||
except nfs.NfsException, exc:
|
||||
raise xs_errors.XenError('NFSMount', opterr=exc.errstr)
|
||||
|
||||
|
||||
def attach(self, sr_uuid):
|
||||
self.validate_remotepath(False)
|
||||
#self.remotepath = os.path.join(self.dconf['serverpath'], sr_uuid)
|
||||
self.remotepath = self.dconf['serverpath']
|
||||
util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
|
||||
self.mount_remotepath(sr_uuid)
|
||||
|
||||
|
||||
def mount_remotepath(self, sr_uuid):
|
||||
if not self._checkmount():
|
||||
self.check_server()
|
||||
self.mount(self.path, self.remotepath)
|
||||
|
||||
return super(NFSSR, self).attach(sr_uuid)
|
||||
|
||||
|
||||
def probe(self):
|
||||
# Verify NFS target and port
|
||||
util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
|
||||
|
||||
self.validate_remotepath(True)
|
||||
self.check_server()
|
||||
|
||||
temppath = os.path.join(SR.MOUNT_BASE, PROBE_MOUNTPOINT)
|
||||
|
||||
self.mount(temppath, self.dconf['serverpath'])
|
||||
try:
|
||||
return nfs.scan_srlist(temppath)
|
||||
finally:
|
||||
try:
|
||||
nfs.unmount(temppath, True)
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
def detach(self, sr_uuid):
|
||||
"""Detach the SR: Unmounts and removes the mountpoint"""
|
||||
if not self._checkmount():
|
||||
return
|
||||
util.SMlog("Aborting GC/coalesce")
|
||||
cleanup.abort(self.uuid)
|
||||
|
||||
# Change directory to avoid unmount conflicts
|
||||
os.chdir(SR.MOUNT_BASE)
|
||||
|
||||
try:
|
||||
nfs.unmount(self.path, True)
|
||||
except nfs.NfsException, exc:
|
||||
raise xs_errors.XenError('NFSUnMount', opterr=exc.errstr)
|
||||
|
||||
return super(NFSSR, self).detach(sr_uuid)
|
||||
|
||||
|
||||
def create(self, sr_uuid, size):
|
||||
util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
|
||||
self.validate_remotepath(True)
|
||||
if self._checkmount():
|
||||
raise xs_errors.XenError('NFSAttached')
|
||||
|
||||
# Set the target path temporarily to the base dir
|
||||
# so that we can create the target SR directory
|
||||
self.remotepath = self.dconf['serverpath']
|
||||
try:
|
||||
self.mount_remotepath(sr_uuid)
|
||||
except Exception, exn:
|
||||
try:
|
||||
os.rmdir(self.path)
|
||||
except:
|
||||
pass
|
||||
raise exn
|
||||
|
||||
#newpath = os.path.join(self.path, sr_uuid)
|
||||
#if util.ioretry(lambda: util.pathexists(newpath)):
|
||||
# if len(util.ioretry(lambda: util.listdir(newpath))) != 0:
|
||||
# self.detach(sr_uuid)
|
||||
# raise xs_errors.XenError('SRExists')
|
||||
#else:
|
||||
# try:
|
||||
# util.ioretry(lambda: util.makedirs(newpath))
|
||||
# except util.CommandException, inst:
|
||||
# if inst.code != errno.EEXIST:
|
||||
# self.detach(sr_uuid)
|
||||
# raise xs_errors.XenError('NFSCreate',
|
||||
# opterr='remote directory creation error is %d'
|
||||
# % inst.code)
|
||||
self.detach(sr_uuid)
|
||||
|
||||
def delete(self, sr_uuid):
|
||||
# try to remove/delete non VDI contents first
|
||||
super(NFSSR, self).delete(sr_uuid)
|
||||
try:
|
||||
if self._checkmount():
|
||||
self.detach(sr_uuid)
|
||||
|
||||
# Set the target path temporarily to the base dir
|
||||
# so that we can remove the target SR directory
|
||||
self.remotepath = self.dconf['serverpath']
|
||||
self.mount_remotepath(sr_uuid)
|
||||
newpath = os.path.join(self.path, sr_uuid)
|
||||
|
||||
if util.ioretry(lambda: util.pathexists(newpath)):
|
||||
util.ioretry(lambda: os.rmdir(newpath))
|
||||
self.detach(sr_uuid)
|
||||
except util.CommandException, inst:
|
||||
self.detach(sr_uuid)
|
||||
if inst.code != errno.ENOENT:
|
||||
raise xs_errors.XenError('NFSDelete')
|
||||
|
||||
def vdi(self, uuid, loadLocked = False):
|
||||
if not loadLocked:
|
||||
return NFSFileVDI(self, uuid)
|
||||
return NFSFileVDI(self, uuid)
|
||||
|
||||
def _checkmount(self):
|
||||
return util.ioretry(lambda: util.pathexists(self.path)) \
|
||||
and util.ioretry(lambda: util.ismount(self.path))
|
||||
|
||||
def scan_exports(self, target):
|
||||
util.SMlog("scanning2 (target=%s)" % target)
|
||||
dom = nfs.scan_exports(target)
|
||||
print >>sys.stderr,dom.toprettyxml()
|
||||
|
||||
class NFSFileVDI(FileSR.FileVDI):
|
||||
def attach(self, sr_uuid, vdi_uuid):
|
||||
if self.sr.srcmd.params.has_key("vdi_ref"):
|
||||
try:
|
||||
vdi_ref = self.sr.srcmd.params['vdi_ref']
|
||||
self.session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, \
|
||||
"vdi-type")
|
||||
self.session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, \
|
||||
"storage-type")
|
||||
self.session.xenapi.VDI.add_to_xenstore_data(vdi_ref, \
|
||||
"storage-type", "nfs")
|
||||
except:
|
||||
util.logException("NFSSR:attach")
|
||||
pass
|
||||
|
||||
return super(NFSFileVDI, self).attach(sr_uuid, vdi_uuid)
|
||||
|
||||
def generate_config(self, sr_uuid, vdi_uuid):
|
||||
util.SMlog("NFSFileVDI.generate_config")
|
||||
if not util.pathexists(self.path):
|
||||
raise xs_errors.XenError('VDIUnavailable')
|
||||
resp = {}
|
||||
resp['device_config'] = self.sr.dconf
|
||||
resp['sr_uuid'] = sr_uuid
|
||||
resp['vdi_uuid'] = vdi_uuid
|
||||
resp['command'] = 'vdi_attach_from_config'
|
||||
# Return the 'config' encoded within a normal XMLRPC response so that
|
||||
# we can use the regular response/error parsing code.
|
||||
config = xmlrpclib.dumps(tuple([resp]), "vdi_attach_from_config")
|
||||
return xmlrpclib.dumps((config,), "", True)
|
||||
|
||||
def attach_from_config(self, sr_uuid, vdi_uuid):
|
||||
"""Used for HA State-file only. Will not just attach the VDI but
|
||||
also start a tapdisk on the file"""
|
||||
util.SMlog("NFSFileVDI.attach_from_config")
|
||||
try:
|
||||
if not util.pathexists(self.sr.path):
|
||||
self.sr.attach(sr_uuid)
|
||||
except:
|
||||
util.logException("NFSFileVDI.attach_from_config")
|
||||
raise xs_errors.XenError('SRUnavailable', \
|
||||
opterr='Unable to attach from config')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
SRCommand.run(NFSSR, DRIVER_INFO)
|
||||
else:
|
||||
SR.registerSR(NFSSR)
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
# This file specifies the files that need
|
||||
# to be transferred over to the XenServer.
|
||||
# The format of this file is as follows:
|
||||
# [Name of file]=[source path],[file permission],[destination path]
|
||||
# [destination path] is required.
|
||||
# If [file permission] is missing, 755 is assumed.
|
||||
# If [source path] is missing, it looks in the same
|
||||
# directory as the patch file.
|
||||
# If [source path] starts with '/', then it is absolute path.
|
||||
# If [source path] starts with '~', then it is path relative to management server home directory.
|
||||
# If [source path] does not start with '/' or '~', then it is relative path to the location of the patch file.
|
||||
NFSSR.py=/opt/xensource/sm
|
||||
vmops=..,0755,/etc/xapi.d/plugins
|
||||
ovsgre=..,0755,/etc/xapi.d/plugins
|
||||
ovstunnel=..,0755,/etc/xapi.d/plugins
|
||||
vmopsSnapshot=..,0755,/etc/xapi.d/plugins
|
||||
hostvmstats.py=..,0755,/opt/xensource/sm
|
||||
systemvm.iso=../../../../../vms,0644,/opt/xensource/packages/iso
|
||||
id_rsa.cloud=../../../systemvm,0600,/root/.ssh
|
||||
network_info.sh=..,0755,/opt/xensource/bin
|
||||
setupxenserver.sh=..,0755,/opt/xensource/bin
|
||||
make_migratable.sh=..,0755,/opt/xensource/bin
|
||||
setup_iscsi.sh=..,0755,/opt/xensource/bin
|
||||
pingtest.sh=../../..,0755,/opt/xensource/bin
|
||||
dhcp_entry.sh=../../../../network/domr/,0755,/opt/xensource/bin
|
||||
ipassoc.sh=../../../../network/domr/,0755,/opt/xensource/bin
|
||||
vm_data.sh=../../../../network/domr/,0755,/opt/xensource/bin
|
||||
save_password_to_domr.sh=../../../../network/domr/,0755,/opt/xensource/bin
|
||||
networkUsage.sh=../../../../network/domr/,0755,/opt/xensource/bin
|
||||
call_firewall.sh=../../../../network/domr/,0755,/opt/xensource/bin
|
||||
call_loadbalancer.sh=../../../../network/domr/,0755,/opt/xensource/bin
|
||||
l2tp_vpn.sh=../../../../network/domr/,0755,/opt/xensource/bin
|
||||
cloud-setup-bonding.sh=..,0755,/opt/xensource/bin
|
||||
copy_vhd_to_secondarystorage.sh=..,0755,/opt/xensource/bin
|
||||
copy_vhd_from_secondarystorage.sh=..,0755,/opt/xensource/bin
|
||||
setup_heartbeat_sr.sh=..,0755,/opt/xensource/bin
|
||||
setup_heartbeat_file.sh=..,0755,/opt/xensource/bin
|
||||
check_heartbeat.sh=..,0755,/opt/xensource/bin
|
||||
xenheartbeat.sh=..,0755,/opt/xensource/bin
|
||||
launch_hb.sh=..,0755,/opt/xensource/bin
|
||||
vhd-util=..,0755,/opt/xensource/bin
|
||||
vmopspremium=..,0755,/etc/xapi.d/plugins
|
||||
create_privatetemplate_from_snapshot.sh=..,0755,/opt/xensource/bin
|
||||
upgrade_snapshot.sh=..,0755,/opt/xensource/bin
|
||||
cloud-clean-vlan.sh=..,0755,/opt/xensource/bin
|
||||
cloud-prepare-upgrade.sh=..,0755,/opt/xensource/bin
|
||||
getRouterStatus.sh=../../../../network/domr/,0755,/opt/xensource/bin
|
||||
bumpUpPriority.sh=../../../../network/domr/,0755,/opt/xensource/bin
|
||||
|
|
@ -113,8 +113,8 @@ import com.cloud.host.Status.Event;
|
|||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.host.dao.HostDetailsDao;
|
||||
import com.cloud.host.dao.HostTagsDao;
|
||||
import com.cloud.hypervisor.HypervisorGuruManager;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.hypervisor.HypervisorGuruManager;
|
||||
import com.cloud.hypervisor.kvm.resource.KvmDummyResourceBase;
|
||||
import com.cloud.network.IPAddressVO;
|
||||
import com.cloud.network.dao.IPAddressDao;
|
||||
|
|
@ -921,7 +921,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
|
|||
}
|
||||
}
|
||||
|
||||
public void removeAgent(AgentAttache attache, Status nextState) {
|
||||
public void removeAgent(AgentAttache attache, Status nextState, Event event, Boolean investigate) {
|
||||
if (attache == null) {
|
||||
return;
|
||||
}
|
||||
|
|
@ -945,6 +945,20 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
|
|||
if (removed != null) {
|
||||
removed.disconnect(nextState);
|
||||
}
|
||||
|
||||
HostVO host = _hostDao.findById(hostId);
|
||||
if (event != null && investigate != null) {
|
||||
if (!event.equals(Event.PrepareUnmanaged) && !event.equals(Event.HypervisorVersionChanged) && (host.getStatus() == Status.Alert || host.getStatus() == Status.Down)) {
|
||||
_haMgr.scheduleRestartForVmsOnHost(host, investigate);
|
||||
}
|
||||
}
|
||||
|
||||
for (Pair<Integer, Listener> monitor : _hostMonitors) {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName());
|
||||
}
|
||||
monitor.second().processDisconnect(hostId, nextState);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -998,7 +1012,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
|
|||
HostVO host = _hostDao.findById(hostId);
|
||||
if (host == null) {
|
||||
s_logger.warn("Can't find host with " + hostId);
|
||||
removeAgent(attache, Status.Removed);
|
||||
removeAgent(attache, Status.Removed, event, investigate);
|
||||
return true;
|
||||
|
||||
}
|
||||
|
|
@ -1008,7 +1022,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
|
|||
s_logger.debug("Host " + hostId + " is already " + currentState);
|
||||
}
|
||||
if (currentState != Status.PrepareForMaintenance) {
|
||||
removeAgent(attache, currentState);
|
||||
removeAgent(attache, currentState, event, investigate);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
|
@ -1096,21 +1110,9 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
|
|||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Deregistering link for " + hostId + " with state " + nextState);
|
||||
}
|
||||
removeAgent(attache, nextState);
|
||||
removeAgent(attache, nextState, event, investigate);
|
||||
_hostDao.disconnect(host, event, _nodeId);
|
||||
|
||||
host = _hostDao.findById(host.getId());
|
||||
if (!event.equals(Event.PrepareUnmanaged) && !event.equals(Event.HypervisorVersionChanged) && (host.getStatus() == Status.Alert || host.getStatus() == Status.Down)) {
|
||||
_haMgr.scheduleRestartForVmsOnHost(host, investigate);
|
||||
}
|
||||
|
||||
for (Pair<Integer, Listener> monitor : _hostMonitors) {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName());
|
||||
}
|
||||
monitor.second().processDisconnect(hostId, nextState);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
@ -1129,25 +1131,25 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
|
|||
ConnectionException ce = (ConnectionException)e;
|
||||
if (ce.isSetupError()) {
|
||||
s_logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage());
|
||||
handleDisconnect(attache, Event.AgentDisconnected, false);
|
||||
handleDisconnect(attache, Event.AgentDisconnected, true);
|
||||
throw ce;
|
||||
} else {
|
||||
s_logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not to continue the connect process for " + hostId + " due to " + e.getMessage());
|
||||
handleDisconnect(attache, Event.ShutdownRequested, false);
|
||||
handleDisconnect(attache, Event.ShutdownRequested, true);
|
||||
return attache;
|
||||
}
|
||||
} else if (e instanceof HypervisorVersionChangedException) {
|
||||
handleDisconnect(attache, Event.HypervisorVersionChanged, false);
|
||||
handleDisconnect(attache, Event.HypervisorVersionChanged, true);
|
||||
throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
|
||||
} else {
|
||||
s_logger.error("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage(), e);
|
||||
handleDisconnect(attache, Event.AgentDisconnected, false);
|
||||
handleDisconnect(attache, Event.AgentDisconnected, true);
|
||||
throw new CloudRuntimeException("Unable to connect " + attache.getId(), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Long dcId = host.getDataCenterId();
|
||||
ReadyCommand ready = new ReadyCommand(dcId);
|
||||
Answer answer = easySend(hostId, ready);
|
||||
|
|
@ -1155,7 +1157,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
|
|||
// this is tricky part for secondary storage
|
||||
// make it as disconnected, wait for secondary storage VM to be up
|
||||
// return the attache instead of null, even it is disconnectede
|
||||
handleDisconnect(attache, Event.AgentDisconnected, false);
|
||||
handleDisconnect(attache, Event.AgentDisconnected, true);
|
||||
}
|
||||
|
||||
_hostDao.updateStatus(host, Event.Ready, _nodeId);
|
||||
|
|
@ -1531,7 +1533,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
|
|||
return false;
|
||||
}
|
||||
|
||||
if (host.getStatus() != Status.Up && host.getStatus() != Status.Alert) {
|
||||
if (host.getStatus() != Status.Up && host.getStatus() != Status.Alert && host.getStatus() != Status.Rebalancing) {
|
||||
s_logger.info("Unable to disconnect host because it is not in the correct state: host=" + hostId + "; Status=" + host.getStatus());
|
||||
return false;
|
||||
}
|
||||
|
|
@ -1590,7 +1592,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
|
|||
AgentAttache attache = null;
|
||||
attache = findAttache(hostId);
|
||||
if (attache != null) {
|
||||
handleDisconnect(attache, Event.AgentDisconnected, false);
|
||||
handleDisconnect(attache, Event.AgentDisconnected, true);
|
||||
}
|
||||
return true;
|
||||
} else if (event == Event.ShutdownRequested) {
|
||||
|
|
@ -1923,26 +1925,101 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
|
|||
}
|
||||
|
||||
public AgentAttache handleConnect(final Link link,
|
||||
final StartupCommand[] startup) throws IllegalArgumentException,
|
||||
ConnectionException {
|
||||
final StartupCommand[] startup, Request request) {
|
||||
HostVO server = null;
|
||||
boolean handled = notifyCreatorsOfConnection(startup);
|
||||
if (!handled) {
|
||||
server = createHost(startup, null, null, false, null, null);
|
||||
StartupAnswer[] answers = new StartupAnswer[startup.length];
|
||||
AgentAttache attache = null;
|
||||
try {
|
||||
boolean handled = notifyCreatorsOfConnection(startup);
|
||||
if (!handled) {
|
||||
server = createHost(startup, null, null, false, null, null);
|
||||
} else {
|
||||
server = _hostDao.findByGuid(startup[0].getGuid());
|
||||
}
|
||||
|
||||
if (server == null) {
|
||||
return null;
|
||||
}
|
||||
long id = server.getId();
|
||||
|
||||
attache = createAttache(id, server, link);
|
||||
|
||||
Command cmd;
|
||||
for (int i = 0; i < startup.length; i++) {
|
||||
cmd = startup[i];
|
||||
if ((cmd instanceof StartupRoutingCommand) || (cmd instanceof StartupProxyCommand) || (cmd instanceof StartupSecondaryStorageCommand) || (cmd instanceof StartupStorageCommand)) {
|
||||
answers[i] = new StartupAnswer(startup[i], attache.getId(), getPingInterval());
|
||||
break;
|
||||
}
|
||||
}
|
||||
} catch (ConnectionException e) {
|
||||
Command cmd;
|
||||
for (int i = 0; i < startup.length; i++) {
|
||||
cmd = startup[i];
|
||||
if ((cmd instanceof StartupRoutingCommand) || (cmd instanceof StartupProxyCommand) || (cmd instanceof StartupSecondaryStorageCommand) || (cmd instanceof StartupStorageCommand)) {
|
||||
answers[i] = new StartupAnswer(startup[i], e.toString());
|
||||
break;
|
||||
}
|
||||
}
|
||||
} catch (IllegalArgumentException e) {
|
||||
Command cmd;
|
||||
for (int i = 0; i < startup.length; i++) {
|
||||
cmd = startup[i];
|
||||
if ((cmd instanceof StartupRoutingCommand) || (cmd instanceof StartupProxyCommand) || (cmd instanceof StartupSecondaryStorageCommand) || (cmd instanceof StartupStorageCommand)) {
|
||||
answers[i] = new StartupAnswer(startup[i], e.toString());
|
||||
break;
|
||||
}
|
||||
}
|
||||
} catch (CloudRuntimeException e) {
|
||||
Command cmd;
|
||||
for (int i = 0; i < startup.length; i++) {
|
||||
cmd = startup[i];
|
||||
if ((cmd instanceof StartupRoutingCommand) || (cmd instanceof StartupProxyCommand) || (cmd instanceof StartupSecondaryStorageCommand) || (cmd instanceof StartupStorageCommand)) {
|
||||
answers[i] = new StartupAnswer(startup[i], e.toString());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Response response = null;
|
||||
if (attache != null) {
|
||||
response = new Response(request, answers[0], _nodeId, attache.getId());
|
||||
} else {
|
||||
server = _hostDao.findByGuid(startup[0].getGuid());
|
||||
response = new Response(request, answers[0], _nodeId, -1);
|
||||
}
|
||||
|
||||
if (server == null) {
|
||||
return null;
|
||||
|
||||
try {
|
||||
link.send(response.toBytes());
|
||||
} catch (ClosedChannelException e) {
|
||||
s_logger.debug("Failed to send startupanswer: " + e.toString());
|
||||
return null;
|
||||
}
|
||||
if (attache == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
attache = notifyMonitorsOfConnection(attache, startup, false);
|
||||
return attache;
|
||||
} catch (ConnectionException e) {
|
||||
ReadyCommand ready = new ReadyCommand(null);
|
||||
ready.setDetails(e.toString());
|
||||
try {
|
||||
easySend(attache.getId(), ready);
|
||||
} catch (Exception e1) {
|
||||
s_logger.debug("Failed to send readycommand, due to " + e.toString());
|
||||
}
|
||||
return null;
|
||||
} catch (CloudRuntimeException e) {
|
||||
ReadyCommand ready = new ReadyCommand(null);
|
||||
ready.setDetails(e.toString());
|
||||
try {
|
||||
easySend(attache.getId(), ready);
|
||||
} catch (Exception e1) {
|
||||
s_logger.debug("Failed to send readycommand, due to " + e.toString());
|
||||
}
|
||||
return null;
|
||||
}
|
||||
long id = server.getId();
|
||||
|
||||
AgentAttache attache = createAttache(id, server, link);
|
||||
|
||||
attache = notifyMonitorsOfConnection(attache, startup, false);
|
||||
|
||||
return attache;
|
||||
}
|
||||
|
||||
protected AgentAttache createAttache(long id, HostVO server, Link link) {
|
||||
|
|
@ -2243,55 +2320,17 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
|
|||
s_logger.warn("Throwing away a request because it came through as the first command on a connect: " + request);
|
||||
return;
|
||||
}
|
||||
StartupCommand startup = (StartupCommand) cmd;
|
||||
// if ((_upgradeMgr.registerForUpgrade(-1, startup.getVersion())
|
||||
// == UpgradeManager.State.RequiresUpdate) &&
|
||||
// (_upgradeMgr.getAgentUrl() != null)) {
|
||||
// final UpgradeCommand upgrade = new
|
||||
// UpgradeCommand(_upgradeMgr.getAgentUrl());
|
||||
// final Request req = new Request(1, -1, -1, new Command[] {
|
||||
// upgrade }, true, true);
|
||||
// s_logger.info("Agent requires upgrade: " + req.toString());
|
||||
// try {
|
||||
// link.send(req.toBytes());
|
||||
// } catch (ClosedChannelException e) {
|
||||
// s_logger.warn("Unable to tell agent it should update.");
|
||||
// }
|
||||
// return;
|
||||
// }
|
||||
try {
|
||||
StartupCommand[] startups = new StartupCommand[cmds.length];
|
||||
for (int i = 0; i < cmds.length; i++) {
|
||||
startups[i] = (StartupCommand) cmds[i];
|
||||
}
|
||||
attache = handleConnect(link, startups);
|
||||
} catch (final IllegalArgumentException e) {
|
||||
_alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, 0, new Long(0), "Agent from " + startup.getPrivateIpAddress() + " is unable to connect due to " + e.getMessage(), "Agent from "
|
||||
+ startup.getPrivateIpAddress() + " is unable to connect with " + request + " because of " + e.getMessage());
|
||||
s_logger.warn("Unable to create attache for agent: " + request, e);
|
||||
response = new Response(request, new StartupAnswer((StartupCommand) cmd, e.getMessage()), _nodeId, -1);
|
||||
} catch (ConnectionException e) {
|
||||
_alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, 0, new Long(0), "Agent from " + startup.getPrivateIpAddress() + " is unable to connect due to " + e.getMessage(), "Agent from "
|
||||
+ startup.getPrivateIpAddress() + " is unable to connect with " + request + " because of " + e.getMessage());
|
||||
s_logger.warn("Unable to create attache for agent: " + request, e);
|
||||
response = new Response(request, new StartupAnswer((StartupCommand) cmd, e.getMessage()), _nodeId, -1);
|
||||
} catch (final CloudRuntimeException e) {
|
||||
_alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, 0, new Long(0), "Agent from " + startup.getPrivateIpAddress() + " is unable to connect due to " + e.getMessage(), "Agent from "
|
||||
+ startup.getPrivateIpAddress() + " is unable to connect with " + request + " because of " + e.getMessage());
|
||||
s_logger.warn("Unable to create attache for agent: " + request, e);
|
||||
|
||||
StartupCommand[] startups = new StartupCommand[cmds.length];
|
||||
for (int i = 0; i < cmds.length; i++) {
|
||||
startups[i] = (StartupCommand) cmds[i];
|
||||
}
|
||||
attache = handleConnect(link, startups, request);
|
||||
|
||||
if (attache == null) {
|
||||
if (response == null) {
|
||||
s_logger.warn("Unable to create attache for agent: " + request);
|
||||
response = new Response(request, new StartupAnswer((StartupCommand) request.getCommand(), "Unable to register this agent"), _nodeId, -1);
|
||||
}
|
||||
try {
|
||||
link.send(response.toBytes(), true);
|
||||
} catch (final ClosedChannelException e) {
|
||||
s_logger.warn("Response was not sent: " + response);
|
||||
}
|
||||
return;
|
||||
s_logger.warn("Unable to create attache for agent: " + request);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
final long hostId = attache.getId();
|
||||
|
|
@ -2316,23 +2355,15 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
|
|||
}
|
||||
|
||||
final Answer[] answers = new Answer[cmds.length];
|
||||
boolean startupSend = false;
|
||||
for (int i = 0; i < cmds.length; i++) {
|
||||
cmd = cmds[i];
|
||||
Answer answer = null;
|
||||
try {
|
||||
if (cmd instanceof StartupRoutingCommand) {
|
||||
final StartupRoutingCommand startup = (StartupRoutingCommand) cmd;
|
||||
answer = new StartupAnswer(startup, attache.getId(), getPingInterval());
|
||||
} else if (cmd instanceof StartupProxyCommand) {
|
||||
final StartupProxyCommand startup = (StartupProxyCommand) cmd;
|
||||
answer = new StartupAnswer(startup, attache.getId(), getPingInterval());
|
||||
} else if (cmd instanceof StartupSecondaryStorageCommand) {
|
||||
final StartupSecondaryStorageCommand startup = (StartupSecondaryStorageCommand) cmd;
|
||||
answer = new StartupAnswer(startup, attache.getId(), getPingInterval());
|
||||
} else if (cmd instanceof StartupStorageCommand) {
|
||||
final StartupStorageCommand startup = (StartupStorageCommand) cmd;
|
||||
answer = new StartupAnswer(startup, attache.getId(), getPingInterval());
|
||||
} else if (cmd instanceof ShutdownCommand) {
|
||||
if ((cmd instanceof StartupRoutingCommand) || (cmd instanceof StartupProxyCommand) || (cmd instanceof StartupSecondaryStorageCommand) || (cmd instanceof StartupStorageCommand)) {
|
||||
startupSend = true;
|
||||
continue;
|
||||
} else if (cmd instanceof ShutdownCommand) {
|
||||
final ShutdownCommand shutdown = (ShutdownCommand) cmd;
|
||||
final String reason = shutdown.getReason();
|
||||
s_logger.info("Host " + attache.getId() + " has informed us that it is shutting down with reason " + reason + " and detail " + shutdown.getDetail());
|
||||
|
|
@ -2387,18 +2418,20 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {
|
|||
answers[i] = answer;
|
||||
}
|
||||
|
||||
response = new Response(request, answers, _nodeId, attache.getId());
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
if (logD) {
|
||||
s_logger.debug("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response);
|
||||
} else {
|
||||
s_logger.trace("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response);
|
||||
}
|
||||
}
|
||||
try {
|
||||
link.send(response.toBytes());
|
||||
} catch (final ClosedChannelException e) {
|
||||
s_logger.warn("Unable to send response because connection is closed: " + response);
|
||||
if (!startupSend) {
|
||||
response = new Response(request, answers, _nodeId, attache.getId());
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
if (logD) {
|
||||
s_logger.debug("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response);
|
||||
} else {
|
||||
s_logger.trace("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response);
|
||||
}
|
||||
}
|
||||
try {
|
||||
link.send(response.toBytes());
|
||||
} catch (final ClosedChannelException e) {
|
||||
s_logger.warn("Unable to send response because connection is closed: " + response);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -169,7 +169,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
|
|||
if (s_logger.isInfoEnabled()) {
|
||||
s_logger.info(host + " is detected down, but we have a forward attache running, disconnect this one before launching the host");
|
||||
}
|
||||
removeAgent(agentattache, Status.Disconnected);
|
||||
removeAgent(agentattache, Status.Disconnected, null, null);
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
|
|
@ -709,32 +709,27 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
|
|||
}
|
||||
|
||||
@Override
|
||||
public void removeAgent(AgentAttache attache, Status nextState) {
|
||||
public void removeAgent(AgentAttache attache, Status nextState, Event event, Boolean investigate) {
|
||||
if (attache == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
super.removeAgent(attache, nextState);
|
||||
super.removeAgent(attache, nextState, event, investigate);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeRebalanceRequest(long agentId, long currentOwnerId, long futureOwnerId, Event event) throws AgentUnavailableException, OperationTimedoutException {
|
||||
boolean result = false;
|
||||
if (event == Event.RequestAgentRebalance) {
|
||||
return setToWaitForRebalance(agentId, currentOwnerId, futureOwnerId);
|
||||
} else if (event == Event.StartAgentRebalance) {
|
||||
boolean result = false;
|
||||
try {
|
||||
result = rebalanceHost(agentId, currentOwnerId, futureOwnerId);
|
||||
result = rebalanceHost(agentId, currentOwnerId, futureOwnerId);
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("Unable to rebalance host id=" + agentId, e);
|
||||
} finally {
|
||||
if (!result) {
|
||||
failRebalance(agentId);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -958,18 +953,17 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
|
|||
if (currentOwnerId == _nodeId) {
|
||||
if (!startRebalance(hostId)) {
|
||||
s_logger.debug("Failed to start agent rebalancing");
|
||||
failRebalance(hostId);
|
||||
finishRebalance(hostId, futureOwnerId, Event.RebalanceFailed);
|
||||
return false;
|
||||
}
|
||||
try {
|
||||
Answer[] answer = sendRebalanceCommand(futureOwnerId, hostId, currentOwnerId, futureOwnerId, Event.StartAgentRebalance);
|
||||
if (answer == null || !answer[0].getResult()) {
|
||||
s_logger.warn("Host " + hostId + " failed to connect to the management server " + futureOwnerId + " as a part of rebalance process");
|
||||
result = false;
|
||||
}
|
||||
|
||||
} catch (Exception ex) {
|
||||
s_logger.warn("Host " + hostId + " failed to connect to the management server " + futureOwnerId + " as a part of rebalance process", ex);
|
||||
s_logger.warn("Host " + hostId + " failed to connect to the management server " + futureOwnerId + " as a part of rebalance process", ex);
|
||||
result = false;
|
||||
}
|
||||
|
||||
|
|
@ -977,7 +971,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
|
|||
s_logger.debug("Successfully transfered host id=" + hostId + " to management server " + futureOwnerId);
|
||||
finishRebalance(hostId, futureOwnerId, Event.RebalanceCompleted);
|
||||
} else {
|
||||
s_logger.debug("Failed to transfer host id=" + hostId + " to management server " + futureOwnerId);
|
||||
s_logger.warn("Failed to transfer host id=" + hostId + " to management server " + futureOwnerId);
|
||||
finishRebalance(hostId, futureOwnerId, Event.RebalanceFailed);
|
||||
}
|
||||
|
||||
|
|
@ -985,13 +979,19 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
|
|||
HostVO host = _hostDao.findById(hostId);
|
||||
try {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") as a part of rebalance process");
|
||||
s_logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process");
|
||||
}
|
||||
result = loadDirectlyConnectedHost(host, true);
|
||||
} catch (Exception ex) {
|
||||
s_logger.warn("Unable to load directly connected host " + host.getId() + " as a part of rebalance due to exception: ", ex);
|
||||
s_logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process due to:", ex);
|
||||
result = false;
|
||||
}
|
||||
|
||||
if (result) {
|
||||
s_logger.debug("Successfully loaded directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process");
|
||||
} else {
|
||||
s_logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process");
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
|
|
@ -1002,7 +1002,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
|
|||
|
||||
boolean success = (event == Event.RebalanceCompleted) ? true : false;
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Finishing rebalancing for the agent " + hostId + " with result " + success);
|
||||
s_logger.debug("Finishing rebalancing for the agent " + hostId + " with event " + event);
|
||||
}
|
||||
|
||||
AgentAttache attache = findAttache(hostId);
|
||||
|
|
@ -1042,13 +1042,12 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
|
|||
try {
|
||||
s_logger.debug("Management server " + _nodeId + " failed to rebalance agent " + hostId);
|
||||
_hostTransferDao.completeAgentTransfer(hostId);
|
||||
reconnect(hostId);
|
||||
handleDisconnect(findAttache(hostId), Event.RebalanceFailed, false);
|
||||
} catch (Exception ex) {
|
||||
s_logger.warn("Failed to reconnect host id=" + hostId + " as a part of failed rebalance task cleanup");
|
||||
}
|
||||
}
|
||||
|
||||
@DB
|
||||
protected boolean startRebalance(final long hostId) {
|
||||
HostVO host = _hostDao.findById(hostId);
|
||||
|
||||
|
|
@ -1060,7 +1059,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
|
|||
synchronized (_agents) {
|
||||
ClusteredDirectAgentAttache attache = (ClusteredDirectAgentAttache)_agents.get(hostId);
|
||||
if (attache != null && attache.getQueueSize() == 0 && attache.getNonRecurringListenersSize() == 0) {
|
||||
removeAgent(attache, Status.Rebalancing);
|
||||
handleDisconnect(attache, Event.StartAgentRebalance, false);
|
||||
ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(hostId);
|
||||
if (forwardAttache == null) {
|
||||
s_logger.warn("Unable to create a forward attache for the host " + hostId + " as a part of rebalance process");
|
||||
|
|
@ -1079,15 +1078,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
|
|||
}
|
||||
}
|
||||
|
||||
Transaction txn = Transaction.currentTxn();
|
||||
txn.start();
|
||||
|
||||
s_logger.debug("Updating host id=" + hostId + " with the status " + Status.Rebalancing);
|
||||
host.setManagementServerId(null);
|
||||
_hostDao.updateStatus(host, Event.StartAgentRebalance, _nodeId);
|
||||
_hostTransferDao.startAgentTransfer(hostId);
|
||||
txn.commit();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
@ -1119,19 +1110,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
|
|||
|
||||
@Override
|
||||
public void run() {
|
||||
boolean result = false;
|
||||
try {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Rebalancing host id=" + hostId);
|
||||
}
|
||||
result = rebalanceHost(hostId, currentOwnerId, futureOwnerId);
|
||||
rebalanceHost(hostId, currentOwnerId, futureOwnerId);
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("Unable to rebalance host id=" + hostId, e);
|
||||
|
||||
} finally {
|
||||
if (!result) {
|
||||
failRebalance(hostId);
|
||||
}
|
||||
StackMaid.current().exitCleanup();
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,31 @@
|
|||
/**
|
||||
* Copyright (C) 2010 Cloud.com, Inc. All rights reserved.
|
||||
*
|
||||
* This software is licensed under the GNU General Public License v3 or later.
|
||||
*
|
||||
* It is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or any later version.
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
*/
|
||||
|
||||
package com.cloud.cluster;
|
||||
|
||||
public class ActiveFencingException extends Exception {
|
||||
private static final long serialVersionUID = -3975376101728211726L;
|
||||
|
||||
public ActiveFencingException(String message) {
|
||||
super(message);
|
||||
}
|
||||
|
||||
public ActiveFencingException(String message, Throwable th) {
|
||||
super(message, th);
|
||||
}
|
||||
}
|
||||
|
|
@ -28,7 +28,7 @@ import com.cloud.utils.component.Manager;
|
|||
|
||||
public interface ClusterManager extends Manager {
|
||||
public static final int DEFAULT_HEARTBEAT_INTERVAL = 1500;
|
||||
public static final int DEFAULT_HEARTBEAT_THRESHOLD = 60000;
|
||||
public static final int DEFAULT_HEARTBEAT_THRESHOLD = 150000;
|
||||
public static final String ALERT_SUBJECT = "cluster-alert";
|
||||
|
||||
public Answer[] execute(String strPeer, long agentId, Command [] cmds, boolean stopOnError);
|
||||
|
|
|
|||
|
|
@ -50,8 +50,10 @@ import com.cloud.agent.api.Answer;
|
|||
import com.cloud.agent.api.ChangeAgentCommand;
|
||||
import com.cloud.agent.api.Command;
|
||||
import com.cloud.agent.manager.Commands;
|
||||
import com.cloud.cluster.ManagementServerHost.State;
|
||||
import com.cloud.cluster.agentlb.dao.HostTransferMapDao;
|
||||
import com.cloud.cluster.dao.ManagementServerHostDao;
|
||||
import com.cloud.cluster.dao.ManagementServerHostPeerDao;
|
||||
import com.cloud.configuration.Config;
|
||||
import com.cloud.configuration.dao.ConfigurationDao;
|
||||
import com.cloud.exception.AgentUnavailableException;
|
||||
|
|
@ -86,14 +88,14 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
private static final int EXECUTOR_SHUTDOWN_TIMEOUT = 1000; // 1 second
|
||||
|
||||
|
||||
private final List<ClusterManagerListener> listeners = new ArrayList<ClusterManagerListener>();
|
||||
private final Map<Long, ManagementServerHostVO> activePeers = new HashMap<Long, ManagementServerHostVO>();
|
||||
private int heartbeatInterval = ClusterManager.DEFAULT_HEARTBEAT_INTERVAL;
|
||||
private int heartbeatThreshold = ClusterManager.DEFAULT_HEARTBEAT_THRESHOLD;
|
||||
private final List<ClusterManagerListener> _listeners = new ArrayList<ClusterManagerListener>();
|
||||
private final Map<Long, ManagementServerHostVO> _activePeers = new HashMap<Long, ManagementServerHostVO>();
|
||||
private int _heartbeatInterval = ClusterManager.DEFAULT_HEARTBEAT_INTERVAL;
|
||||
private int _heartbeatThreshold = ClusterManager.DEFAULT_HEARTBEAT_THRESHOLD;
|
||||
|
||||
private final Map<String, ClusterService> clusterPeers;
|
||||
private final Map<String, Listener> asyncCalls;
|
||||
private final Gson gson;
|
||||
private final Map<String, ClusterService> _clusterPeers;
|
||||
private final Map<String, Listener> _asyncCalls;
|
||||
private final Gson _gson;
|
||||
|
||||
@Inject
|
||||
private AgentManager _agentMgr;
|
||||
|
|
@ -110,6 +112,7 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
private ClusterServiceAdapter _currentServiceAdapter;
|
||||
|
||||
private ManagementServerHostDao _mshostDao;
|
||||
private ManagementServerHostPeerDao _mshostPeerDao;
|
||||
private HostDao _hostDao;
|
||||
private HostTransferMapDao _hostTransferDao;
|
||||
|
||||
|
|
@ -132,10 +135,10 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
|
||||
|
||||
public ClusterManagerImpl() {
|
||||
clusterPeers = new HashMap<String, ClusterService>();
|
||||
asyncCalls = new HashMap<String, Listener>();
|
||||
_clusterPeers = new HashMap<String, ClusterService>();
|
||||
_asyncCalls = new HashMap<String, Listener>();
|
||||
|
||||
gson = GsonHelper.getGson();
|
||||
_gson = GsonHelper.getGson();
|
||||
|
||||
// executor to perform remote-calls in another thread context, to avoid potential
|
||||
// recursive remote calls between nodes
|
||||
|
|
@ -207,7 +210,7 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
public void broadcast(long agentId, Command[] cmds) {
|
||||
Date cutTime = DateUtil.currentGMTTime();
|
||||
|
||||
List<ManagementServerHostVO> peers = _mshostDao.getActiveList(new Date(cutTime.getTime() - heartbeatThreshold));
|
||||
List<ManagementServerHostVO> peers = _mshostDao.getActiveList(new Date(cutTime.getTime() - _heartbeatThreshold));
|
||||
for (ManagementServerHostVO peer : peers) {
|
||||
String peerName = Long.toString(peer.getMsid());
|
||||
if (getSelfPeerName().equals(peerName)) {
|
||||
|
|
@ -230,7 +233,7 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
|
||||
if(s_logger.isDebugEnabled()) {
|
||||
s_logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " " +
|
||||
gson.toJson(cmds, Command[].class));
|
||||
_gson.toJson(cmds, Command[].class));
|
||||
}
|
||||
|
||||
for(int i = 0; i < 2; i++) {
|
||||
|
|
@ -247,7 +250,7 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
}
|
||||
|
||||
long startTick = System.currentTimeMillis();
|
||||
String strResult = peerService.execute(getSelfPeerName(), agentId, gson.toJson(cmds, Command[].class), stopOnError);
|
||||
String strResult = peerService.execute(getSelfPeerName(), agentId, _gson.toJson(cmds, Command[].class), stopOnError);
|
||||
if(s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Completed " + getSelfPeerName() + " -> " + strPeer + "." + agentId + "in " +
|
||||
(System.currentTimeMillis() - startTick) + " ms, result: " + strResult);
|
||||
|
|
@ -255,7 +258,7 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
|
||||
if(strResult != null) {
|
||||
try {
|
||||
return gson.fromJson(strResult, Answer[].class);
|
||||
return _gson.fromJson(strResult, Answer[].class);
|
||||
} catch(Throwable e) {
|
||||
s_logger.error("Exception on parsing gson package from remote call to " + strPeer);
|
||||
}
|
||||
|
|
@ -280,7 +283,7 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
|
||||
if(s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Async " + getSelfPeerName() + " -> " + strPeer + "." + agentId + " " +
|
||||
gson.toJson(cmds, Command[].class));
|
||||
_gson.toJson(cmds, Command[].class));
|
||||
}
|
||||
|
||||
for(int i = 0; i < 2; i++) {
|
||||
|
|
@ -298,7 +301,7 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
}
|
||||
|
||||
long startTick = System.currentTimeMillis();
|
||||
seq = peerService.executeAsync(getSelfPeerName(), agentId, gson.toJson(cmds, Command[].class), stopOnError);
|
||||
seq = peerService.executeAsync(getSelfPeerName(), agentId, _gson.toJson(cmds, Command[].class), stopOnError);
|
||||
if(seq > 0) {
|
||||
if(s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Completed Async " + getSelfPeerName() + " -> " + strPeer + "." + agentId
|
||||
|
|
@ -330,7 +333,7 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
public boolean onAsyncResult(String executingPeer, long agentId, long seq, Answer[] answers) {
|
||||
if(s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Process Async-call result from remote peer " + executingPeer + ", {" +
|
||||
agentId + "-" + seq + "} answers: " + (answers != null ? gson.toJson(answers, Answer[].class): "null"));
|
||||
agentId + "-" + seq + "} answers: " + (answers != null ? _gson.toJson(answers, Answer[].class): "null"));
|
||||
}
|
||||
|
||||
Listener listener = null;
|
||||
|
|
@ -381,7 +384,7 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
public boolean forwardAnswer(String targetPeer, long agentId, long seq, Answer[] answers) {
|
||||
if(s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Forward -> " + targetPeer + " Async-call answer {" + agentId + "-" + seq +
|
||||
"} " + (answers != null? gson.toJson(answers, Answer[].class):""));
|
||||
"} " + (answers != null? _gson.toJson(answers, Answer[].class):""));
|
||||
}
|
||||
|
||||
final String targetPeerF = targetPeer;
|
||||
|
|
@ -407,7 +410,7 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
s_logger.debug("Start forwarding Async-call answer {" + agentId + "-" + seq + "} to remote");
|
||||
}
|
||||
|
||||
result = peerService.onAsyncResult(getSelfPeerName(), agentIdF, seqF, gson.toJson(answersF, Answer[].class));
|
||||
result = peerService.onAsyncResult(getSelfPeerName(), agentIdF, seqF, _gson.toJson(answersF, Answer[].class));
|
||||
|
||||
if(s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Completed forwarding Async-call answer {" + agentId + "-" + seq + "} in " +
|
||||
|
|
@ -460,19 +463,19 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
@Override
|
||||
public void registerListener(ClusterManagerListener listener) {
|
||||
// Note : we don't check duplicates
|
||||
synchronized (listeners) {
|
||||
synchronized (_listeners) {
|
||||
s_logger.info("register cluster listener " + listener.getClass());
|
||||
|
||||
listeners.add(listener);
|
||||
_listeners.add(listener);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void unregisterListener(ClusterManagerListener listener) {
|
||||
synchronized(listeners) {
|
||||
synchronized(_listeners) {
|
||||
s_logger.info("unregister cluster listener " + listener.getClass());
|
||||
|
||||
listeners.remove(listener);
|
||||
_listeners.remove(listener);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -485,8 +488,8 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
}
|
||||
}
|
||||
|
||||
synchronized(listeners) {
|
||||
for(ClusterManagerListener listener : listeners) {
|
||||
synchronized(_listeners) {
|
||||
for(ClusterManagerListener listener : _listeners) {
|
||||
listener.onManagementNodeJoined(nodeList, _mshostId);
|
||||
}
|
||||
}
|
||||
|
|
@ -504,8 +507,8 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
}
|
||||
}
|
||||
|
||||
synchronized(listeners) {
|
||||
for(ClusterManagerListener listener : listeners) {
|
||||
synchronized(_listeners) {
|
||||
for(ClusterManagerListener listener : _listeners) {
|
||||
listener.onManagementNodeLeft(nodeList, _mshostId);
|
||||
}
|
||||
}
|
||||
|
|
@ -518,28 +521,28 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
if(s_logger.isDebugEnabled())
|
||||
s_logger.debug("Notify management server node isolation to listeners");
|
||||
|
||||
synchronized(listeners) {
|
||||
for(ClusterManagerListener listener : listeners) {
|
||||
synchronized(_listeners) {
|
||||
for(ClusterManagerListener listener : _listeners) {
|
||||
listener.onManagementNodeIsolated();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public ClusterService getPeerService(String strPeer) throws RemoteException {
|
||||
synchronized(clusterPeers) {
|
||||
if(clusterPeers.containsKey(strPeer)) {
|
||||
return clusterPeers.get(strPeer);
|
||||
synchronized(_clusterPeers) {
|
||||
if(_clusterPeers.containsKey(strPeer)) {
|
||||
return _clusterPeers.get(strPeer);
|
||||
}
|
||||
}
|
||||
|
||||
ClusterService service = _currentServiceAdapter.getPeerService(strPeer);
|
||||
|
||||
if(service != null) {
|
||||
synchronized(clusterPeers) {
|
||||
synchronized(_clusterPeers) {
|
||||
// re-check the peer map again to deal with the
|
||||
// race conditions
|
||||
if(!clusterPeers.containsKey(strPeer)) {
|
||||
clusterPeers.put(strPeer, service);
|
||||
if(!_clusterPeers.containsKey(strPeer)) {
|
||||
_clusterPeers.put(strPeer, service);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -548,9 +551,9 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
}
|
||||
|
||||
public void invalidatePeerService(String strPeer) {
|
||||
synchronized(clusterPeers) {
|
||||
if(clusterPeers.containsKey(strPeer)) {
|
||||
clusterPeers.remove(strPeer);
|
||||
synchronized(_clusterPeers) {
|
||||
if(_clusterPeers.containsKey(strPeer)) {
|
||||
_clusterPeers.remove(strPeer);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -558,9 +561,9 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
private void registerAsyncCall(String strPeer, long seq, Listener listener) {
|
||||
String key = strPeer + "/" + seq;
|
||||
|
||||
synchronized(asyncCalls) {
|
||||
if(!asyncCalls.containsKey(key)) {
|
||||
asyncCalls.put(key, listener);
|
||||
synchronized(_asyncCalls) {
|
||||
if(!_asyncCalls.containsKey(key)) {
|
||||
_asyncCalls.put(key, listener);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -568,9 +571,9 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
private Listener getAsyncCallListener(String strPeer, long seq) {
|
||||
String key = strPeer + "/" + seq;
|
||||
|
||||
synchronized(asyncCalls) {
|
||||
if(asyncCalls.containsKey(key)) {
|
||||
return asyncCalls.get(key);
|
||||
synchronized(_asyncCalls) {
|
||||
if(_asyncCalls.containsKey(key)) {
|
||||
return _asyncCalls.get(key);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -580,9 +583,9 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
private void unregisterAsyncCall(String strPeer, long seq) {
|
||||
String key = strPeer + "/" + seq;
|
||||
|
||||
synchronized(asyncCalls) {
|
||||
if(asyncCalls.containsKey(key)) {
|
||||
asyncCalls.remove(key);
|
||||
synchronized(_asyncCalls) {
|
||||
if(_asyncCalls.containsKey(key)) {
|
||||
_asyncCalls.remove(key);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -593,43 +596,66 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
public void run() {
|
||||
Transaction txn = Transaction.open("ClusterHeartBeat");
|
||||
try {
|
||||
txn.transitToUserManagedConnection(getHeartbeatConnection());
|
||||
if(s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("Cluster manager heartbeat update, id:" + _mshostId);
|
||||
}
|
||||
|
||||
_mshostDao.update(_mshostId, getCurrentRunId(), DateUtil.currentGMTTime());
|
||||
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("Cluster manager peer-scan, id:" + _mshostId);
|
||||
}
|
||||
|
||||
if (!_peerScanInited) {
|
||||
_peerScanInited = true;
|
||||
initPeerScan();
|
||||
}
|
||||
|
||||
peerScan();
|
||||
Profiler profiler = new Profiler();
|
||||
Profiler profilerHeartbeatUpdate = new Profiler();
|
||||
Profiler profilerPeerScan = new Profiler();
|
||||
Profiler profilerAgentLB = new Profiler();
|
||||
|
||||
//initiate agent lb task will be scheduled and executed only once, and only when number of agents loaded exceeds _connectedAgentsThreshold
|
||||
if (_agentLBEnabled && !_agentLbHappened) {
|
||||
List<HostVO> allManagedRoutingAgents = _hostDao.listManagedRoutingAgents();
|
||||
List<HostVO> allAgents = _hostDao.listAllRoutingAgents();
|
||||
double allHostsCount = allAgents.size();
|
||||
double managedHostsCount = allManagedRoutingAgents.size();
|
||||
if (allHostsCount > 0.0) {
|
||||
double load = managedHostsCount/allHostsCount;
|
||||
if (load >= _connectedAgentsThreshold) {
|
||||
s_logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + _connectedAgentsThreshold);
|
||||
_rebalanceService.scheduleRebalanceAgents();
|
||||
_agentLbHappened = true;
|
||||
} else {
|
||||
s_logger.trace("Not scheduling agent rebalancing task as the averages load " + load + " is less than the threshold " + _connectedAgentsThreshold);
|
||||
}
|
||||
}
|
||||
try {
|
||||
profiler.start();
|
||||
|
||||
profilerHeartbeatUpdate.start();
|
||||
txn.transitToUserManagedConnection(getHeartbeatConnection());
|
||||
if(s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("Cluster manager heartbeat update, id:" + _mshostId);
|
||||
}
|
||||
|
||||
_mshostDao.update(_mshostId, getCurrentRunId(), DateUtil.currentGMTTime());
|
||||
profilerHeartbeatUpdate.stop();
|
||||
|
||||
profilerPeerScan.start();
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("Cluster manager peer-scan, id:" + _mshostId);
|
||||
}
|
||||
|
||||
if (!_peerScanInited) {
|
||||
_peerScanInited = true;
|
||||
initPeerScan();
|
||||
}
|
||||
|
||||
peerScan();
|
||||
profilerPeerScan.stop();
|
||||
|
||||
profilerAgentLB.start();
|
||||
//initiate agent lb task will be scheduled and executed only once, and only when number of agents loaded exceeds _connectedAgentsThreshold
|
||||
if (_agentLBEnabled && !_agentLbHappened) {
|
||||
List<HostVO> allManagedRoutingAgents = _hostDao.listManagedRoutingAgents();
|
||||
List<HostVO> allAgents = _hostDao.listAllRoutingAgents();
|
||||
double allHostsCount = allAgents.size();
|
||||
double managedHostsCount = allManagedRoutingAgents.size();
|
||||
if (allHostsCount > 0.0) {
|
||||
double load = managedHostsCount/allHostsCount;
|
||||
if (load >= _connectedAgentsThreshold) {
|
||||
s_logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + _connectedAgentsThreshold);
|
||||
_rebalanceService.scheduleRebalanceAgents();
|
||||
_agentLbHappened = true;
|
||||
} else {
|
||||
s_logger.trace("Not scheduling agent rebalancing task as the averages load " + load + " is less than the threshold " + _connectedAgentsThreshold);
|
||||
}
|
||||
}
|
||||
}
|
||||
profilerAgentLB.stop();
|
||||
} finally {
|
||||
profiler.stop();
|
||||
|
||||
if(profiler.getDuration() >= _heartbeatInterval) {
|
||||
s_logger.warn("Management server heartbeat takes too long to finish. profiler: " + profiler.toString() +
|
||||
", profilerHeartbeatUpdate: " + profilerHeartbeatUpdate.toString() +
|
||||
", profilerPeerScan: " + profilerPeerScan.toString() +
|
||||
", profilerAgentLB: " + profilerAgentLB.toString());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
} catch(CloudRuntimeException e) {
|
||||
s_logger.error("Runtime DB exception ", e.getCause());
|
||||
|
||||
|
|
@ -644,6 +670,8 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
}
|
||||
|
||||
invalidHeartbeatConnection();
|
||||
} catch(ActiveFencingException e) {
|
||||
queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeIsolated));
|
||||
} catch (Throwable e) {
|
||||
if(isRootCauseConnectionRelated(e.getCause())) {
|
||||
s_logger.error("DB communication problem detected");
|
||||
|
|
@ -765,6 +793,34 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
this._notificationMsgs.add(msg);
|
||||
this._notificationMsgs.notifyAll();
|
||||
}
|
||||
|
||||
switch(msg.getMessageType()) {
|
||||
case nodeAdded:
|
||||
{
|
||||
List<ManagementServerHostVO> l = msg.getNodes();
|
||||
if(l != null && l.size() > 0) {
|
||||
for(ManagementServerHostVO mshost: l) {
|
||||
_mshostPeerDao.updatePeerInfo(_mshostId, mshost.getId(), mshost.getRunid(), ManagementServerHost.State.Up);
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case nodeRemoved:
|
||||
{
|
||||
List<ManagementServerHostVO> l = msg.getNodes();
|
||||
if(l != null && l.size() > 0) {
|
||||
for(ManagementServerHostVO mshost: l) {
|
||||
_mshostPeerDao.updatePeerInfo(_mshostId, mshost.getId(), mshost.getRunid(), ManagementServerHost.State.Down);
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
default :
|
||||
break;
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
private ClusterManagerMessage getNextNotificationMessage() {
|
||||
|
|
@ -781,7 +837,7 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
// upon startup, for all inactive management server nodes that we see at startup time, we will send notification also to help upper layer perform
|
||||
// missed cleanup
|
||||
Date cutTime = DateUtil.currentGMTTime();
|
||||
List<ManagementServerHostVO> inactiveList = _mshostDao.getInactiveList(new Date(cutTime.getTime() - heartbeatThreshold));
|
||||
List<ManagementServerHostVO> inactiveList = _mshostDao.getInactiveList(new Date(cutTime.getTime() - _heartbeatThreshold));
|
||||
|
||||
// We don't have foreign key constraints to enforce the mgmt_server_id integrity in host table, when user manually
|
||||
// remove records from mshost table, this will leave orphan mgmt_serve_id reference in host table.
|
||||
|
|
@ -809,17 +865,24 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
}
|
||||
}
|
||||
|
||||
private void peerScan() {
|
||||
private void peerScan() throws ActiveFencingException {
|
||||
Date cutTime = DateUtil.currentGMTTime();
|
||||
|
||||
List<ManagementServerHostVO> currentList = _mshostDao.getActiveList(new Date(cutTime.getTime() - heartbeatThreshold));
|
||||
List<ManagementServerHostVO> currentList = _mshostDao.getActiveList(new Date(cutTime.getTime() - _heartbeatThreshold));
|
||||
|
||||
List<ManagementServerHostVO> removedNodeList = new ArrayList<ManagementServerHostVO>();
|
||||
List<ManagementServerHostVO> invalidatedNodeList = new ArrayList<ManagementServerHostVO>();
|
||||
|
||||
if(_mshostId != null) {
|
||||
|
||||
if(_mshostPeerDao.countStateSeenInPeers(_mshostId, _runId, ManagementServerHost.State.Down) > 0) {
|
||||
String msg = "We have detected that at least one management server peer reports that this management server is down, perform active fencing to avoid split-brain situation";
|
||||
s_logger.error(msg);
|
||||
throw new ActiveFencingException(msg);
|
||||
}
|
||||
|
||||
// only if we have already attached to cluster, will we start to check leaving nodes
|
||||
for(Map.Entry<Long, ManagementServerHostVO> entry : activePeers.entrySet()) {
|
||||
for(Map.Entry<Long, ManagementServerHostVO> entry : _activePeers.entrySet()) {
|
||||
|
||||
ManagementServerHostVO current = getInListById(entry.getKey(), currentList);
|
||||
if(current == null) {
|
||||
|
|
@ -853,7 +916,7 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
// process invalidated node list
|
||||
if(invalidatedNodeList.size() > 0) {
|
||||
for(ManagementServerHostVO mshost : invalidatedNodeList) {
|
||||
activePeers.remove(mshost.getId());
|
||||
_activePeers.remove(mshost.getId());
|
||||
try {
|
||||
JmxUtil.unregisterMBean("ClusterManager", "Node " + mshost.getId());
|
||||
} catch(Exception e) {
|
||||
|
|
@ -870,7 +933,7 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
ManagementServerHostVO mshost = it.next();
|
||||
if(!pingManagementNode(mshost)) {
|
||||
s_logger.warn("Management node " + mshost.getId() + " is detected inactive by timestamp and also not pingable");
|
||||
activePeers.remove(mshost.getId());
|
||||
_activePeers.remove(mshost.getId());
|
||||
try {
|
||||
JmxUtil.unregisterMBean("ClusterManager", "Node " + mshost.getId());
|
||||
} catch(Exception e) {
|
||||
|
|
@ -888,8 +951,8 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
|
||||
List<ManagementServerHostVO> newNodeList = new ArrayList<ManagementServerHostVO>();
|
||||
for(ManagementServerHostVO mshost : currentList) {
|
||||
if(!activePeers.containsKey(mshost.getId())) {
|
||||
activePeers.put(mshost.getId(), mshost);
|
||||
if(!_activePeers.containsKey(mshost.getId())) {
|
||||
_activePeers.put(mshost.getId(), mshost);
|
||||
|
||||
if(s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Detected management node joined, id:" + mshost.getId() + ", nodeIP:" + mshost.getServiceIP());
|
||||
|
|
@ -968,9 +1031,11 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
if (s_logger.isInfoEnabled()) {
|
||||
s_logger.info("Management server (host id : " + _mshostId + ") is being started at " + _clusterNodeIP + ":" + _currentServiceAdapter.getServicePort());
|
||||
}
|
||||
|
||||
_mshostPeerDao.clearPeerInfo(_mshostId);
|
||||
|
||||
// use seperate thread for heartbeat updates
|
||||
_heartbeatScheduler.scheduleAtFixedRate(getHeartbeatTask(), heartbeatInterval, heartbeatInterval, TimeUnit.MILLISECONDS);
|
||||
_heartbeatScheduler.scheduleAtFixedRate(getHeartbeatTask(), _heartbeatInterval, _heartbeatInterval, TimeUnit.MILLISECONDS);
|
||||
_notificationExecutor.submit(getNotificationTask());
|
||||
|
||||
} catch (Throwable e) {
|
||||
|
|
@ -1028,7 +1093,12 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
if (_mshostDao == null) {
|
||||
throw new ConfigurationException("Unable to get " + ManagementServerHostDao.class.getName());
|
||||
}
|
||||
|
||||
|
||||
_mshostPeerDao = locator.getDao(ManagementServerHostPeerDao.class);
|
||||
if (_mshostPeerDao == null) {
|
||||
throw new ConfigurationException("Unable to get " + ManagementServerHostPeerDao.class.getName());
|
||||
}
|
||||
|
||||
_hostDao = locator.getDao(HostDao.class);
|
||||
if (_hostDao == null) {
|
||||
throw new ConfigurationException("Unable to get " + HostDao.class.getName());
|
||||
|
|
@ -1048,12 +1118,12 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
|
||||
String value = configs.get("cluster.heartbeat.interval");
|
||||
if (value != null) {
|
||||
heartbeatInterval = NumbersUtil.parseInt(value, ClusterManager.DEFAULT_HEARTBEAT_INTERVAL);
|
||||
_heartbeatInterval = NumbersUtil.parseInt(value, ClusterManager.DEFAULT_HEARTBEAT_INTERVAL);
|
||||
}
|
||||
|
||||
value = configs.get("cluster.heartbeat.threshold");
|
||||
if (value != null) {
|
||||
heartbeatThreshold = NumbersUtil.parseInt(value, ClusterManager.DEFAULT_HEARTBEAT_THRESHOLD);
|
||||
_heartbeatThreshold = NumbersUtil.parseInt(value, ClusterManager.DEFAULT_HEARTBEAT_THRESHOLD);
|
||||
}
|
||||
|
||||
File dbPropsFile = PropertiesUtil.findConfigFile("db.properties");
|
||||
|
|
@ -1125,7 +1195,7 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
public boolean isManagementNodeAlive(long msid) {
|
||||
ManagementServerHostVO mshost = _mshostDao.findByMsid(msid);
|
||||
if(mshost != null) {
|
||||
if(mshost.getLastUpdateTime().getTime() >= DateUtil.currentGMTTime().getTime() - heartbeatThreshold) {
|
||||
if(mshost.getLastUpdateTime().getTime() >= DateUtil.currentGMTTime().getTime() - _heartbeatThreshold) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
@ -1178,20 +1248,20 @@ public class ClusterManagerImpl implements ClusterManager {
|
|||
|
||||
@Override
|
||||
public int getHeartbeatThreshold() {
|
||||
return this.heartbeatThreshold;
|
||||
return this._heartbeatThreshold;
|
||||
}
|
||||
|
||||
public int getHeartbeatInterval() {
|
||||
return this.heartbeatInterval;
|
||||
return this._heartbeatInterval;
|
||||
}
|
||||
|
||||
public void setHeartbeatThreshold(int threshold) {
|
||||
heartbeatThreshold = threshold;
|
||||
_heartbeatThreshold = threshold;
|
||||
}
|
||||
|
||||
private void checkConflicts() throws ConfigurationException {
|
||||
Date cutTime = DateUtil.currentGMTTime();
|
||||
List<ManagementServerHostVO> peers = _mshostDao.getActiveList(new Date(cutTime.getTime() - heartbeatThreshold));
|
||||
List<ManagementServerHostVO> peers = _mshostDao.getActiveList(new Date(cutTime.getTime() - _heartbeatThreshold));
|
||||
for(ManagementServerHostVO peer : peers) {
|
||||
String peerIP = peer.getServiceIP().trim();
|
||||
if(_clusterNodeIP.equals(peerIP)) {
|
||||
|
|
|
|||
|
|
@ -0,0 +1,121 @@
|
|||
/**
|
||||
* Copyright (C) 2010 Cloud.com, Inc. All rights reserved.
|
||||
*
|
||||
* This software is licensed under the GNU General Public License v3 or later.
|
||||
*
|
||||
* It is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or any later version.
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
*/
|
||||
|
||||
package com.cloud.cluster;
|
||||
|
||||
import java.util.Date;
|
||||
|
||||
import javax.persistence.Column;
|
||||
import javax.persistence.Entity;
|
||||
import javax.persistence.EnumType;
|
||||
import javax.persistence.Enumerated;
|
||||
import javax.persistence.GeneratedValue;
|
||||
import javax.persistence.GenerationType;
|
||||
import javax.persistence.Id;
|
||||
import javax.persistence.Table;
|
||||
import javax.persistence.Temporal;
|
||||
import javax.persistence.TemporalType;
|
||||
|
||||
import com.cloud.utils.DateUtil;
|
||||
|
||||
/**
 * Persistence entity for the {@code mshost_peer} table: one row records the
 * state of one peer management server ({@code peerMshost}/{@code peerRunid})
 * as observed by one owning management server ({@code ownerMshost}).
 * Peers use these rows to detect each other's liveness; see
 * ManagementServerHostPeerDao for the query patterns.
 */
@Entity
@Table(name="mshost_peer")
public class ManagementServerHostPeerVO {

    // Auto-generated surrogate primary key.
    @Id
    @GeneratedValue(strategy=GenerationType.IDENTITY)
    @Column(name="id")
    private long id;

    // Id of the management server that recorded this observation.
    @Column(name="owner_mshost", updatable=true, nullable=false)
    private long ownerMshost;

    // Id of the peer management server being observed.
    @Column(name="peer_mshost", updatable=true, nullable=false)
    private long peerMshost;

    // Run id of the peer; presumably distinguishes restarts of the same
    // peer process — TODO confirm against ManagementServerHostVO.getRunid().
    @Column(name="peer_runid", updatable=true, nullable=false)
    private long peerRunid;

    // Observed state of the peer (e.g. Up/Down), stored as the enum name.
    @Column(name="peer_state", updatable = true, nullable=false)
    @Enumerated(value=EnumType.STRING)
    private ManagementServerHost.State peerState;

    // Timestamp (GMT) of the last update to this observation; nullable.
    @Temporal(TemporalType.TIMESTAMP)
    @Column(name="last_update", updatable=true, nullable=true)
    private Date lastUpdateTime;

    /** No-arg constructor required by JPA. */
    public ManagementServerHostPeerVO() {
    }

    /**
     * Creates a fully-populated observation row; {@code lastUpdateTime} is
     * stamped with the current GMT time.
     *
     * @param ownerMshost id of the observing management server
     * @param peerMshost  id of the observed peer management server
     * @param peerRunid   run id of the observed peer
     * @param peerState   observed state of the peer
     */
    public ManagementServerHostPeerVO(long ownerMshost, long peerMshost, long peerRunid, ManagementServerHost.State peerState) {
        this.ownerMshost = ownerMshost;
        this.peerMshost = peerMshost;
        this.peerRunid = peerRunid;
        this.peerState = peerState;

        this.lastUpdateTime = DateUtil.currentGMTTime();
    }

    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

    public long getOwnerMshost() {
        return ownerMshost;
    }

    public void setOwnerMshost(long ownerMshost) {
        this.ownerMshost = ownerMshost;
    }

    public long getPeerMshost() {
        return peerMshost;
    }

    public void setPeerMshost(long peerMshost) {
        this.peerMshost = peerMshost;
    }

    public long getPeerRunid() {
        return peerRunid;
    }

    public void setPeerRunid(long peerRunid) {
        this.peerRunid = peerRunid;
    }

    public ManagementServerHost.State getPeerState() {
        return peerState;
    }

    public void setPeerState(ManagementServerHost.State peerState) {
        this.peerState = peerState;
    }

    public Date getLastUpdateTime() {
        return lastUpdateTime;
    }

    public void setLastUpdateTime(Date lastUpdateTime) {
        this.lastUpdateTime = lastUpdateTime;
    }
}
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
/**
|
||||
* Copyright (C) 2010 Cloud.com, Inc. All rights reserved.
|
||||
*
|
||||
* This software is licensed under the GNU General Public License v3 or later.
|
||||
*
|
||||
* It is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or any later version.
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
*/
|
||||
|
||||
package com.cloud.cluster.dao;
|
||||
|
||||
import com.cloud.cluster.ManagementServerHost;
|
||||
import com.cloud.cluster.ManagementServerHostPeerVO;
|
||||
import com.cloud.utils.db.GenericDao;
|
||||
|
||||
/**
 * DAO for {@code mshost_peer} rows, which record the state of each peer
 * management server as seen by an owning management server. Used by the
 * cluster manager to detect peers that report this node as down (active
 * fencing).
 */
public interface ManagementServerHostPeerDao extends GenericDao<ManagementServerHostPeerVO, Long> {
    /**
     * Deletes all peer-state rows recorded by the given management server,
     * e.g. to start from a clean slate when that server starts up.
     *
     * @param ownerMshost id of the observing management server
     */
    void clearPeerInfo(long ownerMshost);
    /**
     * Records (insert or update) the state the owning management server
     * currently observes for the given peer/run id.
     *
     * @param ownerMshost id of the observing management server
     * @param peerMshost  id of the observed peer management server
     * @param peerRunid   run id of the observed peer
     * @param peerState   observed state (e.g. Up/Down)
     */
    void updatePeerInfo(long ownerMshost, long peerMshost, long peerRunid, ManagementServerHost.State peerState);
    /**
     * Counts how many peers have recorded the given management server
     * (identified by id and run id) in the given state — e.g. how many peers
     * consider this node Down.
     *
     * @param mshost id of the management server being looked up as a peer
     * @param runid  run id of that management server
     * @param state  state to count occurrences of
     * @return number of peer rows matching the given id, run id and state
     */
    int countStateSeenInPeers(long mshost, long runid, ManagementServerHost.State state);
}
|
||||
|
|
@ -0,0 +1,108 @@
|
|||
/**
|
||||
* Copyright (C) 2010 Cloud.com, Inc. All rights reserved.
|
||||
*
|
||||
* This software is licensed under the GNU General Public License v3 or later.
|
||||
*
|
||||
* It is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or any later version.
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
*/
|
||||
|
||||
package com.cloud.cluster.dao;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import javax.ejb.Local;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.cluster.ManagementServerHost;
|
||||
import com.cloud.cluster.ManagementServerHostPeerVO;
|
||||
import com.cloud.utils.db.DB;
|
||||
import com.cloud.utils.db.GenericDaoBase;
|
||||
import com.cloud.utils.db.SearchBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria;
|
||||
import com.cloud.utils.db.Transaction;
|
||||
|
||||
@Local(value={ManagementServerHostPeerDao.class})
|
||||
public class ManagementServerHostPeerDaoImpl extends GenericDaoBase<ManagementServerHostPeerVO, Long> implements ManagementServerHostPeerDao {
|
||||
private static final Logger s_logger = Logger.getLogger(ManagementServerHostPeerDaoImpl.class);
|
||||
|
||||
private final SearchBuilder<ManagementServerHostPeerVO> ClearPeerSearch;
|
||||
private final SearchBuilder<ManagementServerHostPeerVO> FindForUpdateSearch;
|
||||
private final SearchBuilder<ManagementServerHostPeerVO> CountSearch;
|
||||
|
||||
public ManagementServerHostPeerDaoImpl() {
|
||||
ClearPeerSearch = createSearchBuilder();
|
||||
ClearPeerSearch.and("ownerMshost", ClearPeerSearch.entity().getOwnerMshost(), SearchCriteria.Op.EQ);
|
||||
ClearPeerSearch.done();
|
||||
|
||||
FindForUpdateSearch = createSearchBuilder();
|
||||
FindForUpdateSearch.and("ownerMshost", FindForUpdateSearch.entity().getOwnerMshost(), SearchCriteria.Op.EQ);
|
||||
FindForUpdateSearch.and("peerMshost", FindForUpdateSearch.entity().getPeerMshost(), SearchCriteria.Op.EQ);
|
||||
FindForUpdateSearch.and("peerRunid", FindForUpdateSearch.entity().getPeerRunid(), SearchCriteria.Op.EQ);
|
||||
FindForUpdateSearch.done();
|
||||
|
||||
CountSearch = createSearchBuilder();
|
||||
CountSearch.and("peerMshost", CountSearch.entity().getPeerMshost(), SearchCriteria.Op.EQ);
|
||||
CountSearch.and("peerRunid", CountSearch.entity().getPeerRunid(), SearchCriteria.Op.EQ);
|
||||
CountSearch.and("peerState", CountSearch.entity().getPeerState(), SearchCriteria.Op.EQ);
|
||||
CountSearch.done();
|
||||
}
|
||||
|
||||
@Override
|
||||
@DB
|
||||
public void clearPeerInfo(long ownerMshost) {
|
||||
SearchCriteria<ManagementServerHostPeerVO> sc = ClearPeerSearch.create();
|
||||
sc.setParameters("ownerMshost", ownerMshost);
|
||||
|
||||
expunge(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
@DB
|
||||
public void updatePeerInfo(long ownerMshost, long peerMshost, long peerRunid, ManagementServerHost.State peerState) {
|
||||
Transaction txn = Transaction.currentTxn();
|
||||
try {
|
||||
txn.start();
|
||||
|
||||
SearchCriteria<ManagementServerHostPeerVO> sc = FindForUpdateSearch.create();
|
||||
sc.setParameters("ownerMshost", ownerMshost);
|
||||
sc.setParameters("peerMshost", peerMshost);
|
||||
sc.setParameters("peerRunid", peerRunid);
|
||||
List<ManagementServerHostPeerVO> l = listBy(sc);
|
||||
if(l.size() == 1) {
|
||||
ManagementServerHostPeerVO peer = l.get(0);
|
||||
peer.setPeerState(peerState);
|
||||
update(peer.getId(), peer);
|
||||
} else {
|
||||
ManagementServerHostPeerVO peer = new ManagementServerHostPeerVO(ownerMshost, peerMshost, peerRunid, peerState);
|
||||
persist(peer);
|
||||
}
|
||||
txn.commit();
|
||||
} catch(Exception e) {
|
||||
s_logger.warn("Unexpected exception, ", e);
|
||||
txn.rollback();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@DB
|
||||
public int countStateSeenInPeers(long mshost, long runid, ManagementServerHost.State state) {
|
||||
SearchCriteria<ManagementServerHostPeerVO> sc = CountSearch.create();
|
||||
sc.setParameters("peerMshost", mshost);
|
||||
sc.setParameters("peerRunid", runid);
|
||||
sc.setParameters("peerState", state);
|
||||
|
||||
List<ManagementServerHostPeerVO> l = listBy(sc);
|
||||
return l.size();
|
||||
}
|
||||
}
|
||||
|
|
@ -18,7 +18,6 @@
|
|||
package com.cloud.configuration;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
|
@ -38,9 +37,9 @@ import com.cloud.certificate.dao.CertificateDaoImpl;
|
|||
import com.cloud.cluster.CheckPointManagerImpl;
|
||||
import com.cloud.cluster.ClusterFenceManagerImpl;
|
||||
import com.cloud.cluster.ClusterManagerImpl;
|
||||
import com.cloud.cluster.ManagementServerNode;
|
||||
import com.cloud.cluster.agentlb.dao.HostTransferMapDaoImpl;
|
||||
import com.cloud.cluster.dao.ManagementServerHostDaoImpl;
|
||||
import com.cloud.cluster.dao.ManagementServerHostPeerDaoImpl;
|
||||
import com.cloud.cluster.dao.StackMaidDaoImpl;
|
||||
import com.cloud.configuration.dao.ConfigurationDaoImpl;
|
||||
import com.cloud.configuration.dao.ResourceCountDaoImpl;
|
||||
|
|
@ -73,8 +72,8 @@ import com.cloud.maint.dao.AgentUpgradeDaoImpl;
|
|||
import com.cloud.network.NetworkManagerImpl;
|
||||
import com.cloud.network.dao.FirewallRulesCidrsDaoImpl;
|
||||
import com.cloud.network.dao.FirewallRulesDaoImpl;
|
||||
import com.cloud.network.dao.InlineLoadBalancerNicMapDaoImpl;
|
||||
import com.cloud.network.dao.IPAddressDaoImpl;
|
||||
import com.cloud.network.dao.InlineLoadBalancerNicMapDaoImpl;
|
||||
import com.cloud.network.dao.LoadBalancerDaoImpl;
|
||||
import com.cloud.network.dao.LoadBalancerVMMapDaoImpl;
|
||||
import com.cloud.network.dao.NetworkDaoImpl;
|
||||
|
|
@ -98,7 +97,6 @@ import com.cloud.network.ovs.dao.VmFlowLogDaoImpl;
|
|||
import com.cloud.network.router.VirtualNetworkApplianceManagerImpl;
|
||||
import com.cloud.network.rules.RulesManagerImpl;
|
||||
import com.cloud.network.rules.dao.PortForwardingRulesDaoImpl;
|
||||
import com.cloud.network.security.SecurityGroupManagerImpl;
|
||||
import com.cloud.network.security.SecurityGroupManagerImpl2;
|
||||
import com.cloud.network.security.dao.IngressRuleDaoImpl;
|
||||
import com.cloud.network.security.dao.SecurityGroupDaoImpl;
|
||||
|
|
@ -138,7 +136,6 @@ import com.cloud.template.HyervisorTemplateAdapter;
|
|||
import com.cloud.template.TemplateAdapter;
|
||||
import com.cloud.template.TemplateAdapter.TemplateAdapterType;
|
||||
import com.cloud.template.TemplateManagerImpl;
|
||||
import com.cloud.upgrade.DatabaseUpgradeChecker;
|
||||
import com.cloud.user.AccountManagerImpl;
|
||||
import com.cloud.user.dao.AccountDaoImpl;
|
||||
import com.cloud.user.dao.SSHKeyPairDaoImpl;
|
||||
|
|
@ -150,7 +147,6 @@ import com.cloud.utils.component.ComponentLibrary;
|
|||
import com.cloud.utils.component.ComponentLibraryBase;
|
||||
import com.cloud.utils.component.ComponentLocator.ComponentInfo;
|
||||
import com.cloud.utils.component.Manager;
|
||||
import com.cloud.utils.component.SystemIntegrityChecker;
|
||||
import com.cloud.utils.db.GenericDao;
|
||||
import com.cloud.vm.ClusteredVirtualMachineManagerImpl;
|
||||
import com.cloud.vm.ItWorkDaoImpl;
|
||||
|
|
@ -228,6 +224,7 @@ public class DefaultComponentLibrary extends ComponentLibraryBase implements Com
|
|||
addDao("ConsoleProxyDao", ConsoleProxyDaoImpl.class);
|
||||
addDao("SecondaryStorageVmDao", SecondaryStorageVmDaoImpl.class);
|
||||
addDao("ManagementServerHostDao", ManagementServerHostDaoImpl.class);
|
||||
addDao("ManagementServerHostPeerDao", ManagementServerHostPeerDaoImpl.class);
|
||||
addDao("AgentUpgradeDao", AgentUpgradeDaoImpl.class);
|
||||
addDao("SnapshotDao", SnapshotDaoImpl.class);
|
||||
addDao("AsyncJobDao", AsyncJobDaoImpl.class);
|
||||
|
|
@ -286,8 +283,13 @@ public class DefaultComponentLibrary extends ComponentLibraryBase implements Com
|
|||
|
||||
protected void populateManagers() {
|
||||
addManager("StackMaidManager", CheckPointManagerImpl.class);
|
||||
addManager("account manager", AccountManagerImpl.class);
|
||||
addManager("Cluster Manager", ClusterManagerImpl.class);
|
||||
addManager("ClusteredAgentManager", ClusteredAgentManagerImpl.class);
|
||||
addManager("SyncQueueManager", SyncQueueManagerImpl.class);
|
||||
addManager("AsyncJobManager", AsyncJobManagerImpl.class);
|
||||
addManager("AsyncJobExecutorContext", AsyncJobExecutorContextImpl.class);
|
||||
addManager("configuration manager", ConfigurationManagerImpl.class);
|
||||
addManager("account manager", AccountManagerImpl.class);
|
||||
addManager("network manager", NetworkManagerImpl.class);
|
||||
addManager("download manager", DownloadMonitorImpl.class);
|
||||
addManager("upload manager", UploadMonitorImpl.class);
|
||||
|
|
@ -296,9 +298,6 @@ public class DefaultComponentLibrary extends ComponentLibraryBase implements Com
|
|||
addManager("vm manager", UserVmManagerImpl.class);
|
||||
addManager("upgrade manager", UpgradeManagerImpl.class);
|
||||
addManager("StorageManager", StorageManagerImpl.class);
|
||||
addManager("SyncQueueManager", SyncQueueManagerImpl.class);
|
||||
addManager("AsyncJobManager", AsyncJobManagerImpl.class);
|
||||
addManager("AsyncJobExecutorContext", AsyncJobExecutorContextImpl.class);
|
||||
addManager("HA Manager", HighAvailabilityManagerImpl.class);
|
||||
addManager("Alert Manager", AlertManagerImpl.class);
|
||||
addManager("Template Manager", TemplateManagerImpl.class);
|
||||
|
|
@ -313,7 +312,6 @@ public class DefaultComponentLibrary extends ComponentLibraryBase implements Com
|
|||
addManager("OvsNetworkManager", OvsNetworkManagerImpl.class);
|
||||
addManager("OvsTunnelManager", OvsTunnelManagerImpl.class);
|
||||
addManager("Capacity Manager", CapacityManagerImpl.class);
|
||||
addManager("Cluster Manager", ClusterManagerImpl.class);
|
||||
addManager("VirtualMachineManager", ClusteredVirtualMachineManagerImpl.class);
|
||||
addManager("HypervisorGuruManager", HypervisorGuruManagerImpl.class);
|
||||
addManager("ClusterFenceManager", ClusterFenceManagerImpl.class);
|
||||
|
|
@ -322,7 +320,6 @@ public class DefaultComponentLibrary extends ComponentLibraryBase implements Com
|
|||
addManager("OCFS2Manager", OCFS2ManagerImpl.class);
|
||||
ComponentInfo<? extends Manager> info = addManager("ConsoleProxyManager", ConsoleProxyManagerImpl.class);
|
||||
info.addParameter("consoleproxy.sslEnabled", "true");
|
||||
addManager("ClusteredAgentManager", ClusteredAgentManagerImpl.class);
|
||||
addManager("ElasticLoadBalancerManager", ElasticLoadBalancerManagerImpl.class);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -167,6 +167,12 @@ public class KvmServerDiscoverer extends DiscovererBase implements Discoverer,
|
|||
return null;
|
||||
}
|
||||
|
||||
// place a place holder guid derived from cluster ID
|
||||
if (cluster.getGuid() == null) {
|
||||
cluster.setGuid(UUID.nameUUIDFromBytes(String.valueOf(clusterId).getBytes()).toString());
|
||||
_clusterDao.update(clusterId, cluster);
|
||||
}
|
||||
|
||||
String parameters = " -m " + _hostIp + " -z " + dcId + " -p " + podId + " -c " + clusterId + " -g " + guid + " -a";
|
||||
|
||||
if (_kvmPublicNic != null) {
|
||||
|
|
@ -199,12 +205,6 @@ public class KvmServerDiscoverer extends DiscovererBase implements Discoverer,
|
|||
return null;
|
||||
|
||||
details.put("guid", guidWithTail);
|
||||
|
||||
// place a place holder guid derived from cluster ID
|
||||
if (cluster.getGuid() == null) {
|
||||
cluster.setGuid(UUID.nameUUIDFromBytes(String.valueOf(clusterId).getBytes()).toString());
|
||||
_clusterDao.update(clusterId, cluster);
|
||||
}
|
||||
return resources;
|
||||
} catch (DiscoveredWithErrorException e){
|
||||
throw e;
|
||||
|
|
|
|||
|
|
@ -63,6 +63,7 @@ import com.cloud.hypervisor.xen.resource.XcpServerResource;
|
|||
import com.cloud.hypervisor.xen.resource.XenServer56FP1Resource;
|
||||
import com.cloud.hypervisor.xen.resource.XenServer56Resource;
|
||||
import com.cloud.hypervisor.xen.resource.XenServer56SP2Resource;
|
||||
import com.cloud.hypervisor.xen.resource.XenServer600Resource;
|
||||
import com.cloud.hypervisor.xen.resource.XenServerConnectionPool;
|
||||
import com.cloud.resource.Discoverer;
|
||||
import com.cloud.resource.DiscovererBase;
|
||||
|
|
@ -393,6 +394,9 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
|
|||
if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.0"))
|
||||
return new XenServer56Resource();
|
||||
|
||||
if(prodBrand.equals("XenServer") && prodVersion.equals("6.0.0"))
|
||||
return new XenServer600Resource();
|
||||
|
||||
if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.100")) {
|
||||
String prodVersionTextShort = record.softwareVersion.get("product_version_text_short").trim();
|
||||
if("5.6 SP2".equals(prodVersionTextShort)) {
|
||||
|
|
@ -533,6 +537,8 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
|
|||
resource = XcpServerResource.class.getName();
|
||||
} else if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.0")) {
|
||||
resource = XenServer56Resource.class.getName();
|
||||
} else if(prodBrand.equals("XenServer") && prodVersion.equals("6.0.0")) {
|
||||
resource = XenServer600Resource.class.getName();
|
||||
} else if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.100")) {
|
||||
String prodVersionTextShort = details.get("product_version_text_short").trim();
|
||||
if("5.6 SP2".equals(prodVersionTextShort)) {
|
||||
|
|
|
|||
|
|
@ -23,14 +23,18 @@ import java.util.Map;
|
|||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.agent.Listener;
|
||||
import com.cloud.agent.api.AgentControlAnswer;
|
||||
import com.cloud.agent.api.AgentControlCommand;
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.Command;
|
||||
import com.cloud.agent.api.ModifySshKeysCommand;
|
||||
import com.cloud.agent.api.StartupCommand;
|
||||
import com.cloud.agent.api.StartupRoutingCommand;
|
||||
import com.cloud.agent.manager.Commands;
|
||||
import com.cloud.configuration.dao.ConfigurationDao;
|
||||
import com.cloud.exception.AgentUnavailableException;
|
||||
import com.cloud.exception.ConnectionException;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.host.Status;
|
||||
|
|
@ -42,11 +46,11 @@ import com.cloud.network.router.VirtualNetworkApplianceManager;
|
|||
|
||||
public class SshKeysDistriMonitor implements Listener {
|
||||
private static final Logger s_logger = Logger.getLogger(SshKeysDistriMonitor.class);
|
||||
private final VirtualNetworkApplianceManager _routerMgr;
|
||||
AgentManager _agentMgr;
|
||||
private final HostDao _hostDao;
|
||||
private ConfigurationDao _configDao;
|
||||
public SshKeysDistriMonitor(VirtualNetworkApplianceManager mgr, HostDao host, ConfigurationDao config) {
|
||||
this._routerMgr = mgr;
|
||||
public SshKeysDistriMonitor(AgentManager mgr, HostDao host, ConfigurationDao config) {
|
||||
this._agentMgr = mgr;
|
||||
_hostDao = host;
|
||||
_configDao = config;
|
||||
}
|
||||
|
|
@ -81,9 +85,13 @@ public class SshKeysDistriMonitor implements Listener {
|
|||
Map<String, String> configs = _configDao.getConfiguration("management-server", new HashMap<String, Object>());
|
||||
String pubKey = configs.get("ssh.publickey");
|
||||
String prvKey = configs.get("ssh.privatekey");
|
||||
if (!_routerMgr.sendSshKeysToHost(host.getId(), pubKey, prvKey)) {
|
||||
|
||||
try {
|
||||
ModifySshKeysCommand cmds = new ModifySshKeysCommand(pubKey, prvKey);
|
||||
Commands c = new Commands(cmds);
|
||||
_agentMgr.send(host.getId(), c, this);
|
||||
} catch (AgentUnavailableException e) {
|
||||
s_logger.debug("Failed to send keys to agent: " + host.getId());
|
||||
throw new ConnectionException(true, "Unable to send keys to the agent");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -627,7 +627,7 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian
|
|||
throw new ConfigurationException("Unable to get " + UserStatisticsDao.class.getName());
|
||||
}
|
||||
|
||||
_agentMgr.registerForHostEvents(new SshKeysDistriMonitor(this, _hostDao, _configDao), true, false, false);
|
||||
_agentMgr.registerForHostEvents(new SshKeysDistriMonitor(_agentMgr, _hostDao, _configDao), true, false, false);
|
||||
_itMgr.registerGuru(VirtualMachine.Type.DomainRouter, this);
|
||||
|
||||
boolean useLocalStorage = Boolean.parseBoolean(configs.get(Config.SystemVMUseLocalStorage.key()));
|
||||
|
|
|
|||
|
|
@ -1054,7 +1054,7 @@ public class SecurityGroupManagerImpl implements SecurityGroupManager, SecurityG
|
|||
Filter searchFilter = new Filter(SecurityGroupVO.class, "id", true, cmd.getStartIndex(), cmd.getPageSizeVal());
|
||||
Object keyword = cmd.getKeyword();
|
||||
|
||||
SearchBuilder<SecurityGroupVO> sb = _securityGroupDao.createSearchBuilder();
|
||||
SearchBuilder<SecurityGroupRulesVO> sb = _securityGroupRulesDao.createSearchBuilder();
|
||||
sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ);
|
||||
sb.and("accountId", sb.entity().getAccountId(), SearchCriteria.Op.EQ);
|
||||
sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ);
|
||||
|
|
@ -1067,7 +1067,7 @@ public class SecurityGroupManagerImpl implements SecurityGroupManager, SecurityG
|
|||
sb.join("domainSearch", domainSearch, sb.entity().getDomainId(), domainSearch.entity().getId(), JoinBuilder.JoinType.INNER);
|
||||
}
|
||||
|
||||
SearchCriteria<SecurityGroupVO> sc = sb.create();
|
||||
SearchCriteria<SecurityGroupRulesVO> sc = sb.create();
|
||||
|
||||
if (id != null) {
|
||||
sc.setParameters("id", id);
|
||||
|
|
@ -1094,11 +1094,8 @@ public class SecurityGroupManagerImpl implements SecurityGroupManager, SecurityG
|
|||
sc.addAnd("name", SearchCriteria.Op.SC, ssc);
|
||||
}
|
||||
|
||||
List<SecurityGroupVO> securityGroups = _securityGroupDao.search(sc, searchFilter);
|
||||
for (SecurityGroupVO group : securityGroups) {
|
||||
securityRulesList.addAll(_securityGroupRulesDao.listSecurityRulesByGroupId(group.getId()));
|
||||
}
|
||||
|
||||
securityRulesList = _securityGroupRulesDao.search(sc, searchFilter);
|
||||
|
||||
return securityRulesList;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -386,7 +386,7 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag
|
|||
List<StoragePoolHostVO> storagePoolHostRefs = _storagePoolHostDao.listByHostId(host.getId());
|
||||
for (StoragePoolHostVO storagePoolHostRef : storagePoolHostRefs) {
|
||||
StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolHostRef.getPoolId());
|
||||
if (storagePool.getPoolType() == StoragePoolType.LVM) {
|
||||
if (storagePool.getPoolType() == StoragePoolType.LVM || storagePool.getPoolType() == StoragePoolType.EXT) {
|
||||
SearchBuilder<VolumeVO> volumeSB = _volsDao.createSearchBuilder();
|
||||
volumeSB.and("poolId", volumeSB.entity().getPoolId(), SearchCriteria.Op.EQ);
|
||||
volumeSB.and("removed", volumeSB.entity().getRemoved(), SearchCriteria.Op.NULL);
|
||||
|
|
@ -1335,7 +1335,7 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag
|
|||
throw new InvalidParameterValueException("Unable to find pool by id " + id);
|
||||
}
|
||||
|
||||
if (sPool.getPoolType().equals(StoragePoolType.LVM)) {
|
||||
if (sPool.getPoolType().equals(StoragePoolType.LVM) || sPool.getPoolType().equals(StoragePoolType.EXT)) {
|
||||
s_logger.warn("Unable to delete local storage id:" + id);
|
||||
throw new InvalidParameterValueException("Unable to delete local storage id: " + id);
|
||||
}
|
||||
|
|
@ -3082,7 +3082,7 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag
|
|||
@DB
|
||||
public StoragePoolVO findLocalStorageOnHost(long hostId) {
|
||||
SearchCriteria<StoragePoolVO> sc = LocalStorageSearch.create();
|
||||
sc.setParameters("type", new Object[]{StoragePoolType.Filesystem, StoragePoolType.LVM});
|
||||
sc.setParameters("type", new Object[]{StoragePoolType.Filesystem, StoragePoolType.LVM, StoragePoolType.EXT});
|
||||
sc.setJoinParameters("poolHost", "hostId", hostId);
|
||||
List<StoragePoolVO> storagePools = _storagePoolDao.search(sc, null);
|
||||
if (!storagePools.isEmpty()) {
|
||||
|
|
|
|||
|
|
@ -92,8 +92,6 @@ public interface StoragePoolDao extends GenericDao<StoragePoolVO, Long> {
|
|||
|
||||
List<StoragePoolVO> listPoolByHostPath(String host, String path);
|
||||
|
||||
void deleteStoragePoolRecords(ArrayList<Long> ids);
|
||||
|
||||
void updateDetails(long poolId, Map<String, String> details);
|
||||
|
||||
Map<String, String> getDetails(long poolId);
|
||||
|
|
|
|||
|
|
@ -238,17 +238,6 @@ public class StoragePoolDaoImpl extends GenericDaoBase<StoragePoolVO, Long> imp
|
|||
return pool;
|
||||
}
|
||||
|
||||
@DB
|
||||
@Override
|
||||
public void deleteStoragePoolRecords(ArrayList<Long> ids)
|
||||
{
|
||||
SearchCriteria<StoragePoolVO> sc = DeleteLvmSearch.create();
|
||||
sc.setParameters("ids", ids.toArray());
|
||||
sc.setParameters("LVM", StoragePoolType.LVM);
|
||||
sc.setParameters("Filesystem", StoragePoolType.Filesystem);
|
||||
remove(sc);
|
||||
}
|
||||
|
||||
@DB
|
||||
@Override
|
||||
public List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details) {
|
||||
|
|
|
|||
|
|
@ -19,11 +19,14 @@ package com.cloud.vm;
|
|||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
import java.util.Enumeration;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
|
|
@ -104,6 +107,7 @@ import com.cloud.network.NetworkManager;
|
|||
import com.cloud.network.NetworkVO;
|
||||
import com.cloud.offering.ServiceOffering;
|
||||
import com.cloud.org.Cluster;
|
||||
import com.cloud.resource.ResourceManager;
|
||||
import com.cloud.service.ServiceOfferingVO;
|
||||
import com.cloud.service.dao.ServiceOfferingDao;
|
||||
import com.cloud.storage.DiskOfferingVO;
|
||||
|
|
@ -223,7 +227,11 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
|
|||
protected Adapters<DeploymentPlanner> _planners;
|
||||
|
||||
@Inject(adapter = HostAllocator.class)
|
||||
protected Adapters<HostAllocator> _hostAllocators;
|
||||
protected Adapters<HostAllocator> _hostAllocators;
|
||||
|
||||
@Inject
|
||||
protected ResourceManager _resourceMgr;
|
||||
|
||||
|
||||
Map<VirtualMachine.Type, VirtualMachineGuru<? extends VMInstanceVO>> _vmGurus = new HashMap<VirtualMachine.Type, VirtualMachineGuru<? extends VMInstanceVO>>();
|
||||
protected StateMachine2<State, VirtualMachine.Event, VirtualMachine> _stateMachine;
|
||||
|
|
@ -1603,55 +1611,96 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
|
|||
}
|
||||
|
||||
|
||||
|
||||
public Commands deltaSync(Map<String, Pair<String, State>> newStates) {
|
||||
public void deltaSync(Map<String, Pair<String, State>> newStates) {
|
||||
Map<Long, AgentVmInfo> states = convertToInfos(newStates);
|
||||
Commands commands = new Commands(OnError.Continue);
|
||||
|
||||
for (Map.Entry<Long, AgentVmInfo> entry : states.entrySet()) {
|
||||
AgentVmInfo info = entry.getValue();
|
||||
|
||||
VMInstanceVO vm = info.vm;
|
||||
|
||||
Command command = null;
|
||||
if (vm != null) {
|
||||
String host_guid = info.getHostUuid();
|
||||
Host host = _hostDao.findByGuid(host_guid);
|
||||
Host host = _hostDao.findByGuid(info.getHostUuid());
|
||||
long hId = host.getId();
|
||||
|
||||
HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
|
||||
command = compareState(hId, vm, info, false, hvGuru.trackVmHostChange());
|
||||
} else {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Cleaning up a VM that is no longer found: " + info.name);
|
||||
s_logger.debug("Cleaning up a VM that is no longer found <deltaSync>: " + info.name);
|
||||
}
|
||||
command = cleanup(info.name);
|
||||
}
|
||||
if (command != null){
|
||||
try {
|
||||
Host host = _hostDao.findByGuid(info.getHostUuid());
|
||||
if (host != null){
|
||||
Answer answer = _agentMgr.send(host.getId(), cleanup(info.name));
|
||||
if (!answer.getResult()) {
|
||||
s_logger.warn("Unable to stop a VM due to " + answer.getDetails());
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("Unable to stop a VM due to " + e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (command != null) {
|
||||
commands.addCommand(command);
|
||||
|
||||
public void fullSync(final long clusterId, Map<String, Pair<String, State>> newStates, boolean init) {
|
||||
Map<Long, AgentVmInfo> infos = convertToInfos(newStates);
|
||||
Set<VMInstanceVO> set_vms = Collections.synchronizedSet(new HashSet<VMInstanceVO>());
|
||||
set_vms.addAll(_vmDao.listByClusterId(clusterId));
|
||||
set_vms.addAll(_vmDao.listStartingByClusterId(clusterId));
|
||||
|
||||
for (VMInstanceVO vm : set_vms) {
|
||||
if (vm.isRemoved() || vm.getState() == State.Destroyed || vm.getState() == State.Expunging) continue;
|
||||
AgentVmInfo info = infos.remove(vm.getId());
|
||||
if (init){ // mark the VMs real state on initial sync
|
||||
VMInstanceVO castedVm = null;
|
||||
if (info == null){
|
||||
if (vm.getState() == State.Running || vm.getState() == State.Starting) { // only work on VMs which were supposed to be starting/running earlier
|
||||
info = new AgentVmInfo(vm.getInstanceName(), getVmGuru(vm), vm, State.Stopped);
|
||||
castedVm = info.guru.findById(vm.getId());
|
||||
try {
|
||||
Host host = _hostDao.findByGuid(info.getHostUuid());
|
||||
long hostId = host == null ? (vm.getHostId() == null ? vm.getLastHostId() : vm.getHostId()) : host.getId();
|
||||
HypervisorGuru hvGuru = _hvGuruMgr.getGuru(castedVm.getHypervisorType());
|
||||
Command command = compareState(hostId, castedVm, info, true, hvGuru.trackVmHostChange());
|
||||
if (command != null){
|
||||
Answer answer = _agentMgr.send(hostId, command);
|
||||
if (!answer.getResult()) {
|
||||
s_logger.warn("Failed to update state of the VM due to " + answer.getDetails());
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("Unable to update state of the VM due to exception " + e.getMessage());
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
for (final AgentVmInfo left : infos.values()) {
|
||||
try {
|
||||
Host host = _hostDao.findByGuid(left.getHostUuid());
|
||||
if (host != null){
|
||||
s_logger.warn("Stopping a VM which we do not have any record of " + left.name);
|
||||
Answer answer = _agentMgr.send(host.getId(), cleanup(left.name));
|
||||
if (!answer.getResult()) {
|
||||
s_logger.warn("Unable to stop a VM due to " + answer.getDetails());
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("Unable to stop a VM due to " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
return commands;
|
||||
}
|
||||
|
||||
|
||||
|
||||
public Commands fullSync(final long clusterId, Map<String, Pair<String, State>> newStates) {
|
||||
Commands commands = new Commands(OnError.Continue);
|
||||
Map<Long, AgentVmInfo> infos = convertToInfos(newStates);
|
||||
final List<VMInstanceVO> vms = _vmDao.listByClusterId(clusterId);
|
||||
for (VMInstanceVO vm : vms) {
|
||||
infos.remove(vm.getId());
|
||||
}
|
||||
for (final AgentVmInfo left : infos.values()) {
|
||||
s_logger.warn("Stopping a VM that we have no record of: " + left.name);
|
||||
commands.addCommand(cleanup(left.name));
|
||||
}
|
||||
return commands;
|
||||
}
|
||||
|
||||
protected Map<Long, AgentVmInfo> convertToInfos(final Map<String, Pair<String, State>> newStates) {
|
||||
final HashMap<Long, AgentVmInfo> map = new HashMap<Long, AgentVmInfo>();
|
||||
if (newStates == null) {
|
||||
|
|
@ -1960,7 +2009,7 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
|
|||
ClusterSyncAnswer hs = (ClusterSyncAnswer) answer;
|
||||
if (hs.isFull()) {
|
||||
deltaSync(hs.getNewStates());
|
||||
fullSync(hs.getClusterId(), hs.getAllStates());
|
||||
fullSync(hs.getClusterId(), hs.getAllStates(), false);
|
||||
} else if (hs.isDelta()) {
|
||||
deltaSync(hs.getNewStates());
|
||||
}
|
||||
|
|
@ -2030,7 +2079,13 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
|
|||
|
||||
long agentId = agent.getId();
|
||||
Long clusterId = agent.getClusterId();
|
||||
if (agent.getHypervisorType() == HypervisorType.XenServer || agent.getHypervisorType() == HypervisorType.Xen){ // only fro Xen
|
||||
if (agent.getHypervisorType() == HypervisorType.XenServer || agent.getHypervisorType() == HypervisorType.Xen){ // only for Xen
|
||||
StartupRoutingCommand startup = (StartupRoutingCommand) cmd;
|
||||
HashMap<String, Pair<String, State>> allStates = startup.getClusterVMStateChanges();
|
||||
if (allStates != null){
|
||||
this.fullSync(clusterId, allStates, true);
|
||||
}
|
||||
|
||||
ClusterSyncCommand syncCmd = new ClusterSyncCommand(Integer.parseInt(Config.ClusterDeltaSyncInterval.getDefaultValue()),
|
||||
Integer.parseInt(Config.ClusterFullSyncSkipSteps.getDefaultValue()), clusterId);
|
||||
try {
|
||||
|
|
|
|||
|
|
@ -82,5 +82,6 @@ public interface VMInstanceDao extends GenericDao<VMInstanceVO, Long>, StateDao<
|
|||
public Long countAllocatedVirtualRoutersForAccount(long accountId);
|
||||
|
||||
List<VMInstanceVO> listByClusterId(long clusterId);
|
||||
List<VMInstanceVO> listStartingByClusterId(long clusterId); // get all the VMs even starting one on this cluster
|
||||
List<VMInstanceVO> listVmsMigratingFromHost(Long hostId);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -50,6 +50,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
public static final Logger s_logger = Logger.getLogger(VMInstanceDaoImpl.class);
|
||||
|
||||
protected final SearchBuilder<VMInstanceVO> VMClusterSearch;
|
||||
protected final SearchBuilder<VMInstanceVO> StartingVMClusterSearch;
|
||||
protected final SearchBuilder<VMInstanceVO> IdStatesSearch;
|
||||
protected final SearchBuilder<VMInstanceVO> AllFieldsSearch;
|
||||
protected final SearchBuilder<VMInstanceVO> ZoneTemplateNonExpungedSearch;
|
||||
|
|
@ -78,6 +79,13 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
hostSearch.and("clusterId", hostSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
|
||||
VMClusterSearch.done();
|
||||
|
||||
|
||||
StartingVMClusterSearch = createSearchBuilder();
|
||||
SearchBuilder<HostVO> hostSearch1 = _hostDao.createSearchBuilder();
|
||||
StartingVMClusterSearch.join("hostSearch1", hostSearch1, hostSearch1.entity().getId(), StartingVMClusterSearch.entity().getHostId(), JoinType.INNER);
|
||||
hostSearch1.and("clusterId", hostSearch1.entity().getClusterId(), SearchCriteria.Op.EQ);
|
||||
StartingVMClusterSearch.done();
|
||||
|
||||
AllFieldsSearch = createSearchBuilder();
|
||||
AllFieldsSearch.and("host", AllFieldsSearch.entity().getHostId(), Op.EQ);
|
||||
AllFieldsSearch.and("lastHost", AllFieldsSearch.entity().getLastHostId(), Op.EQ);
|
||||
|
|
@ -182,6 +190,14 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> listStartingByClusterId(long clusterId) {
|
||||
SearchCriteria<VMInstanceVO> sc = StartingVMClusterSearch.create();
|
||||
sc.setJoinParameters("hostSearch1", "clusterId", clusterId);
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> listByZoneIdAndType(long zoneId, VirtualMachine.Type type) {
|
||||
|
|
|
|||
|
|
@ -740,6 +740,20 @@ CREATE TABLE `cloud`.`mshost` (
|
|||
INDEX `i_mshost__last_update`(`last_update`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE `cloud`.`mshost_peer` (
|
||||
`id` bigint unsigned NOT NULL auto_increment,
|
||||
`owner_mshost` bigint unsigned NOT NULL,
|
||||
`peer_mshost` bigint unsigned NOT NULL,
|
||||
`peer_runid` bigint NOT NULL,
|
||||
`peer_state` varchar(10) NOT NULL DEFAULT 'Down',
|
||||
`last_update` DATETIME NULL COMMENT 'Last record update time',
|
||||
|
||||
PRIMARY KEY (`id`),
|
||||
CONSTRAINT `fk_mshost_peer__owner_mshost` FOREIGN KEY (`owner_mshost`) REFERENCES `mshost`(`id`) ON DELETE CASCADE,
|
||||
CONSTRAINT `fk_mshost_peer__peer_mshost` FOREIGN KEY (`peer_mshost`) REFERENCES `mshost`(`id`),
|
||||
UNIQUE `i_mshost_peer__owner_peer_runid`(`owner_mshost`, `peer_mshost`, `peer_runid`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE `cloud`.`host_tags` (
|
||||
`id` bigint unsigned NOT NULL auto_increment,
|
||||
`host_id` bigint unsigned NOT NULL COMMENT 'host id',
|
||||
|
|
|
|||
|
|
@ -4,3 +4,18 @@
|
|||
|
||||
ALTER TABLE `cloud`.`vm_template` MODIFY `extractable` int(1) unsigned NOT NULL default 0 COMMENT 'Is this template extractable';
|
||||
INSERT INTO configuration (category, instance, component, name, value, description) VALUES ('Advanced', 'DEFAULT', 'management-server', 'external.network.stats.interval', '300', 'Interval (in seconds) to report external network statistics.');
|
||||
|
||||
CREATE TABLE `cloud`.`mshost_peer` (
|
||||
`id` bigint unsigned NOT NULL auto_increment,
|
||||
`owner_mshost` bigint unsigned NOT NULL,
|
||||
`peer_mshost` bigint unsigned NOT NULL,
|
||||
`peer_runid` bigint NOT NULL,
|
||||
`peer_state` varchar(10) NOT NULL DEFAULT 'Down',
|
||||
`last_update` DATETIME NULL COMMENT 'Last record update time',
|
||||
|
||||
PRIMARY KEY (`id`),
|
||||
CONSTRAINT `fk_mshost_peer__owner_mshost` FOREIGN KEY (`owner_mshost`) REFERENCES `mshost`(`id`) ON DELETE CASCADE,
|
||||
CONSTRAINT `fk_mshost_peer__peer_mshost` FOREIGN KEY (`peer_mshost`) REFERENCES `mshost`(`id`),
|
||||
UNIQUE `i_mshost_peer__owner_peer_runid`(`owner_mshost`, `peer_mshost`, `peer_runid`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
|
|
|
|||
|
|
@ -19,25 +19,46 @@
|
|||
package com.cloud.utils;
|
||||
|
||||
public class Profiler {
|
||||
private long startTickInMs;
|
||||
private long stopTickInMs;
|
||||
private Long startTickInMs;
|
||||
private Long stopTickInMs;
|
||||
|
||||
public Profiler() {
|
||||
startTickInMs = 0;
|
||||
stopTickInMs = 0;
|
||||
startTickInMs = null;
|
||||
stopTickInMs = null;
|
||||
}
|
||||
|
||||
public long start() {
|
||||
startTickInMs = System.currentTimeMillis();
|
||||
return startTickInMs;
|
||||
return startTickInMs.longValue();
|
||||
}
|
||||
|
||||
public long stop() {
|
||||
stopTickInMs = System.currentTimeMillis();
|
||||
return stopTickInMs;
|
||||
return stopTickInMs.longValue();
|
||||
}
|
||||
|
||||
public long getDuration() {
|
||||
return stopTickInMs - startTickInMs;
|
||||
}
|
||||
public long getDuration() {
|
||||
if(startTickInMs != null && stopTickInMs != null)
|
||||
return stopTickInMs.longValue() - startTickInMs.longValue();
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
public boolean isStarted() {
|
||||
return startTickInMs != null;
|
||||
}
|
||||
|
||||
public boolean isStopped() {
|
||||
return stopTickInMs != null;
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
if(startTickInMs == null)
|
||||
return "Not Started";
|
||||
|
||||
if(stopTickInMs == null)
|
||||
return "Started but not stopped";
|
||||
|
||||
return "Done. Duration: " + getDuration() + "ms";
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -110,9 +110,7 @@ def build_thirdparty_dir ():
|
|||
Utils.pprint ("GREEN", "Installed files of thirdparty/")
|
||||
|
||||
def build_dependences ():
|
||||
excludes = ["cloud-xstream-1.3.1.jar", "cloud-commons-dbcp-1.2.2.jar",
|
||||
"cloud-commons-httpclient-3.1.jar", "cloud-commons-pool-1.4.jar",
|
||||
"cloud-servlet-api.jar", "cloud-commons-logging-1.1.1.jar",
|
||||
excludes = ["cloud-xstream-1.3.1.jar", "cloud-servlet-api.jar", "cloud-commons-logging-1.1.1.jar",
|
||||
"cloud-ws-commons-util-1.0.2.jar",
|
||||
"cloud-commons-collections-3.2.1.jar"]
|
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue