mirror of https://github.com/apache/cloudstack.git
Merge branch 'master' of ssh://git.cloud.com/var/lib/git/cloudstack-oss
This commit is contained in:
commit 6d101d554e
@@ -4,6 +4,8 @@
#Labels
label.action.edit.host=Edit Host
network.rate=Network Rate
ICMP.type=ICMP Type

@@ -4,6 +4,8 @@
#Labels
label.action.edit.host=edición Anfitrión
network.rate=Tasa de red
ICMP.type=Tipo ICMP

@@ -4,6 +4,8 @@
#Labels
label.action.edit.host=ホストを編集する
network.rate=ネットワーク速度
ICMP.type=ICMPタイプ

@@ -4,6 +4,8 @@
#Labels
label.action.edit.host=编辑主机
network.rate=网络速率
ICMP.type=ICMP类型

@@ -601,7 +601,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
try {
String[] addRules = rules[LoadBalancerConfigurator.ADD];
String[] removeRules = rules[LoadBalancerConfigurator.REMOVE];
String[] removeRules = rules[LoadBalancerConfigurator.REMOVE];
String[] statRules = rules[LoadBalancerConfigurator.STATS];

String args = "";
args += "-i " + routerIp;

@@ -624,7 +625,16 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
args += " -d " + sb.toString();
}

sb = new StringBuilder();
if (statRules.length > 0) {
for (int i = 0; i < statRules.length; i++) {
sb.append(statRules[i]).append(',');
}

args += " -s " + sb.toString();
}

Pair<Boolean, String> result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", mgr.getSystemVMKeyFile(), null, "scp " + tmpCfgFilePath + " /etc/haproxy/haproxy.cfg.new");

if (!result.first()) {

@@ -1077,6 +1087,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
if (getVmState(vmMo) != State.Stopped)
vmMo.safePowerOff(_shutdown_waitMs);
vmMo.tearDownDevices(new Class<?>[] { VirtualDisk.class, VirtualEthernetCard.class });
vmMo.ensureScsiDeviceController();
} else {
ManagedObjectReference morDc = hyperHost.getHyperHostDatacenter();
assert (morDc != null);

@@ -1091,7 +1102,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
if (getVmState(vmMo) != State.Stopped)
vmMo.safePowerOff(_shutdown_waitMs);
vmMo.tearDownDevices(new Class<?>[] { VirtualDisk.class, VirtualEthernetCard.class });
vmMo.tearDownDevices(new Class<?>[] { VirtualDisk.class, VirtualEthernetCard.class });
vmMo.ensureScsiDeviceController();
} else {
int ramMb = (int) (vmSpec.getMinRam() / (1024 * 1024));
Pair<ManagedObjectReference, DatastoreMO> rootDiskDataStoreDetails = null;

@@ -206,7 +206,7 @@ public class HAProxyConfigurator implements LoadBalancerConfigurator {
result.addAll(Arrays.asList(defaultsSection));
if (!lbCmd.lbStatsVisibility.equals("disabled"))
{
if (lbCmd.lbStatsVisibility.equals("guest-network"))
if (lbCmd.lbStatsVisibility.equals("guest-network") || lbCmd.lbStatsVisibility.equals("link-local"))
{
result.add(getBlankLine());

@@ -6,16 +6,11 @@
<classpathentry exported="true" kind="lib" path="cloud-backport-util-concurrent-3.0.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-bcprov-jdk16-1.45.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-cglib.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-commons-codec-1.4.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-commons-collections-3.2.1.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-commons-dbcp-1.2.2.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-commons-discovery.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-commons-httpclient-3.1.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-commons-logging-1.1.1.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-commons-pool-1.4.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-ehcache.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-email.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-gson.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-httpcore-4.0.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-jsch-0.1.42.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-jstl-1.2.jar"/>

@@ -55,5 +50,10 @@
<classpathentry exported="true" kind="lib" path="cloud-junit.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-selenium-java-client-driver.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-selenium-server.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-commons-codec-1.5.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-commons-dbcp-1.4.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-commons-pool-1.5.6.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-google-gson-1.7.1.jar"/>
<classpathentry exported="true" kind="lib" path="cloud-httpclient-4.1.2.jar"/>
<classpathentry kind="output" path="bin"/>
</classpath>

Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,44 @@
#!/bin/bash

# getLockFile() parameters
# $1 lock filename
# $2 timeout seconds
getLockFile() {
__locked=0
__LOCKFILE="/tmp/$1.lock"
if [ $2 ]
then
__TIMEOUT=$2
else
__TIMEOUT=10
fi

for i in `seq 1 $__TIMEOUT`
do
if [ ! -e $__LOCKFILE ]
then
touch $__LOCKFILE
__locked=1
break
fi
sleep 1
logger -t cloud "sleep 1 second wait for the lock file " $__LOCKFILE
done
if [ $__locked -ne 1 ]
then
logger -t cloud "fail to acquire the lock file $__LOCKFILE after $__TIMEOUT seconds time out!"
fi
echo $__locked
}

# releaseLockFile() parameters
# $1 lock filename
# $2 locked(1) or not(0)
releaseLockFile() {
__LOCKFILE="/tmp/$1.lock"
__locked=$2
if [ "$__locked" == "1" ]
then
rm $__LOCKFILE
fi
}

@@ -79,7 +79,7 @@ fw_remove_backup() {
sudo iptables -X back_load_balancer_$vif 2> /dev/null
done
sudo iptables -F back_lb_stats 2> /dev/null
sudo iptables -D INPUT -i $STAT_IF -p tcp -j back_lb_stats 2> /dev/null
sudo iptables -D INPUT -p tcp -j back_lb_stats 2> /dev/null
sudo iptables -X back_lb_stats 2> /dev/null
}
fw_restore() {

@@ -90,7 +90,7 @@ fw_restore() {
sudo iptables -E back_load_balancer_$vif load_balancer_$vif 2> /dev/null
done
sudo iptables -F lb_stats 2> /dev/null
sudo iptables -D INPUT -i $STAT_IF -p tcp -j lb_stats 2> /dev/null
sudo iptables -D INPUT -p tcp -j lb_stats 2> /dev/null
sudo iptables -X lb_stats 2> /dev/null
sudo iptables -E back_lb_stats lb_stats 2> /dev/null
}

@@ -121,7 +121,7 @@ fw_entry() {
done
sudo iptables -E lb_stats back_lb_stats 2> /dev/null
sudo iptables -N lb_stats 2> /dev/null
sudo iptables -A INPUT -i $STAT_IF -p tcp -j lb_stats
sudo iptables -A INPUT -p tcp -j lb_stats

for i in $a
do

@@ -259,8 +259,6 @@ if [ "$VIF_LIST" == "eth0" ]
then
ip_entry $addedIps $removedIps
fi
# FIXME make the load balancer stat interface generic
STAT_IF="eth0"

# hot reconfigure haproxy
reconfig_lb $cfgfile

@@ -18,9 +18,18 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

name="reconfigLB"

source func.sh
locked=$(getLockFile $name)
if [ "$locked" != "1" ]
then
logger -t cloud "Fail to get the lock for " $name
exit 1
fi

ret=0
# save previous state
mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.old
mv /var/run/haproxy.pid /var/run/haproxy.pid.old

@@ -32,7 +41,7 @@
echo "New haproxy instance successfully loaded, stopping previous one."
kill -KILL $(cat /var/run/haproxy.pid.old)
rm -f /var/run/haproxy.pid.old
exit 0
ret=0
else
echo "New instance failed to start, resuming previous one."
kill -TTIN $(cat /var/run/haproxy.pid.old)

@@ -40,5 +49,9 @@
mv /var/run/haproxy.pid.old /var/run/haproxy.pid
mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.new
mv /etc/haproxy/haproxy.cfg.old /etc/haproxy/haproxy.cfg
exit 1
ret=1
fi

releaseLockFile $name $locked

exit $ret

@@ -264,7 +264,7 @@ public class AlertManagerImpl implements AlertManager {
try {
txn.start();
// Calculate new Public IP capacity
List<DataCenterVO> datacenters = _dcDao.listAllIncludingRemoved();
List<DataCenterVO> datacenters = _dcDao.listAll();
for (DataCenterVO datacenter : datacenters) {
long dcId = datacenter.getId();

@@ -283,7 +283,7 @@ public class AlertManagerImpl implements AlertManager {
txn.start();
// Calculate new Private IP capacity
List<HostPodVO> pods = _podDao.listAllIncludingRemoved();
List<HostPodVO> pods = _podDao.listAll();
for (HostPodVO pod : pods) {
long podId = pod.getId();
long dcId = pod.getDataCenterId();

@@ -58,7 +58,7 @@ public enum Config {
StorageTemplateCleanupEnabled("Storage", ManagementServer.class, Boolean.class, "storage.template.cleanup.enabled", "true", "Enable/disable template cleanup activity, only take effect when overall storage cleanup is enabled", null),

// Network
NetworkLBHaproxyStatsVisbility("Network", ManagementServer.class, String.class, "network.loadbalancer.haproxy.stats.visibility", "global", "Load Balancer(haproxy) stats visibilty, it can be global,guest-network,disabled", null),
NetworkLBHaproxyStatsVisbility("Network", ManagementServer.class, String.class, "network.loadbalancer.haproxy.stats.visibility", "global", "Load Balancer(haproxy) stats visibilty, it can take the following four parameters : global,guest-network,link-local,disabled", null),
NetworkLBHaproxyStatsUri("Network", ManagementServer.class, String.class, "network.loadbalancer.haproxy.stats.uri","/admin?stats","Load Balancer(haproxy) uri.",null),
NetworkLBHaproxyStatsAuth("Network", ManagementServer.class, String.class, "network.loadbalancer.haproxy.stats.auth","admin1:AdMiN123","Load Balancer(haproxy) authetication string in the format username:password",null),
NetworkLBHaproxyStatsPort("Network", ManagementServer.class, String.class, "network.loadbalancer.haproxy.stats.port","8081","Load Balancer(haproxy) stats port number.",null),

@@ -79,7 +79,7 @@ public enum Config {
SecurityGroupWorkerThreads("Network", ManagementServer.class, Integer.class, "network.securitygroups.workers.pool.size", "50", "Number of worker threads processing the security group update work queue", null),
SecurityGroupWorkGlobalLockTimeout("Network", ManagementServer.class, Integer.class, "network.securitygroups.work.lock.timeout", "300", "Lock wait timeout (seconds) while updating the security group work queue", null),

FirewallRuleUiEnabled("Network", ManagementServer.class, Boolean.class, "firewall.rule.ui.enabled", "true", "enable/disable UI that separates firewall rules from NAT/LB rules", null),
FirewallRuleUiEnabled("Network", ManagementServer.class, Boolean.class, "firewall.rule.ui.enabled", "false", "enable/disable UI that separates firewall rules from NAT/LB rules", null),

//VPN
RemoteAccessVpnPskLength("Network", AgentManager.class, Integer.class, "remote.access.vpn.psk.length", "24", "The length of the ipsec preshared key (minimum 8, maximum 256)", null),

@@ -640,7 +640,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
s_logger.warn("Unable to update db record for host id=" + host.getId() + "; it's possible that the host is removed");
}

if (s_logger.isDebugEnabled() && result == 1) {
if (s_logger.isDebugEnabled() && result == 0) {
HostVO vo = findById(host.getId());

if (vo != null) {

@@ -299,7 +299,13 @@ public class ElasticLoadBalancerManagerImpl implements
cmd.lbStatsUri = _configDao.getValue(Config.NetworkLBHaproxyStatsUri.key());
cmd.lbStatsAuth = _configDao.getValue(Config.NetworkLBHaproxyStatsAuth.key());
cmd.lbStatsPort = _configDao.getValue(Config.NetworkLBHaproxyStatsPort.key());
cmd.lbStatsIp = elbVm.getGuestIpAddress();
if (cmd.lbStatsVisibility.equals("guest-network"))
{
cmd.lbStatsIp = elbVm.getGuestIpAddress();;
}else
{
cmd.lbStatsIp = elbVm.getPrivateIpAddress();
}
cmds.addCommand(cmd);

}

@@ -2017,11 +2017,19 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian
}

LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lbs);
cmd.lbStatsIp = router.getGuestIpAddress();

cmd.lbStatsVisibility = _configDao.getValue(Config.NetworkLBHaproxyStatsVisbility.key());
cmd.lbStatsUri = _configDao.getValue(Config.NetworkLBHaproxyStatsUri.key());
cmd.lbStatsAuth = _configDao.getValue(Config.NetworkLBHaproxyStatsAuth.key());
cmd.lbStatsPort = _configDao.getValue(Config.NetworkLBHaproxyStatsPort.key());
if (cmd.lbStatsVisibility.equals("guest-network"))
{
cmd.lbStatsIp = router.getGuestIpAddress();
}else
{
cmd.lbStatsIp = router.getPrivateIpAddress();
}

cmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, router.getPrivateIpAddress());
cmd.setAccessDetail(NetworkElementCommand.ROUTER_GUEST_IP, router.getGuestIpAddress());

@@ -22,7 +22,6 @@ import java.util.List;
import java.util.Set;

import com.cloud.domain.DomainVO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.VMTemplateVO;
import com.cloud.template.VirtualMachineTemplate.TemplateFilter;

@@ -249,21 +249,23 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
}

private Long extract(Account caller, Long templateId, String url, Long zoneId, String mode, Long eventId, boolean isISO, AsyncJobVO job, AsyncJobManager mgr) {
String desc = "template";
String desc = Upload.Type.TEMPLATE.toString();
if (isISO) {
desc = "ISO";
desc = Upload.Type.ISO.toString();
}
eventId = eventId == null ? 0:eventId;

VMTemplateVO template = _tmpltDao.findById(templateId);
if (template == null || template.getRemoved() != null) {
throw new InvalidParameterValueException("Unable to find " +desc+ " with id " + templateId);
}

if (template.getTemplateType() == Storage.TemplateType.SYSTEM){
throw new InvalidParameterValueException("Unable to extract the " + desc + " " + template.getName() + " as it is a default System template");
}
if (template.getTemplateType() == Storage.TemplateType.PERHOST){
} else if (template.getTemplateType() == Storage.TemplateType.PERHOST){
throw new InvalidParameterValueException("Unable to extract the " + desc + " " + template.getName() + " as it resides on host and not on SSVM");
}

if (isISO) {
if (template.getFormat() != ImageFormat.ISO ){
throw new InvalidParameterValueException("Unsupported format, could not extract the ISO");

@@ -273,6 +275,7 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
throw new InvalidParameterValueException("Unsupported format, could not extract the template");
}
}

if (_dcDao.findById(zoneId) == null) {
throw new IllegalArgumentException("Please specify a valid zone.");
}

@@ -300,14 +303,15 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
}
}

if ( tmpltHostRef == null ) {
if (tmpltHostRef == null ) {
throw new InvalidParameterValueException("The " + desc + " has not been downloaded ");
}

Upload.Mode extractMode;
if( mode == null || (!mode.equals(Upload.Mode.FTP_UPLOAD.toString()) && !mode.equals(Upload.Mode.HTTP_DOWNLOAD.toString())) ){
throw new InvalidParameterValueException("Please specify a valid extract Mode "+Upload.Mode.values());
}else{
extractMode = mode.equals(Upload.Mode.FTP_UPLOAD.toString()) ? Upload.Mode.FTP_UPLOAD : Upload.Mode.HTTP_DOWNLOAD;
if (mode == null || (!mode.equalsIgnoreCase(Upload.Mode.FTP_UPLOAD.toString()) && !mode.equalsIgnoreCase(Upload.Mode.HTTP_DOWNLOAD.toString())) ){
throw new InvalidParameterValueException("Please specify a valid extract Mode. Supported modes: "+ Upload.Mode.FTP_UPLOAD + ", " + Upload.Mode.HTTP_DOWNLOAD);
} else {
extractMode = mode.equalsIgnoreCase(Upload.Mode.FTP_UPLOAD.toString()) ? Upload.Mode.FTP_UPLOAD : Upload.Mode.HTTP_DOWNLOAD;
}

if (extractMode == Upload.Mode.FTP_UPLOAD){

@@ -334,7 +338,7 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
throw new InvalidParameterValueException("Unable to resolve " + host);
}

if ( _uploadMonitor.isTypeUploadInProgress(templateId, isISO ? Type.ISO : Type.TEMPLATE) ){
if (_uploadMonitor.isTypeUploadInProgress(templateId, isISO ? Type.ISO : Type.TEMPLATE) ){
throw new IllegalArgumentException(template.getName() + " upload is in progress. Please wait for some time to schedule another upload for the same");
}

@@ -342,7 +346,7 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe
}

UploadVO vo = _uploadMonitor.createEntityDownloadURL(template, tmpltHostRef, zoneId, eventId);
if (vo!=null){
if (vo != null){
return vo.getId();
}else{
return null;

@@ -1594,13 +1594,20 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
}
}

if(vm.getHostId() == null || hostId != vm.getHostId()) {
if(serverState == State.Running) {
try {
stateTransitTo(vm, VirtualMachine.Event.AgentReportMigrated, hostId);
if(hostId != vm.getHostId()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("detected host change when VM " + vm + " is at running state, VM could be live-migrated externally from host "
+ vm.getHostId() + " to host " + hostId);
}

stateTransitTo(vm, VirtualMachine.Event.AgentReportMigrated, hostId);
}
} catch (NoTransitionException e) {
s_logger.warn(e.getMessage());
}
}
}
}

if (agentState == serverState) {

@@ -49,7 +49,7 @@ ALTER TABLE `cloud`.`configuration` ADD INDEX `i_configuration__component` (`com
ALTER TABLE `cloud`.`port_forwarding_rules` ADD CONSTRAINT `fk_port_forwarding_rules__instance_id` FOREIGN KEY `fk_port_forwarding_rules__instance_id` (`instance_id`) REFERENCES `vm_instance` (`id`) ON DELETE CASCADE;

INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'agent.load.threshold', '0.70', 'Percentage (as a value between 0 and 1) of connected agents after which agent load balancing will start happening');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.visibility', 'global', 'Load Balancer(haproxy) stats visibilty, it can be global,guest-network,disabled');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.visibility', 'global', 'Load Balancer(haproxy) stats visibilty, it can take the following four parameters : global,guest-network,link-local,disabled');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.uri','/admin?stats','Load Balancer(haproxy) uri.');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.auth','admin1:AdMiN123','Load Balancer(haproxy) authetication string in the format username:password');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.port','8081','Load Balancer(haproxy) stats port number.');

@@ -27,11 +27,11 @@ ALTER TABLE `cloud`.`firewall_rules` ADD CONSTRAINT `fk_firewall_rules__related`
ALTER TABLE `cloud`.`firewall_rules` MODIFY `start_port` int(10) COMMENT 'starting port of a port range';
ALTER TABLE `cloud`.`firewall_rules` MODIFY `end_port` int(10) COMMENT 'end port of a port range';

INSERT IGNORE INTO `cloud`.`configuration` (category, instance, name, value, description) VALUES ('Network', 'DEFAULT', 'firewall.rule.ui.enabled', 'true', 'enable/disable UI that separates firewall rules from NAT/LB rules');
INSERT IGNORE INTO `cloud`.`configuration` (category, instance, name, value, description) VALUES ('Network', 'DEFAULT', 'firewall.rule.ui.enabled', 'false', 'enable/disable UI that separates firewall rules from NAT/LB rules');

INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'agent.load.threshold', '0.70', 'Percentage (as a value between 0 and 1) of connected agents after which agent load balancing will start happening');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.visibility', 'global', 'Load Balancer(haproxy) stats visibilty, it can be global,guest-network,disabled');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.visibility', 'global', 'Load Balancer(haproxy) stats visibilty, it can take the following four parameters : global,guest-network,link-local,disabled');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.uri','/admin?stats','Load Balancer(haproxy) uri.');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.auth','admin1:AdMiN123','Load Balancer(haproxy) authetication string in the format username:password');
INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'management-server', 'network.loadbalancer.haproxy.stats.port','8081','Load Balancer(haproxy) stats port number.');

@@ -4,6 +4,8 @@ import time
import Queue
import copy
import sys
import jsonHelper
import datetime

class job(object):
def __init__(self):

@@ -17,51 +19,81 @@ class jobStatus(object):
self.endTime = None
self.duration = None
self.jobId = None
self.responsecls = None
class workThread(threading.Thread):
def __init__(self, in_queue, out_dict, apiClient, db=None):
def __init__(self, in_queue, outqueue, apiClient, db=None, lock=None):
threading.Thread.__init__(self)
self.inqueue = in_queue
self.output = out_dict
self.connection = copy.copy(apiClient.connection)
self.output = outqueue
self.connection = apiClient.connection
self.db = None
self.lock = lock

def queryAsynJob(self, job):
if job.jobId is None:
return job

try:
self.lock.acquire()
result = self.connection.pollAsyncJob(job.jobId, job.responsecls).jobresult
except cloudstackException.cloudstackAPIException, e:
result = str(e)
finally:
self.lock.release()

job.result = result
return job

def executeCmd(self, job):
cmd = job.cmd

jobstatus = jobStatus()
jobId = None
try:
self.lock.acquire()

if cmd.isAsync == "false":
jobstatus.startTime = datetime.datetime.now()

result = self.connection.make_request(cmd)
jobstatus.result = result
jobstatus.endTime = datetime.datetime.now()
jobstatus.duration = time.mktime(jobstatus.endTime.timetuple()) - time.mktime(jobstatus.startTime.timetuple())
else:
result = self.connection.make_request(cmd, None, True)
if result is None:
jobstatus.status = False
else:
jobId = result.jobid
jobstatus.jobId = jobId
try:
responseName = cmd.__class__.__name__.replace("Cmd", "Response")
jobstatus.responsecls = jsonHelper.getclassFromName(cmd, responseName)
except:
pass
jobstatus.status = True
except cloudstackException.cloudstackAPIException, e:
jobstatus.result = str(e)
jobstatus.status = False
except:
jobstatus.status = False
jobstatus.result = sys.exc_info()
finally:
self.lock.release()

return jobstatus

def run(self):
while self.inqueue.qsize() > 0:
job = self.inqueue.get()
cmd = job.cmd
cmdName = cmd.__class__.__name__
responseName = cmdName.replace("Cmd", "Response")
responseInstance = self.connection.getclassFromName(cmd, responseName)
jobstatus = jobStatus()
jobId = None
try:
if not cmd.isAsync:
jobstatus.startTime = time.time()
result = self.connection.make_request(cmd, responseInstance)
jobstatus.result = result
jobstatus.endTime = time.time()
else:
result = self.connection.make_request(cmd, responseInstance, True)
jobId = self.connection.getAsyncJobId(responseInstance, result)
result = self.connection.pollAsyncJob(cmd, responseInstance, jobId)
jobstatus.result = result
jobstatus.jobId = jobId
if isinstance(job, jobStatus):
jobstatus = self.queryAsynJob(job)
else:
jobstatus = self.executeCmd(job)

jobstatus.status = True
except cloudstackException.cloudstackAPIException, e:
jobstatus.result = str(e)
jobstatus.status = False
except:
jobstatus.status = False
jobstatus.result = sys.exc_info()

#print job.id
self.output.lock.acquire()
self.output.dict[job.id] = jobstatus
self.output.lock.release()
self.output.put(jobstatus)
self.inqueue.task_done()

'''release the resource'''
self.connection.close()

@@ -92,6 +124,7 @@ class asyncJobMgr(object):
def __init__(self, apiClient, db):
self.inqueue = Queue.Queue()
self.output = outputDict()
self.outqueue = Queue.Queue()
self.apiClient = apiClient
self.db = db

@@ -109,34 +142,48 @@ class asyncJobMgr(object):
ids.append(id)
return ids

def waitForComplete(self):
self.inqueue.join()

for k,jobstatus in self.output.dict.iteritems():
jobId = jobstatus.jobId
if jobId is not None and self.db is not None:
result = self.db.execute("select job_status, created, last_updated from async_job where id=%s"%jobId)
if result is not None and len(result) > 0:
if result[0][0] == 1:
jobstatus.status = True
else:
jobstatus.status = False
def updateTimeStamp(self, jobstatus):
jobId = jobstatus.jobId
if jobId is not None and self.db is not None:
result = self.db.execute("select job_status, created, last_updated from async_job where id=%s"%jobId)
if result is not None and len(result) > 0:
if result[0][0] == 1:
jobstatus.status = True
else:
jobstatus.status = False
jobstatus.startTime = result[0][1]
jobstatus.endTime = result[0][2]
delta = jobstatus.endTime - jobstatus.startTime
jobstatus.duration = delta.total_seconds()

def waitForComplete(self, workers=10):
self.inqueue.join()
lock = threading.Lock()
resultQueue = Queue.Queue()
'''intermediate result is stored in self.outqueue'''
for i in range(workers):
worker = workThread(self.outqueue, resultQueue, self.apiClient, self.db, lock)
worker.start()

return self.output.dict
self.outqueue.join()

asyncJobResult = []
while resultQueue.qsize() > 0:
jobstatus = resultQueue.get()
self.updateTimeStamp(jobstatus)
asyncJobResult.append(jobstatus)

return asyncJobResult

'''put commands into a queue at first, then start workers numbers threads to execute this commands'''
def submitCmdsAndWait(self, cmds, workers=10):
self.submitCmds(cmds)

lock = threading.Lock()
for i in range(workers):
worker = workThread(self.inqueue, self.output, self.apiClient, self.db)
worker = workThread(self.inqueue, self.outqueue, self.apiClient, self.db, lock)
worker.start()

return self.waitForComplete()
return self.waitForComplete(workers)

'''submit one job and execute the same job ntimes, with nums_threads of threads'''
def submitJobExecuteNtimes(self, job, ntimes=1, nums_threads=1, interval=1):

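The hunk above reworks the job manager so commands are queued first, worker threads drain the queue while one shared lock serializes use of the single API connection, and per-job status objects come back on an output queue. A minimal, self-contained sketch of that queue/worker/lock pattern (illustrative only, not the repository's exact API; fake_request stands in for connection.make_request):

# Sketch of the input-queue / locked-connection / output-queue pattern.
import threading
import Queue

inqueue = Queue.Queue()
outqueue = Queue.Queue()
lock = threading.Lock()

def fake_request(cmd):
    # placeholder for the real, non-thread-safe connection call
    return "result-for-%s" % cmd

def worker():
    while True:
        try:
            cmd = inqueue.get_nowait()
        except Queue.Empty:
            return
        lock.acquire()
        try:
            outqueue.put(fake_request(cmd))
        finally:
            lock.release()
            inqueue.task_done()

for i in range(5):
    inqueue.put("cmd-%d" % i)
threads = [threading.Thread(target=worker) for _ in range(3)]
for t in threads:
    t.start()
inqueue.join()
while not outqueue.empty():
    print outqueue.get()
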
@@ -10,6 +10,7 @@ import time
import inspect
import cloudstackException
from cloudstackAPI import *
import jsonHelper

class cloudConnection(object):
def __init__(self, mgtSvr, port=8096, apiKey = None, securityKey = None, asyncTimeout=3600, logging=None):

@@ -38,7 +39,7 @@ class cloudConnection(object):
def make_request_with_auth(self, command, requests={}):
requests["command"] = command
requests["apiKey"] = self.apiKey
requests["response"] = "xml"
requests["response"] = "json"
requests = zip(requests.keys(), requests.values())
requests.sort(key=lambda x: str.lower(x[0]))

@@ -54,148 +55,30 @@
def make_request_without_auth(self, command, requests={}):
requests["command"] = command
requests["response"] = "xml"
requests["response"] = "json"
requests = zip(requests.keys(), requests.values())
requestUrl = "&".join(["=".join([request[0], urllib.quote_plus(str(request[1]))]) for request in requests])
self.connection.request("GET", "/&%s"%requestUrl)
return self.connection.getresponse().read()

def getText(self, elements):
if len(elements) < 1:
return None
if not elements[0].hasChildNodes():
return None
if elements[0].childNodes[0].nodeValue is None:
return None
return elements[0].childNodes[0].nodeValue.strip()

def getclassFromName(self, cmd, name):
module = inspect.getmodule(cmd)
return getattr(module, name)()
def parseOneInstance(self, element, instance):
ItemsNeedToCheck = {}
for attribute in dir(instance):
if attribute != "__doc__" and attribute != "__init__" and attribute != "__module__":
ItemsNeedToCheck[attribute] = getattr(instance, attribute)
for attribute, value in ItemsNeedToCheck.items():
if type(value) == types.ListType:
subItem = []
for subElement in element.getElementsByTagName(attribute):
newInstance = self.getclassFromName(instance, attribute)
self.parseOneInstance(subElement, newInstance)
subItem.append(newInstance)
setattr(instance, attribute, subItem)
continue
else:
item = element.getElementsByTagName(attribute)
if len(item) == 0:
continue

returnValue = self.getText(item)
setattr(instance, attribute, returnValue)

def hasErrorCode(self, elements, responseName):
errorCode = elements[0].getElementsByTagName("errorcode")
if len(errorCode) > 0:
erroCodeText = self.getText(errorCode)
errorText = elements[0].getElementsByTagName("errortext")
if len(errorText) > 0:
errorText = self.getText(errorText)
errMsg = "errorCode: %s, errorText:%s"%(erroCodeText, errorText)
raise cloudstackException.cloudstackAPIException(responseName, errMsg)

def paraseReturnXML(self, result, response):
responseName = response.__class__.__name__.lower()
cls = response.__class__

responseLists = []
morethanOne = False

dom = xml.dom.minidom.parseString(result)
elements = dom.getElementsByTagName(responseName)
if len(elements) == 0:
return responseLists

self.hasErrorCode(elements, responseName)

count = elements[0].getElementsByTagName("count")
if len(count) > 0:
morethanOne = True
for node in elements[0].childNodes:
if node.nodeName == "count":
continue
newInstance = cls()
self.parseOneInstance(node, newInstance)
responseLists.append(newInstance)

else:
if elements[0].hasChildNodes():
newInstance = cls()
self.parseOneInstance(elements[0], newInstance)
responseLists.append(newInstance)

return responseLists, morethanOne

def paraseResultFromElement(self, elements, response):
responseName = response.__class__.__name__.lower()
cls = response.__class__

responseLists = []
morethanOne = False

newInstance = cls()
self.parseOneInstance(elements[0], newInstance)
responseLists.append(newInstance)

return responseLists, morethanOne
def getAsyncJobId(self, response, resultXml):
responseName = response.__class__.__name__.lower()
dom = xml.dom.minidom.parseString(resultXml)
elements = dom.getElementsByTagName(responseName)
if len(elements) == 0:
raise cloudstackException.cloudstackAPIException("can't find %s"%responseName)

self.hasErrorCode(elements, responseName)

jobIdEle = elements[0].getElementsByTagName("jobid")
if len(jobIdEle) == 0:
errMsg = 'can not find jobId in the result:%s'%resultXml

raise cloudstackException.cloudstackAPIException(errMsg)

jobId = self.getText(jobIdEle)
return jobId

def pollAsyncJob(self, cmd, response, jobId):
commandName = cmd.__class__.__name__.replace("Cmd", "")
def pollAsyncJob(self, jobId, response):
cmd = queryAsyncJobResult.queryAsyncJobResultCmd()
cmd.jobid = jobId

while self.asyncTimeout > 0:
asyncResponse = queryAsyncJobResult.queryAsyncJobResultResponse()
responseName = asyncResponse.__class__.__name__.lower()
asyncResponseXml = self.make_request(cmd, asyncResponse, True)
dom = xml.dom.minidom.parseString(asyncResponseXml)
elements = dom.getElementsByTagName(responseName)
if len(elements) == 0:
raise cloudstackException.cloudstackAPIException("can't find %s"%responseName)

self.hasErrorCode(elements, responseName)
asyncResonse = self.make_request(cmd, response, True)

jobstatus = self.getText(elements[0].getElementsByTagName("jobstatus"))
if asyncResonse.jobstatus == 2:
raise cloudstackException.cloudstackAPIException("asyncquery", asyncResonse.jobresult)
elif asyncResonse.jobstatus == 1:
return asyncResonse

if jobstatus == "2":
jobResult = self.getText(elements[0].getElementsByTagName("jobresult"))
raise cloudstackException.cloudstackAPIException(commandName, jobResult)
elif jobstatus == "1":
jobResultEle = elements[0].getElementsByTagName("jobresult")

return self.paraseResultFromElement(jobResultEle, response)

time.sleep(5)
self.asyncTimeout = self.asyncTimeout - 5

raise cloudstackException.cloudstackAPIException(commandName, "Async job timeout")
def make_request(self, cmd, response, raw=False):
raise cloudstackException.cloudstackAPIException("asyncquery", "Async job timeout")

def make_request(self, cmd, response = None, raw=False):
commandName = cmd.__class__.__name__.replace("Cmd", "")
isAsync = "false"
requests = {}

@@ -231,7 +114,7 @@ class cloudConnection(object):
i = i + 1

if self.logging is not None:
self.logging.debug("sending command: " + str(requests))
self.logging.debug("sending command: %s %s"%(commandName, str(requests)))
result = None
if self.auth:
result = self.make_request_with_auth(commandName, requests)

@@ -243,19 +126,13 @@ class cloudConnection(object):
if result is None:
return None

if raw:
return result
if isAsync == "false":
result,num = self.paraseReturnXML(result, response)
else:
jobId = self.getAsyncJobId(response, result)
result,num = self.pollAsyncJob(cmd, response, jobId)
if num:
result = jsonHelper.getResultObj(result, response)
if raw or isAsync == "false":
return result
else:
if len(result) != 0:
return result[0]
return None
asynJobId = result.jobid
result = self.pollAsyncJob(asynJobId, response)
return result.jobresult

if __name__ == '__main__':
xml = '<?xml version="1.0" encoding="ISO-8859-1"?><deployVirtualMachineResponse><virtualmachine><id>407</id><name>i-1-407-RS3</name><displayname>i-1-407-RS3</displayname><account>system</account><domainid>1</domainid><domain>ROOT</domain><created>2011-07-30T14:45:19-0700</created><state>Running</state><haenable>false</haenable><zoneid>1</zoneid><zonename>CA1</zonename><hostid>3</hostid><hostname>kvm-50-205</hostname><templateid>4</templateid><templatename>CentOS 5.5(64-bit) no GUI (KVM)</templatename><templatedisplaytext>CentOS 5.5(64-bit) no GUI (KVM)</templatedisplaytext><passwordenabled>false</passwordenabled><serviceofferingid>1</serviceofferingid><serviceofferingname>Small Instance</serviceofferingname><cpunumber>1</cpunumber><cpuspeed>500</cpuspeed><memory>512</memory><guestosid>112</guestosid><rootdeviceid>0</rootdeviceid><rootdevicetype>NetworkFilesystem</rootdevicetype><nic><id>380</id><networkid>203</networkid><netmask>255.255.255.0</netmask><gateway>65.19.181.1</gateway><ipaddress>65.19.181.110</ipaddress><isolationuri>vlan://65</isolationuri><broadcasturi>vlan://65</broadcasturi><traffictype>Guest</traffictype><type>Direct</type><isdefault>true</isdefault><macaddress>06:52:da:00:00:08</macaddress></nic><hypervisor>KVM</hypervisor></virtualmachine></deployVirtualMachineResponse>'
@@ -39,7 +39,8 @@ class cloudstackTestClient(object):
def getApiClient(self):
return self.apiClient

def submitCmdsAndWait(self, cmds, workers=10):
'''FixME, httplib has issue if more than one thread submitted'''
def submitCmdsAndWait(self, cmds, workers=1):
if self.asyncJobMgr is None:
self.asyncJobMgr = asyncJobMgr.asyncJobMgr(self.apiClient, self.dbConnection)
return self.asyncJobMgr.submitCmdsAndWait(cmds, workers)

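The FixME note and the new workers=1 default reflect that a single httplib connection object is not safe to share across concurrent threads. A hedged sketch of the mitigation the patch applies elsewhere, serializing every request behind one lock (the host and port below are placeholders, not values taken from the repository):

# Guarding a shared httplib connection with a lock; one request at a time.
import threading
import httplib

conn = httplib.HTTPConnection("127.0.0.1", 8096)   # hypothetical management server
conn_lock = threading.Lock()

def send_command(path):
    conn_lock.acquire()
    try:
        conn.request("GET", path)
        return conn.getresponse().read()
    finally:
        conn_lock.release()
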
@@ -1,57 +1,7 @@
import json
import os
from optparse import OptionParser
class Struct:
'''The recursive class for building and representing objects with.'''
def __init__(self, obj):
for k in obj:
v = obj[k]
if isinstance(v, dict):
setattr(self, k, Struct(v))
elif isinstance(v, (list, tuple)):
setattr(self, k, [Struct(elem) for elem in v])
else:
setattr(self,k,v)
def __getattr__(self, val):
if val in self.__dict__:
return self.__dict__[val]
else:
return None
def __repr__(self):
return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for (k, v) in self.__dict__.iteritems()))

def json_repr(obj):
"""Represent instance of a class as JSON.
Arguments:
obj -- any object
Return:
String that reprent JSON-encoded object.
"""
def serialize(obj):
"""Recursively walk object's hierarchy."""
if isinstance(obj, (bool, int, long, float, basestring)):
return obj
elif isinstance(obj, dict):
obj = obj.copy()
newobj = {}
for key in obj:
if obj[key] is not None:
if (isinstance(obj[key], list) and len(obj[key]) == 0):
continue
newobj[key] = serialize(obj[key])

return newobj
elif isinstance(obj, list):
return [serialize(item) for item in obj]
elif isinstance(obj, tuple):
return tuple(serialize([item for item in obj]))
elif hasattr(obj, '__dict__'):
return serialize(obj.__dict__)
else:
return repr(obj) # Don't know how to handle, convert to string
return serialize(obj)

import jsonHelper
class managementServer():
def __init__(self):
self.mgtSvrIp = None

@@ -199,7 +149,7 @@ def describe_setup_in_basic_mode():
p.name = "test" +str(l) + str(i)
p.gateway = "192.168.%d.1"%i
p.netmask = "255.255.255.0"
p.startip = "192.168.%d.200"%i
p.startip = "192.168.%d.150"%i
p.endip = "192.168.%d.220"%i

'''add two pod guest ip ranges'''

@@ -399,10 +349,10 @@ def describe_setup_in_advanced_mode():
def generate_setup_config(config, file=None):
describe = config
if file is None:
return json.dumps(json_repr(describe))
return json.dumps(jsonHelper.jsonDump.dump(describe))
else:
fp = open(file, 'w')
json.dump(json_repr(describe), fp, indent=4)
json.dump(jsonHelper.jsonDump.dump(describe), fp, indent=4)
fp.close()

@@ -412,7 +362,7 @@ def get_setup_config(file):
config = cloudstackConfiguration()
fp = open(file, 'r')
config = json.load(fp)
return Struct(config)
return jsonHelper.jsonLoader(config)

if __name__ == "__main__":
parser = OptionParser()

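With these changes the setup configuration is round-tripped through jsonHelper: objects are flattened to plain dicts before json.dump, and on load the dict is wrapped back into an attribute-style object. A small illustrative sketch of that round trip using only the standard json module (the Wrapper and Pod classes here are stand-ins for jsonHelper.jsonLoader and the generated config objects, not repository code):

import json

class Pod(object):
    def __init__(self):
        self.name = "pod0"
        self.gateway = "192.168.1.1"

class Wrapper(object):
    '''Attribute-style view over a dict, analogous to jsonHelper.jsonLoader.'''
    def __init__(self, d):
        for k, v in d.items():
            setattr(self, k, Wrapper(v) if isinstance(v, dict) else v)

serialized = json.dumps(Pod().__dict__)       # what jsonDump.dump feeds to json
restored = Wrapper(json.loads(serialized))    # what jsonLoader hands back
print restored.gateway
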
@@ -0,0 +1,169 @@
import cloudstackException
import json
import inspect
from cloudstackAPI import *
class jsonLoader:
'''The recursive class for building and representing objects with.'''
def __init__(self, obj):
for k in obj:
v = obj[k]
if isinstance(v, dict):
setattr(self, k, jsonLoader(v))
elif isinstance(v, (list, tuple)):
setattr(self, k, [jsonLoader(elem) for elem in v])
else:
setattr(self,k,v)
def __getattr__(self, val):
if val in self.__dict__:
return self.__dict__[val]
else:
return None
def __repr__(self):
return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for (k, v) in self.__dict__.iteritems()))
def __str__(self):
return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for (k, v) in self.__dict__.iteritems()))

class jsonDump:
@staticmethod
def __serialize(obj):
"""Recursively walk object's hierarchy."""
if isinstance(obj, (bool, int, long, float, basestring)):
return obj
elif isinstance(obj, dict):
obj = obj.copy()
newobj = {}
for key in obj:
if obj[key] is not None:
if (isinstance(obj[key], list) and len(obj[key]) == 0):
continue
newobj[key] = jsonDump.__serialize(obj[key])

return newobj
elif isinstance(obj, list):
return [jsonDump.__serialize(item) for item in obj]
elif isinstance(obj, tuple):
return tuple(jsonDump.__serialize([item for item in obj]))
elif hasattr(obj, '__dict__'):
return jsonDump.__serialize(obj.__dict__)
else:
return repr(obj) # Don't know how to handle, convert to string

@staticmethod
def dump(obj):
return jsonDump.__serialize(obj)

def getclassFromName(cmd, name):
module = inspect.getmodule(cmd)
return getattr(module, name)()

def finalizeResultObj(result, responseName, responsecls):
if responsecls is None and responseName.endswith("response") and responseName != "queryasyncjobresultresponse":
'''infer the response class from the name'''
moduleName = responseName.replace("response", "")
try:
responsecls = getclassFromName(moduleName, responseName)
except:
pass

if responseName is not None and responseName == "queryasyncjobresultresponse" and responsecls is not None and result.jobresult is not None:
result.jobresult = finalizeResultObj(result.jobresult, None, responsecls)
return result
elif responsecls is not None:
for k,v in result.__dict__.iteritems():
if k in responsecls.__dict__:
return result

attr = result.__dict__.keys()[0]

value = getattr(result, attr)
if not isinstance(value, jsonLoader):
return result

findObj = False
for k,v in value.__dict__.iteritems():
if k in responsecls.__dict__:
findObj = True
break
if findObj:
return value
else:
return result
else:
return result

def getResultObj(returnObj, responsecls=None):
returnObj = json.loads(returnObj)

if len(returnObj) == 0:
return None
responseName = returnObj.keys()[0]

response = returnObj[responseName]
if len(response) == 0:
return None

result = jsonLoader(response)
if result.errorcode is not None:
errMsg = "errorCode: %s, errorText:%s"%(result.errorcode, result.errortext)
raise cloudstackException.cloudstackAPIException(responseName.replace("response", ""), errMsg)

if result.count is not None:
for key in result.__dict__.iterkeys():
if key == "count":
continue
else:
return getattr(result, key)
else:
return finalizeResultObj(result, responseName, responsecls)

if __name__ == "__main__":
result = '{ "listzonesresponse" : { "count":1 ,"zone" : [ {"id":1,"name":"test0","dns1":"8.8.8.8","dns2":"4.4.4.4","internaldns1":"192.168.110.254","internaldns2":"192.168.110.253","networktype":"Basic","securitygroupsenabled":true,"allocationstate":"Enabled","zonetoken":"5e818a11-6b00-3429-9a07-e27511d3169a","dhcpprovider":"DhcpServer"} ] } }'
zones = getResultObj(result)
print zones[0].id
res = authorizeSecurityGroupIngress.authorizeSecurityGroupIngressResponse()
result = '{ "queryasyncjobresultresponse" : {"jobid":10,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"securitygroup":{"id":1,"name":"default","description":"Default Security Group","account":"admin","domainid":1,"domain":"ROOT","ingressrule":[{"ruleid":1,"protocol":"tcp","startport":22,"endport":22,"securitygroupname":"default","account":"a"},{"ruleid":2,"protocol":"tcp","startport":22,"endport":22,"securitygroupname":"default","account":"b"}]}}} }'
asynJob = getResultObj(result, res)
print asynJob.jobid, repr(asynJob.jobresult)
print asynJob.jobresult.ingressrule[0].account

result = '{ "queryasyncjobresultresponse" : {"errorcode" : 431, "errortext" : "Unable to execute API command queryasyncjobresultresponse due to missing parameter jobid"} }'
try:
asynJob = getResultObj(result)
except cloudstackException.cloudstackAPIException, e:
print e

result = '{ "queryasyncjobresultresponse" : {} }'
asynJob = getResultObj(result)
print asynJob

result = '{}'
asynJob = getResultObj(result)
print asynJob

result = '{ "createzoneresponse" : { "zone" : {"id":1,"name":"test0","dns1":"8.8.8.8","dns2":"4.4.4.4","internaldns1":"192.168.110.254","internaldns2":"192.168.110.253","networktype":"Basic","securitygroupsenabled":true,"allocationstate":"Enabled","zonetoken":"3442f287-e932-3111-960b-514d1f9c4610","dhcpprovider":"DhcpServer"} } }'
res = createZone.createZoneResponse()
zone = getResultObj(result, res)
print zone.id

result = '{ "attachvolumeresponse" : {"jobid":24} }'
res = attachVolume.attachVolumeResponse()
res = getResultObj(result, res)
print res

result = '{ "listtemplatesresponse" : { } }'
print getResultObj(result, listTemplates.listTemplatesResponse())

result = '{ "queryasyncjobresultresponse" : {"jobid":34,"jobstatus":2,"jobprocstatus":0,"jobresultcode":530,"jobresulttype":"object","jobresult":{"errorcode":431,"errortext":"Please provide either a volume id, or a tuple(device id, instance id)"}} }'
print getResultObj(result, listTemplates.listTemplatesResponse())
result = '{ "queryasyncjobresultresponse" : {"jobid":41,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"virtualmachine":{"id":37,"name":"i-2-37-TEST","displayname":"i-2-37-TEST","account":"admin","domainid":1,"domain":"ROOT","created":"2011-08-25T11:13:42-0700","state":"Running","haenable":false,"zoneid":1,"zonename":"test0","hostid":5,"hostname":"SimulatedAgent.1e629060-f547-40dd-b792-57cdc4b7d611","templateid":10,"templatename":"CentOS 5.3(64-bit) no GUI (Simulator)","templatedisplaytext":"CentOS 5.3(64-bit) no GUI (Simulator)","passwordenabled":false,"serviceofferingid":7,"serviceofferingname":"Small Instance","cpunumber":1,"cpuspeed":500,"memory":512,"guestosid":11,"rootdeviceid":0,"rootdevicetype":"NetworkFilesystem","securitygroup":[{"id":1,"name":"default","description":"Default Security Group"}],"nic":[{"id":43,"networkid":204,"netmask":"255.255.255.0","gateway":"192.168.1.1","ipaddress":"192.168.1.27","isolationuri":"ec2://untagged","broadcasturi":"vlan://untagged","traffictype":"Guest","type":"Direct","isdefault":true,"macaddress":"06:56:b8:00:00:53"}],"hypervisor":"Simulator"}}} }'
vm = getResultObj(result, deployVirtualMachine.deployVirtualMachineResponse())
print vm.jobresult.id

cmd = deployVirtualMachine.deployVirtualMachineCmd()
responsename = cmd.__class__.__name__.replace("Cmd", "Response")
response = getclassFromName(cmd, responsename)
print response.id

@@ -10,7 +10,7 @@ class TestCase1(cloudstackTestCase):
listtmcmd.templatefilter = "featured"
listtmresponse = apiClient.listTemplates(listtmcmd)
if listtmresponse is not None and len(listtmresponse) > 0:
self.debug(listtmresponse[0].isready)
self.debug(listtmresponse)
self.debug("we are here")
else:
self.debug("we are there")

@@ -7,5 +7,17 @@ class TestCase2(cloudstackTestCase):
listtmcmd.id = 10
listtmcmd.zoneid = 1
listtmcmd.templatefilter = "featured"
listtmresponse = apiClient.listTemplates(listtmcmd)
self.debug(listtmresponse[0].isready)
#listtmresponse = apiClient.listTemplates(listtmcmd)
#self.debug(listtmresponse[0].isready)

listhostcmd=listHosts.listHostsCmd()
listhostcmd.zoneid=1
listhostcmd.type="Routing"

asyncJobResult=self.testClient.submitCmdsAndWait([listhostcmd],1)
listVMresponse = asyncJobResult[0].result
self.debug("Total Number of Hosts: " + str(len(listVMresponse)))

for i in listVMresponse:
self.debug("id: " + str(i.id) +" pod id: " + str(i.podid) +" host tag: " + str(i.hosttags))

@@ -21,11 +21,11 @@ class TestCase1(cloudstackTestCase):
tmpls = self.testClient.getApiClient().listTemplates(listtmplcmd)
if tmpls is not None:
for tmpl in tmpls:
if tmpl.isready == "true":
if tmpl.isready:
self.templateId = tmpl.id
self.zoneId = tmpl.zoneid
break

def test_cloudstackapi(self):
apiClient = self.testClient.getApiClient()

@@ -52,14 +52,34 @@ class TestCase1(cloudstackTestCase):
'''
cidrlist = ["192.168.1.1/24", "10.1.1.1/24"]
securitygroup.cidrlist = cidrlist
apiClient.authorizeSecurityGroupIngress(securitygroup)

try:
apiClient.authorizeSecurityGroupIngress(securitygroup)
except:
pass
'''
createvm = deployVirtualMachine.deployVirtualMachineCmd()
createvm.serviceofferingid = self.svid
createvm.templateid = self.templateId
createvm.zoneid = self.zoneId
vm = apiClient.deployVirtualMachine(createvm)
vmId = vm.id
'''
vmId = 1
vmcmds = []
for i in range(10):
createvm = deployVirtualMachine.deployVirtualMachineCmd()
createvm.serviceofferingid = self.svid
createvm.templateid = self.templateId
createvm.zoneid = self.zoneId
vmcmds.append(createvm)

result = self.testClient.submitCmdsAndWait(vmcmds, 5)
for jobstatus in result:
if jobstatus.status == 1:
self.debug(jobstatus.result.id)
self.debug(jobstatus.result.displayname)
else:
self.debug(jobstatus.result)

creatvolume = createVolume.createVolumeCmd()
creatvolume.name = "tetst" + str(uuid.uuid4())

@@ -49,7 +49,7 @@ if __name__ == "__main__":

asyncJobResult = testclient.submitCmdsAndWait(cmds, 6)

for handle, jobStatus in asyncJobResult.iteritems():
for jobStatus in asyncJobResult:
if jobStatus.status:
print jobStatus.result[0].id, jobStatus.result[0].templatename, jobStatus.startTime, jobStatus.endTime
else:

@@ -74,7 +74,7 @@ a:hover {
width:181px;
height:66px;
float:left;
background:url(../images/login_logo.gif) no-repeat top left;
background:url(../images/login_logo.png) no-repeat top left;
margin:0 0 0 75px;
display:inline;
padding:0;

@@ -1391,7 +1391,7 @@ a:hover {
width:106px;
height:37px;
float:left;
background:url(../images/logo.gif) no-repeat top left;
background:url(../images/logo.png) no-repeat top left;
margin:0 0 0 8px;
display:inline;
padding:0;

@@ -3261,7 +3261,7 @@ a:hover.search_button {
width:132px;
height:22px;
float:right;
background:url(../images/poweredby.gif) no-repeat top left;
/*background:url(../images/poweredby.gif) no-repeat top left;*/
margin:2px 10px 0 0;
padding:0;
display:inline;

Binary file not shown.
Before Width: | Height: | Size: 6.1 KiB
Binary file not shown.
After Width: | Height: | Size: 5.0 KiB
Binary file not shown.
Before Width: | Height: | Size: 3.3 KiB
Binary file not shown.
After Width: | Height: | Size: 3.1 KiB

@ -7,6 +7,7 @@

<script language="javascript">
dictionary = {
    'label.action.edit.host' : '<fmt:message key="label.action.edit.host"/>',
    'label.action.enable.maintenance.mode' : '<fmt:message key="label.action.enable.maintenance.mode"/>',
    'label.action.enable.maintenance.mode.processing' : '<fmt:message key="label.action.enable.maintenance.mode.processing"/>',
    'message.action.host.enable.maintenance.mode' : '<fmt:message key="message.action.host.enable.maintenance.mode"/>',

@ -120,6 +121,8 @@ dictionary = {
    <div class="grid_row_cell" style="width: 79%;">
        <div class="row_celltitles" id="hosttags">
        </div>
        <input class="text" id="hosttags_edit" style="width: 200px; display: none;" type="text" />
        <div id="hosttags_edit_errormsg" style="display:none"></div>
    </div>
</div>
<div class="grid_rows odd">

@ -189,7 +192,9 @@ dictionary = {
    </div>
    <div class="grid_row_cell" style="width: 79%;">
        <div class="row_celltitles" id="oscategoryname">
        </div>
    </div>
        <select class="select" id="os_dropdown" style="width: 202px; display: none;">
        </select>
    </div>
</div>
<div class="grid_rows even">

@ -202,7 +207,14 @@ dictionary = {
                </div>
            </div>
        </div>

        </div>

        <div class="grid_botactionpanel">
            <div class="gridbot_buttons" id="save_button" style="display:none;"><fmt:message key="label.save"/></div>
            <div class="gridbot_buttons" id="cancel_button" style="display:none;"><fmt:message key="label.cancel"/></div>
        </div>

    </div>
</div>
<!-- Details tab (end)-->

@ -61,7 +61,25 @@ function afterLoadHostJSP() {
    var tabContentArray = [$("#tab_content_details"), $("#tab_content_instance"), $("#tab_content_router"), $("#tab_content_systemvm"), $("#tab_content_statistics")];
    var afterSwitchFnArray = [hostJsonToDetailsTab, hostJsonToInstanceTab, hostJsonToRouterTab, hostJsonToSystemvmTab, hostJsonToStatisticsTab];
    switchBetweenDifferentTabs(tabArray, tabContentArray, afterSwitchFnArray);

    $readonlyFields = $("#tab_content_details").find("#hosttags,#oscategoryname");
    $editFields = $("#tab_content_details").find("#hosttags_edit,#os_dropdown");

    $.ajax({
        data: createURL("command=listOsCategories"),
        dataType: "json",
        success: function(json) {
            var categories = json.listoscategoriesresponse.oscategory;
            var $dropdown = $("#tab_content_details").find("#os_dropdown").empty();
            $dropdown.append("<option value=''>None</option>");
            if (categories != null && categories.length > 0) {
                for (var i = 0; i < categories.length; i++) {
                    $dropdown.append("<option value='" + categories[i].id + "'>" + fromdb(categories[i].name) + "</option>");
                }
            }
        }
    });

    hostRefreshDataBinding();
}

@ -203,43 +221,45 @@ function hostBuildActionMenu(jsonObj, $thisTab, $midmenuItem1) {
    $actionMenu.find("#action_list").empty();
    var noAvailableActions = true;

    if (jsonObj.state == 'Up' || jsonObj.state == "Connecting") {
    if (jsonObj.state == 'Up' || jsonObj.state == "Connecting") {
        buildActionLinkForTab("label.action.edit.host", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.enable.maintenance.mode", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.force.reconnect", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.update.OS.preference", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.force.reconnect", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.update.OS.preference", hostActionMap, $actionMenu, $midmenuItem1, $thisTab); //temp
        noAvailableActions = false;
    }
    else if(jsonObj.state == 'Down') {
        buildActionLinkForTab("label.action.enable.maintenance.mode", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.update.OS.preference", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
    else if(jsonObj.state == 'Down') {
        buildActionLinkForTab("label.action.edit.host", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.enable.maintenance.mode", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.remove.host", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        noAvailableActions = false;
    }
    else if(jsonObj.state == "Alert") {
        buildActionLinkForTab("label.action.update.OS.preference", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.edit.host", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.remove.host", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        noAvailableActions = false;
    }
    else if (jsonObj.state == "ErrorInMaintenance") {
    else if (jsonObj.state == "ErrorInMaintenance") {
        buildActionLinkForTab("label.action.edit.host", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.enable.maintenance.mode", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.cancel.maintenance.mode", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.update.OS.preference", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        noAvailableActions = false;
    }
    else if (jsonObj.state == "PrepareForMaintenance") {
        buildActionLinkForTab("label.action.cancel.maintenance.mode", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.update.OS.preference", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
    else if (jsonObj.state == "PrepareForMaintenance") {
        buildActionLinkForTab("label.action.edit.host", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.cancel.maintenance.mode", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        noAvailableActions = false;
    }
    else if (jsonObj.state == "Maintenance") {
        buildActionLinkForTab("label.action.cancel.maintenance.mode", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.update.OS.preference", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
    else if (jsonObj.state == "Maintenance") {
        buildActionLinkForTab("label.action.edit.host", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.cancel.maintenance.mode", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.remove.host", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        noAvailableActions = false;
    }
    else if (jsonObj.state == "Disconnected"){
        buildActionLinkForTab("label.action.update.OS.preference", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.edit.host", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        buildActionLinkForTab("label.action.remove.host", hostActionMap, $actionMenu, $midmenuItem1, $thisTab);
        noAvailableActions = false;
    }

@ -548,6 +568,9 @@ function populateForUpdateOSDialog(oscategoryid) {

var hostActionMap = {
    "label.action.edit.host": {
        dialogBeforeActionFn: doEditHost
    },
    "label.action.enable.maintenance.mode": {
        isAsyncJob: true,
        asyncJobResponse: "preparehostformaintenanceresponse",

@ -604,6 +627,59 @@ var hostActionMap = {
    }
}

function doEditHost($actionLink, $detailsTab, $midmenuItem1) {
    var jsonObj = $midmenuItem1.data("jsonObj");
    $detailsTab.find("#os_dropdown").val(jsonObj.oscategoryid);

    $readonlyFields.hide();
    $editFields.show();
    $detailsTab.find("#cancel_button, #save_button").show();

    $detailsTab.find("#cancel_button").unbind("click").bind("click", function(event){
        cancelEditMode($detailsTab);
        return false;
    });
    $detailsTab.find("#save_button").unbind("click").bind("click", function(event){
        doEditHost2($actionLink, $detailsTab, $midmenuItem1, $readonlyFields, $editFields);
        return false;
    });
}

function doEditHost2($actionLink, $detailsTab, $midmenuItem1, $readonlyFields, $editFields) {
    var isValid = true;
    isValid &= validateString("Host Tags", $detailsTab.find("#hosttags_edit"), $detailsTab.find("#hosttags_edit_errormsg"), true); //optional
    if (!isValid)
        return;

    var jsonObj = $midmenuItem1.data("jsonObj");
    var id = jsonObj.id;

    var array1 = [];
    array1.push("&id="+id);

    var hosttags = $detailsTab.find("#hosttags_edit").val();
    array1.push("&hosttags="+todb(hosttags));

    var osCategoryId = $detailsTab.find("#os_dropdown").val();
    if (osCategoryId != null && osCategoryId.length > 0)
        array1.push("&osCategoryId="+osCategoryId);

    $.ajax({
        data: createURL("command=updateHost"+array1.join("")),
        dataType: "json",
        async: false,
        success: function(json) {
            var jsonObj = json.updatehostresponse.host;
            hostToMidmenu(jsonObj, $midmenuItem1);
            hostToRightPanel($midmenuItem1);

            $editFields.hide();
            $readonlyFields.show();
            $("#save_button, #cancel_button").hide();
        }
    });
}

function doEnableMaintenanceMode($actionLink, $detailsTab, $midmenuItem1){
    var jsonObj = $midmenuItem1.data("jsonObj");

@ -81,6 +81,7 @@ import com.vmware.vim25.VirtualMachineSnapshotInfo;
import com.vmware.vim25.VirtualMachineSnapshotTree;
import com.vmware.vim25.VirtualPCIController;
import com.vmware.vim25.VirtualSCSIController;
import com.vmware.vim25.VirtualSCSISharing;

public class VirtualMachineMO extends BaseMO {
    private static final Logger s_logger = Logger.getLogger(VirtualMachineMO.class);

@ -1526,7 +1527,43 @@ public class VirtualMachineMO extends BaseMO {
        assert(false);
        throw new Exception("SCSI Controller Not Found");
    }

    public int getScsiDeviceControllerKeyNoException() throws Exception {
        VirtualDevice[] devices = (VirtualDevice [])_context.getServiceUtil().
            getDynamicProperty(_mor, "config.hardware.device");

        if(devices != null && devices.length > 0) {
            for(VirtualDevice device : devices) {
                if(device instanceof VirtualLsiLogicController) {
                    return device.getKey();
                }
            }
        }

        return -1;
    }

    public void ensureScsiDeviceController() throws Exception {
        int scsiControllerKey = getScsiDeviceControllerKeyNoException();
        if(scsiControllerKey < 0) {
            VirtualMachineConfigSpec vmConfig = new VirtualMachineConfigSpec();

            // Scsi controller
            VirtualLsiLogicController scsiController = new VirtualLsiLogicController();
            scsiController.setSharedBus(VirtualSCSISharing.noSharing);
            scsiController.setBusNumber(0);
            scsiController.setKey(1);
            VirtualDeviceConfigSpec scsiControllerSpec = new VirtualDeviceConfigSpec();
            scsiControllerSpec.setDevice(scsiController);
            scsiControllerSpec.setOperation(VirtualDeviceConfigSpecOperation.add);

            vmConfig.setDeviceChange(new VirtualDeviceConfigSpec[] { scsiControllerSpec });
            // configureVm() is expected to return true on success, so only fail when the reconfigure did not complete.
            if(!configureVm(vmConfig)) {
                throw new Exception("Unable to add Scsi controller");
            }
        }
    }

    // return pair of VirtualDisk and disk device bus name(ide0:0, etc)
    public Pair<VirtualDisk, String> getDiskDevice(String vmdkDatastorePath, boolean matchExactly) throws Exception {
        VirtualDevice[] devices = (VirtualDevice[])_context.getServiceUtil().getDynamicProperty(_mor, "config.hardware.device");
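
A brief, hedged usage sketch of the two methods added above: before re-attaching disks to a stopped VM that is being reused, the calling resource code can invoke ensureScsiDeviceController() so that a SCSI controller is guaranteed to exist. The helper name prepareReusedVm and its call site are illustrative assumptions, not part of this change.

    // Illustrative only: vmMo is assumed to wrap an existing, powered-off VM
    // whose old disks and NICs have already been torn down.
    private void prepareReusedVm(VirtualMachineMO vmMo) throws Exception {
        // ensureScsiDeviceController() is a no-op when a VirtualLsiLogicController is
        // already present (getScsiDeviceControllerKeyNoException() returns its key);
        // otherwise it adds a non-shared LsiLogic controller on bus 0.
        vmMo.ensureScsiDeviceController();
    }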