CLOUDSTACK-6278

Baremetal Advanced Networking support
Frank.Zhang 2014-08-04 15:00:21 -07:00
parent 44dff6c426
commit 1ee7e0c77e
5 changed files with 94 additions and 4 deletions


@@ -24,7 +24,6 @@ import com.cloud.agent.api.baremetal.IpmISetBootDevCommand.BootDev;
 import com.cloud.baremetal.database.BaremetalPxeDao;
 import com.cloud.baremetal.database.BaremetalPxeVO;
 import com.cloud.baremetal.networkservice.BaremetalPxeManager.BaremetalPxeType;
-import com.cloud.configuration.Config;
 import com.cloud.dc.DataCenter;
 import com.cloud.deploy.DeployDestination;
 import com.cloud.exception.AgentUnavailableException;
@@ -238,7 +237,8 @@ public class BaremetalKickStartServiceImpl extends BareMetalPxeServiceBase imple
             throw new CloudRuntimeException(String.format("failed preparing PXE in virtual router[id:%s], because %s", vr.getId(), ret.second()));
         }
-        String internalServerIp = _configDao.getValue(Config.BaremetalInternalStorageServer.key());
+        //String internalServerIp = _configDao.getValue(Config.BaremetalInternalStorageServer.key());
+        String internalServerIp = "10.223.110.231";
         ret = SshHelper.sshExecute(mgmtNic.getIp4Address(), 3922, "root", getSystemVMKeyFile(), null,
                 String.format("/usr/bin/baremetal_snat.sh %s %s", mgmtNic.getIp4Address(), internalServerIp)
         );
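For context on the call above: it reaches the virtual router over its management NIC on port 3922 as root, authenticated with the system VM key, and runs the SNAT helper installed at /usr/bin/baremetal_snat.sh. A rough manual equivalent is sketched below; both IP addresses are made-up placeholders and SYSTEMVM_KEY stands in for whatever path getSystemVMKeyFile() resolves to, none of which comes from this change.

# Illustrative only: both IPs are invented, and SYSTEMVM_KEY is a stand-in for
# the key path getSystemVMKeyFile() returns in a given installation.
MGMT_NIC_IP=169.254.2.10
INTERNAL_SERVER_IP=10.10.10.10
ssh -i "$SYSTEMVM_KEY" -p 3922 root@"$MGMT_NIC_IP" \
    "/usr/bin/baremetal_snat.sh $MGMT_NIC_IP $INTERNAL_SERVER_IP"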


@@ -19,6 +19,7 @@
 package com.cloud.baremetal.networkservice;
 import com.cloud.baremetal.database.BaremetalPxeVO;
+import com.cloud.dc.DataCenter;
 import com.cloud.dc.Pod;
 import com.cloud.deploy.DeployDestination;
 import com.cloud.exception.ConcurrentOperationException;
@@ -99,6 +100,10 @@ public class BaremetalPxeElement extends AdapterBase implements NetworkElement {
     @Override
     public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException,
             ResourceUnavailableException, InsufficientCapacityException {
+        if (dest.getDataCenter().getNetworkType() == DataCenter.NetworkType.Advanced){
+            return true;
+        }
+
         if (offering.isSystemOnly() || !canHandle(dest, offering.getTrafficType(), network.getGuestType())) {
             s_logger.debug("BaremetalPxeElement can not handle network offering: " + offering.getName());
             return false;


@@ -0,0 +1,18 @@
#!/bin/bash
set +u

mgmt_nic_ip=$1
internal_server_ip=$2

ip route | grep "$internal_server_ip" > /dev/null
if [ $? -ne 0 ]; then
    ip route add $internal_server_ip via $mgmt_nic_ip
fi

iptables-save | grep -- "-A POSTROUTING -d $internal_server_ip" > /dev/null
if [ $? -ne 0 ]; then
    iptables -t nat -A POSTROUTING -d $internal_server_ip -j SNAT --to-source $mgmt_nic_ip
fi
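As a sketch of the state the script converges on, a run with invented addresses should leave exactly one host route and one SNAT rule behind, and a second run changes nothing because both greps then succeed:

# Placeholder addresses, not values from this commit.
/usr/bin/baremetal_snat.sh 169.254.2.10 10.10.10.10

ip route | grep 10.10.10.10
# expected shape: 10.10.10.10 via 169.254.2.10 dev eth0   (interface and extra fields vary)

iptables-save -t nat | grep -- "-A POSTROUTING -d 10.10.10.10"
# expected shape: -A POSTROUTING -d 10.10.10.10/32 -j SNAT --to-source 169.254.2.10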


@@ -0,0 +1,58 @@
#!/bin/sh
set +u

err_exit() {
    echo $1
    exit 1
}

success() {
    exit 0
}

TFTP_ROOT='/opt/tftpboot'
PXELINUX_CFG_DIR='/opt/tftpboot/pxelinux.cfg'

kernel_nfs_path=$1
kernel_file_name=`basename $kernel_nfs_path`
initrd_nfs_path=$2
initrd_file_name=`basename $initrd_nfs_path`
tmpt_uuid=$3
pxe_cfg_filename=$4
ks_file=$5

tmpt_dir=$TFTP_ROOT/$tmpt_uuid
if [ -d $tmpt_dir ]; then
    success
fi

mkdir -p $tmpt_dir
kernel_path=$tmpt_uuid/$kernel_file_name
initrd_path=$tmpt_uuid/$initrd_file_name

mnt_path=/tmp/$(uuid)
mkdir -p $mnt_path

mount `dirname $kernel_nfs_path` $mnt_path
cp -f $mnt_path/$kernel_file_name $tmpt_dir/$kernel_file_name
umount $mnt_path

mount `dirname $initrd_nfs_path` $mnt_path
cp -f $mnt_path/$initrd_file_name $tmpt_dir/$initrd_file_name
umount $mnt_path

cat > $PXELINUX_CFG_DIR/$pxe_cfg_filename <<EOF
DEFAULT default
PROMPT 1
TIMEOUT 26
DISPLAY boot.msg
LABEL default
KERNEL $kernel_path
APPEND ramdisk_size=66000 initrd=$initrd_path ks=$ks_file
EOF

success
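As a sketch of what the script produces, an invocation with invented arguments copies the kernel and initrd from the NFS exports into /opt/tftpboot/<template uuid>/ and writes a pxelinux.cfg entry from the template above. Every argument value below, and the script file name itself, is a placeholder, since this view does not show the name the file is installed under.

# All argument values are invented for illustration; replace prepare_pxe.sh with
# whatever name this script is actually installed as.
./prepare_pxe.sh 10.10.10.10:/export/centos/vmlinuz \
    10.10.10.10:/export/centos/initrd.img \
    6a0c6d07-d8b8-4f6d-9c9f-7e1f3a2b4c5d \
    01-52-54-00-12-34-56 \
    http://10.10.10.10/ks/centos.cfg

cat /opt/tftpboot/pxelinux.cfg/01-52-54-00-12-34-56
# DEFAULT default
# PROMPT 1
# TIMEOUT 26
# DISPLAY boot.msg
# LABEL default
# KERNEL 6a0c6d07-d8b8-4f6d-9c9f-7e1f3a2b4c5d/vmlinuz
# APPEND ramdisk_size=66000 initrd=6a0c6d07-d8b8-4f6d-9c9f-7e1f3a2b4c5d/initrd.img ks=http://10.10.10.10/ks/centos.cfg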


@@ -318,6 +318,15 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
                     host.getClusterId());
             }
+            Pod pod = _podDao.findById(host.getPodId());
+            Cluster cluster = _clusterDao.findById(host.getClusterId());
+            if (vm.getHypervisorType() == HypervisorType.BareMetal) {
+                DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>());
+                s_logger.debug("Returning Deployment Destination: " + dest);
+                return dest;
+            }
+
             // search for storage under the zone, pod, cluster of the host.
             DataCenterDeployment lastPlan =
                 new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), hostIdSpecified, plan.getPoolId(), null,
@@ -335,8 +344,8 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
                     suitableHosts, suitableVolumeStoragePools, avoids,
                     getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
             if (potentialResources != null) {
-                Pod pod = _podDao.findById(host.getPodId());
-                Cluster cluster = _clusterDao.findById(host.getClusterId());
+                pod = _podDao.findById(host.getPodId());
+                cluster = _clusterDao.findById(host.getClusterId());
                 Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                 // remove the reused vol<->pool from destination, since
                 // we don't have to prepare this volume.