diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java
index f154a46e034..f7d82d34d0d 100755
--- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java
+++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java
@@ -24,7 +24,6 @@ import com.cloud.agent.api.baremetal.IpmISetBootDevCommand.BootDev;
 import com.cloud.baremetal.database.BaremetalPxeDao;
 import com.cloud.baremetal.database.BaremetalPxeVO;
 import com.cloud.baremetal.networkservice.BaremetalPxeManager.BaremetalPxeType;
-import com.cloud.configuration.Config;
 import com.cloud.dc.DataCenter;
 import com.cloud.deploy.DeployDestination;
 import com.cloud.exception.AgentUnavailableException;
@@ -238,7 +237,8 @@ public class BaremetalKickStartServiceImpl extends BareMetalPxeServiceBase imple
             throw new CloudRuntimeException(String.format("failed preparing PXE in virtual router[id:%s], because %s", vr.getId(), ret.second()));
         }
 
-        String internalServerIp = _configDao.getValue(Config.BaremetalInternalStorageServer.key());
+        //String internalServerIp = _configDao.getValue(Config.BaremetalInternalStorageServer.key());
+        String internalServerIp = "10.223.110.231";
         ret = SshHelper.sshExecute(mgmtNic.getIp4Address(), 3922, "root", getSystemVMKeyFile(), null,
                 String.format("/usr/bin/baremetal_snat.sh %s %s", mgmtNic.getIp4Address(), internalServerIp)
         );
diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java
index ab8eae90288..b314bdbef45 100755
--- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java
+++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java
@@ -19,6 +19,7 @@ package com.cloud.baremetal.networkservice;
 
 import com.cloud.baremetal.database.BaremetalPxeVO;
+import com.cloud.dc.DataCenter;
 import com.cloud.dc.Pod;
 import com.cloud.deploy.DeployDestination;
 import com.cloud.exception.ConcurrentOperationException;
@@ -99,6 +100,10 @@ public class BaremetalPxeElement extends AdapterBase implements NetworkElement {
     @Override
     public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException,
             ResourceUnavailableException, InsufficientCapacityException {
+        if (dest.getDataCenter().getNetworkType() == DataCenter.NetworkType.Advanced){
+            return true;
+        }
+
         if (offering.isSystemOnly() || !canHandle(dest, offering.getTrafficType(), network.getGuestType())) {
             s_logger.debug("BaremetalPxeElement can not handle network offering: " + offering.getName());
             return false;
diff --git a/scripts/network/ping/baremetal_snat.sh b/scripts/network/ping/baremetal_snat.sh
new file mode 100755
index 00000000000..4cbf20eec00
--- /dev/null
+++ b/scripts/network/ping/baremetal_snat.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+set +u
+
+mgmt_nic_ip=$1
+internal_server_ip=$2
+
+ip route | grep "$internal_server_ip" > /dev/null
+
+if [ $? -ne 0 ]; then
+    ip route add $internal_server_ip via $mgmt_nic_ip
+fi
+
+iptables-save | grep -- "-A POSTROUTING -d $internal_server_ip" > /dev/null
+
+if [ $? -ne 0 ]; then
+    iptables -t nat -A POSTROUTING -d $internal_server_ip -j SNAT --to-source $mgmt_nic_ip
+fi
\ No newline at end of file
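
For orientation, the SshHelper.sshExecute() call patched in BaremetalKickStartServiceImpl runs the new helper on the virtual router, so the net effect on the VR is roughly the sketch below (illustrative only; the placeholder names in angle brackets are not values taken from this patch):

# run on the VR as root over port 3922, authenticated with the system VM key
/usr/bin/baremetal_snat.sh <vr-mgmt-nic-ip> <internal-storage-server-ip>
# baremetal_snat.sh then ensures, if not already present:
#   ip route add <internal-storage-server-ip> via <vr-mgmt-nic-ip>
#   iptables -t nat -A POSTROUTING -d <internal-storage-server-ip> -j SNAT --to-source <vr-mgmt-nic-ip>
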
diff --git a/scripts/network/ping/prepare_pxe.sh b/scripts/network/ping/prepare_pxe.sh
new file mode 100755
index 00000000000..7b31c7f9fb2
--- /dev/null
+++ b/scripts/network/ping/prepare_pxe.sh
@@ -0,0 +1,58 @@
+#!/bin/sh
+set +u
+
+err_exit() {
+    echo $1
+    exit 1
+}
+
+success() {
+    exit 0
+}
+
+TFTP_ROOT='/opt/tftpboot'
+PXELINUX_CFG_DIR='/opt/tftpboot/pxelinux.cfg'
+
+kernel_nfs_path=$1
+kernel_file_name=`basename $kernel_nfs_path`
+initrd_nfs_path=$2
+initrd_file_name=`basename $initrd_nfs_path`
+tmpt_uuid=$3
+pxe_cfg_filename=$4
+ks_file=$5
+
+tmpt_dir=$TFTP_ROOT/$tmpt_uuid
+if [ -d $tmpt_dir ]; then
+    success
+fi
+
+mkdir -p $tmpt_dir
+
+kernel_path=$tmpt_uuid/$kernel_file_name
+initrd_path=$tmpt_uuid/$initrd_file_name
+
+mnt_path=/tmp/$(uuid)
+
+mkdir -p $mnt_path
+mount `dirname $kernel_nfs_path` $mnt_path
+cp -f $mnt_path/$kernel_file_name $tmpt_dir/$kernel_file_name
+umount $mnt_path
+
+mount `dirname $initrd_nfs_path` $mnt_path
+cp -f $mnt_path/$initrd_file_name $tmpt_dir/$initrd_file_name
+umount $mnt_path
+
+cat > $PXELINUX_CFG_DIR/$pxe_cfg_filename <

                         host.getClusterId());
                 }
+                Pod pod = _podDao.findById(host.getPodId());
+                Cluster cluster = _clusterDao.findById(host.getClusterId());
+
+                if (vm.getHypervisorType() == HypervisorType.BareMetal) {
+                    DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>());
+                    s_logger.debug("Returning Deployment Destination: " + dest);
+                    return dest;
+                }
+
                 // search for storage under the zone, pod, cluster of the host.
                 DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), hostIdSpecified, plan.getPoolId(), null,
@@ -335,8 +344,8 @@ StateListener {
                     suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
                 if (potentialResources != null) {
-                    Pod pod = _podDao.findById(host.getPodId());
-                    Cluster cluster = _clusterDao.findById(host.getClusterId());
+                    pod = _podDao.findById(host.getPodId());
+                    cluster = _clusterDao.findById(host.getClusterId());
                     Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                     // remove the reused vol<->pool from destination, since
                     // we don't have to prepare this volume.
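
For context, prepare_pxe.sh takes five positional arguments (kernel NFS path, initrd NFS path, template UUID, pxelinux.cfg file name, kickstart file location), mounts the NFS directories to copy the kernel and initrd under $TFTP_ROOT/<template-uuid>, and then writes $PXELINUX_CFG_DIR/<pxe-cfg-filename>. A sketch of an invocation on the PXE server follows; the host and path placeholders are illustrative, not values taken from this patch:

# illustrative only; argument order follows the variable assignments at the top of prepare_pxe.sh
sh prepare_pxe.sh <nfs-host>:/export/templates/vmlinuz \
    <nfs-host>:/export/templates/initrd.img \
    <template-uuid> <pxe-cfg-filename> <kickstart-file>
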