After rebase, fix bugs: use Hypervisor.HypervisorType consistently, split volume allocation (allocVolume) from creation (createVolume), register template processors before exposing them, and simplify the KVM host check in discovery.

edison 2010-09-29 18:35:35 -07:00
parent ad1ef80459
commit 7615f569ff
13 changed files with 70 additions and 127 deletions

View File

@@ -20,7 +20,7 @@ package com.cloud.vm;
import java.util.List;
import java.util.Map;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.offering.ServiceOffering;
public class VirtualMachineProfile {
@@ -28,7 +28,7 @@ public class VirtualMachineProfile {
int _cpus;
int _speed; // in mhz
long _ram; // in bytes
Hypervisor.Type _hypervisorType;
HypervisorType _hypervisorType;
VirtualMachine.Type _type;
Map<String, String> _params;
Long _templateId;
@@ -79,7 +79,7 @@ public class VirtualMachineProfile {
return _disks;
}
public Hypervisor.Type getHypervisorType() {
public HypervisorType getHypervisorType() {
return _hypervisorType;
}
@@ -87,7 +87,7 @@ public class VirtualMachineProfile {
return _vm;
}
public VirtualMachineProfile(long id, int core, int speed, long ram, Long templateId, Hypervisor.Type type, Map<String, String> params) {
public VirtualMachineProfile(long id, int core, int speed, long ram, Long templateId, HypervisorType type, Map<String, String> params) {
this._cpus = core;
this._speed = speed;
this._ram = ram;
@@ -112,4 +112,4 @@ public class VirtualMachineProfile {
public String toString() {
return "VM-" + _type + "-" + _vm.getId();
}
}
}
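For illustration only (not part of this commit), a caller of the updated constructor now passes the nested Hypervisor.HypervisorType enum instead of Hypervisor.Type; the ids and sizes below are made up.

// Hypothetical usage sketch of the new constructor signature.
import java.util.HashMap;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.vm.VirtualMachineProfile;

class ProfileUsageSketch {
    VirtualMachineProfile newKvmProfile() {
        // 1 vCPU at 500 MHz, 512 MB of RAM (in bytes), template id 2, no extra params.
        return new VirtualMachineProfile(1L, 1, 500, 512L * 1024 * 1024, 2L,
                HypervisorType.KVM, new HashMap<String, String>());
    }
}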

View File

@@ -25,6 +25,7 @@ import javax.persistence.Id;
import javax.persistence.Table;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.org.Cluster;
import com.cloud.utils.NumbersUtil;
@Entity

View File

@@ -490,7 +490,7 @@ public interface ManagementServer {
* @return true if success, false if not
*/
VolumeVO allocVolume(long accountId, long userId, String name, long zoneId, long diskOfferingId, long startEventId, long size) throws InternalErrorException;
VolumeVO createVolume(long accountId, long userId, String name, long zoneId, long diskOfferingId, long startEventId, long size) throws InternalErrorException;
long createVolumeAsync(long userId, long accountId, String name, long zoneId, long diskOfferingId, long size) throws InvalidParameterValueException, InternalErrorException, ResourceAllocationException;
/**

View File

@@ -213,7 +213,8 @@ public interface StorageManager extends Manager {
* @param size
* @return VolumeVO
*/
VolumeVO createVolume(long volumeId, HypervisorType hyperType);
public VolumeVO createVolume(VolumeVO volume, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, Long clusterId,
ServiceOfferingVO offering, DiskOfferingVO diskOffering, List<StoragePoolVO> avoids, long size, HypervisorType hyperType);
VolumeVO allocVolume(long accountId, long userId, String name, DataCenterVO dc, DiskOfferingVO diskOffering, long startEventId, long size);
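As a rough orientation (inferred from the signatures above, not stated in the commit), allocVolume now only records the volume in the database, while the new createVolume overload performs the actual creation on a storage pool once placement details are known. A hedged caller sketch follows; every local variable name is illustrative.

// Two-phase flow sketch; vm, template, dc, pod, offerings, etc. are assumed to be in scope.
VolumeVO vol = _storageMgr.allocVolume(accountId, userId, "data-1", dc, diskOffering, eventId, size);
// ... later, for example when the disk is attached to a running VM:
vol = _storageMgr.createVolume(vol, vm, template, dc, pod, clusterId, offering, diskOffering,
        new ArrayList<StoragePoolVO>(), vol.getSize(), hyperType);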

View File

@@ -237,9 +237,7 @@ public class VolumeVO implements Volume {
this.recreatable = false;
}
public void setState(State state) {
this.state = state;
}
public boolean isRecreatable() {
return recreatable;
@@ -472,9 +470,9 @@ public class VolumeVO implements Volume {
return state;
}
public void setState(State st) {
this.state = st;
}
public void setState(State state) {
this.state = state;
}
public void setUpdated(Date updated) {
this.updated = updated;

View File

@@ -43,6 +43,7 @@ import com.cloud.user.Account;
import com.cloud.utils.component.Inject;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.JoinBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.Transaction;
@@ -193,7 +194,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
TmpltsInZoneSearch = createSearchBuilder();
TmpltsInZoneSearch.and("removed", TmpltsInZoneSearch.entity().getRemoved(), SearchCriteria.Op.NULL);
TmpltsInZoneSearch.join("tmpltzone", tmpltZoneSearch, tmpltZoneSearch.entity().getTemplateId(), TmpltsInZoneSearch.entity().getId());
TmpltsInZoneSearch.join("tmpltzone", tmpltZoneSearch, tmpltZoneSearch.entity().getTemplateId(), TmpltsInZoneSearch.entity().getId(), JoinBuilder.JoinType.INNER);
tmpltZoneSearch.done();
TmpltsInZoneSearch.done();

View File

@@ -769,7 +769,7 @@ public class DownloadManagerImpl implements DownloadManager {
s_logger.info("createtmplt.sh found in " + createTmpltScr);
List<Processor> processors = new ArrayList<Processor>();
_processors = new Adapters<Processor>("processors", processors);
Processor processor = new VhdProcessor();
processor.configure("VHD Processor", params);
@@ -787,6 +787,7 @@ public class DownloadManagerImpl implements DownloadManager {
processor.configure("VMDK Processor", params);
processors.add(processor);
_processors = new Adapters<Processor>("processors", processors);
// Add more processors here.
threadPool = Executors.newFixedThreadPool(numInstallThreads);
return true;

View File

@@ -62,7 +62,7 @@ public class VolumeOperationExecutor extends BaseAsyncJobExecutor {
if (op == VolumeOp.Create) {
eventType = EventTypes.EVENT_VOLUME_CREATE;
failureDescription = "Failed to create volume";
volume = asyncMgr.getExecutorContext().getManagementServer().allocVolume(param.getUserId(), param.getAccountId(), param.getName(), param.getZoneId(), param.getDiskOfferingId(), param.getEventId(), param.getSize());
volume = asyncMgr.getExecutorContext().getManagementServer().createVolume(param.getUserId(), param.getAccountId(), param.getName(), param.getZoneId(), param.getDiskOfferingId(), param.getEventId(), param.getSize());
if (volume.getStatus() == AsyncInstanceCreateStatus.Corrupted) {
asyncMgr.completeAsyncJob(getJob().getId(), AsyncJobResult.STATUS_FAILED, BaseCmd.INTERNAL_ERROR, "Failed to create volume.");
} else {

View File

@@ -135,7 +135,7 @@ public enum Config {
NetworkType("Advanced", ManagementServer.class, String.class, "network.type", "vlan", "The type of network that this deployment will use.", "vlan,direct"),
LinkLocalIpNums("Advanced", ManagementServer.class, Integer.class, "linkLocalIp.nums", "10", "The number of link local ip that needed by domR(in power of 2)", null),
HypervisorDefaultType("Advanced", ManagementServer.class, String.class, "hypervisor.type", HypervisorType.KVM.toString(), "The type of hypervisor that this deployment will use.", "kvm|xenserver"),
HypervisorList("Advanced", ManagementServer.class, String.class, "hypervisor.list", HypervisorType.KVM + "," + HypervisorType.XenServer + "," + HypervisorType.VmWare, "The type of hypervisor that this deployment will use.", "kvm,xenserver,vmware"),
HypervisorList("Advanced", ManagementServer.class, String.class, "hypervisor.list", HypervisorType.KVM + "," + HypervisorType.XenServer + "," + HypervisorType.VmWare, "The list of hypervisors that this deployment will use.", "kvm,xenserver,vmware"),
ManagementHostIPAdr("Advanced", ManagementServer.class, String.class, "host", "localhost", "The ip address of management server", null),
UseSecondaryStorageVm("Advanced", ManagementServer.class, Boolean.class, "secondary.storage.vm", "false", "Deploys a VM per zone to manage secondary storage if true, otherwise secondary storage is mounted on management server", null),
EventPurgeDelay("Advanced", ManagementServer.class, Integer.class, "event.purge.delay", "0", "Events older than specified number days will be purged", null),
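Assuming HypervisorType.toString() returns the enum constant name (an assumption, not shown in this diff), the concatenated default stored for hypervisor.list is:

hypervisor.list = KVM,XenServer,VmWare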

View File

@@ -206,14 +206,6 @@ public class KvmServerDiscoverer extends DiscovererBase implements Discoverer,
return null;
}
/*Is a KVM host?*/
sshSession = sshConnection.openSession();
sshSession.execCommand("lsmod|grep kvm >& /dev/null");
if (sshSession.getExitStatus() != 0) {
s_logger.debug("It's not a KVM enabled machine");
return null;
}
if (!sshExecuteCmd(sshConnection, "lsmod|grep kvm >& /dev/null", 3)) {
s_logger.debug("It's not a KVM enabled machine");
return null;
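The inline openSession/execCommand/getExitStatus check above is replaced by the class's sshExecuteCmd helper. The helper's implementation is not part of this diff; a minimal sketch of what such a retry-based, exit-status-checking helper might look like (using the same trilead-ssh2 API the removed code relies on) is:

// Hypothetical sketch only; the real sshExecuteCmd lives elsewhere in KvmServerDiscoverer.
private boolean sshExecuteCmd(com.trilead.ssh2.Connection conn, String cmd, int nTimes) {
    for (int i = 0; i < nTimes; i++) {
        try {
            com.trilead.ssh2.Session session = conn.openSession();
            try {
                session.execCommand(cmd);
                // Wait up to 30s for the remote command to report an exit status.
                session.waitForCondition(com.trilead.ssh2.ChannelCondition.EXIT_STATUS, 30000);
                Integer status = session.getExitStatus();
                if (status != null && status.intValue() == 0) {
                    return true;
                }
            } finally {
                session.close();
            }
        } catch (Exception e) {
            s_logger.debug("Failed to execute \"" + cmd + "\" over ssh, retrying", e);
        }
    }
    return false;
}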

View File

@@ -1771,7 +1771,7 @@ public class ManagementServerImpl implements ManagementServer {
}
@Override
public VolumeVO allocVolume(long userId, long accountId, String name, long zoneId, long diskOfferingId, long startEventId, long size) throws InternalErrorException {
public VolumeVO createVolume(long userId, long accountId, String name, long zoneId, long diskOfferingId, long startEventId, long size) throws InternalErrorException {
saveStartedEvent(userId, accountId, EventTypes.EVENT_VOLUME_CREATE, "Creating volume", startEventId);
DataCenterVO zone = _dcDao.findById(zoneId);
DiskOfferingVO diskOffering = _diskOfferingDao.findById(diskOfferingId);

View File

@@ -719,6 +719,7 @@ public class StorageManagerImpl implements StorageManager {
return new Pair<String, String>(vdiUUID, basicErrMsg);
}
@Override
@DB
public VolumeVO createVolume(VolumeVO volume, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, Long clusterId,
ServiceOfferingVO offering, DiskOfferingVO diskOffering, List<StoragePoolVO> avoids, long size, HypervisorType hyperType) {
@@ -735,8 +736,6 @@ public class StorageManagerImpl implements StorageManager {
Transaction txn = Transaction.currentTxn();
VolumeType volType = volume.getVolumeType();
VolumeTO created = null;
int retry = _retry;
while (--retry >= 0) {
@@ -811,7 +810,7 @@ public class StorageManagerImpl implements StorageManager {
volume.setSize(created.getSize());
volume.setPoolType(pool.getPoolType());
volume.setPodId(pod.getId());
volume.setState(Volume.State.Created);
volume.setState(Volume.State.Ready);
_volsDao.persist(volume);
return volume;
}
@@ -1575,106 +1574,15 @@ public class StorageManagerImpl implements StorageManager {
return _volsDao.findById(volume.getId());
}
@Override
@DB
public VolumeVO createVolume(long volumeId, HypervisorType hyperType)
{
String volumeName = "";
VolumeVO createdVolume = null;
try
{
// Determine the volume's name
volumeName = getRandomVolumeName();
// Create the Volume object and save it so that we can return it to the user
Account account = _accountDao.findById(accountId);
VolumeVO volume = new VolumeVO(userSpecifiedName, -1, -1, -1, -1, new Long(-1), null, null, 0, Volume.VolumeType.DATADISK);
volume.setPoolId(null);
volume.setDataCenterId(dc.getId());
volume.setPodId(null);
volume.setAccountId(accountId);
volume.setDomainId(account.getDomainId());
volume.setMirrorState(MirrorState.NOT_MIRRORED);
volume.setDiskOfferingId(diskOffering.getId());
volume.setStorageResourceType(Storage.StorageResourceType.STORAGE_POOL);
volume.setInstanceId(null);
volume.setUpdated(new Date());
volume.setStatus(AsyncInstanceCreateStatus.Creating);
volume.setDomainId(account.getDomainId());
volume.setSourceId(diskOffering.getId());
volume.setSourceType(SourceType.DiskOffering);
volume = _volsDao.persist(volume);
AsyncJobExecutor asyncExecutor = BaseAsyncJobExecutor.getCurrentExecutor();
if (asyncExecutor != null) {
AsyncJobVO job = asyncExecutor.getJob();
if (s_logger.isInfoEnabled())
s_logger.info("CreateVolume created a new instance " + volume.getId() + ", update async job-" + job.getId() + " progress status");
_asyncMgr.updateAsyncJobAttachment(job.getId(), "volume", volume.getId());
_asyncMgr.updateAsyncJobStatus(job.getId(), BaseCmd.PROGRESS_INSTANCE_CREATED, volume.getId());
}
List<StoragePoolVO> poolsToAvoid = new ArrayList<StoragePoolVO>();
Set<Long> podsToAvoid = new HashSet<Long>();
Pair<HostPodVO, Long> pod = null;
DataCenterVO dc = _dcDao.findById(volume.getDataCenterId());
DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
long accountId = volume.getAccountId();
while ((pod = _agentMgr.findPod(null, null, dc, volume.getAccountId(), podsToAvoid)) != null) {
if ((createdVolume = createVolume(volume, null, null, dc, pod.first(), null, null, diskOffering, poolsToAvoid, volume.getSize(), hyperType)) != null) {
break;
} else {
podsToAvoid.add(pod.first().getId());
}
}
// Create an event
EventVO event = new EventVO();
event.setAccountId(accountId);
event.setType(EventTypes.EVENT_VOLUME_CREATE);
Transaction txn = Transaction.currentTxn();
txn.start();
if (createdVolume != null) {
// Increment the number of volumes
_accountMgr.incrementResourceCount(accountId, ResourceType.volume);
// Set event parameters
long sizeMB = createdVolume.getSize() / (1024 * 1024);
StoragePoolVO pool = _storagePoolDao.findById(createdVolume.getPoolId());
String eventParams = "id=" + createdVolume.getId() + "\ndoId=" + diskOffering.getId() + "\ntId=" + -1 + "\ndcId=" + dc.getId() + "\nsize=" + sizeMB;
event.setLevel(EventVO.LEVEL_INFO);
event.setDescription("Created volume: " + createdVolume.getName() + " with size: " + sizeMB + " MB in pool: " + pool.getName());
event.setParameters(eventParams);
_eventDao.persist(event);
} else {
// Mark the existing volume record as corrupted
volume.setStatus(AsyncInstanceCreateStatus.Corrupted);
volume.setDestroyed(true);
_volsDao.update(volume.getId(), volume);
}
txn.commit();
} catch (Exception e) {
s_logger.error("Unhandled exception while saving volume " + volumeName, e);
}
return createdVolume;
}
/* Just allocate a volume in the database; don't send the create-volume command to the hypervisor. The volume is actually created only when it is attached to a VM. */
@Override
@DB
public VolumeVO allocVolume(long accountId, long userId, String userSpecifiedName, DataCenterVO dc, DiskOfferingVO diskOffering, long startEventId, long size) {
String volumeName = "";
VolumeVO createdVolume = null;
VolumeVO allocatedVolume = null;
try
{
@@ -1701,6 +1609,8 @@ public class StorageManagerImpl implements StorageManager {
volume.setState(Volume.State.Allocated);
volume = _volsDao.persist(volume);
allocatedVolume = volume;
AsyncJobExecutor asyncExecutor = BaseAsyncJobExecutor.getCurrentExecutor();
if (asyncExecutor != null) {
AsyncJobVO job = asyncExecutor.getJob();
@@ -1710,13 +1620,41 @@ public class StorageManagerImpl implements StorageManager {
_asyncMgr.updateAsyncJobAttachment(job.getId(), "volume", volume.getId());
_asyncMgr.updateAsyncJobStatus(job.getId(), BaseCmd.PROGRESS_INSTANCE_CREATED, volume.getId());
}
createdVolume = volume;
}
// Create an event
EventVO event = new EventVO();
event.setAccountId(accountId);
event.setType(EventTypes.EVENT_VOLUME_CREATE);
Transaction txn = Transaction.currentTxn();
txn.start();
if (allocatedVolume != null) {
// Increment the number of volumes
_accountMgr.incrementResourceCount(accountId, ResourceType.volume);
// Set event parameters
long sizeMB = allocatedVolume.getSize() / (1024 * 1024);
String eventParams = "id=" + allocatedVolume.getId() + "\ndoId=" + diskOffering.getId() + "\ntId=" + -1 + "\ndcId=" + dc.getId() + "\nsize=" + sizeMB;
event.setLevel(EventVO.LEVEL_INFO);
event.setDescription("Allocated volume: " + allocatedVolume.getName() + " with size: " + sizeMB + " MB.");
event.setParameters(eventParams);
_eventDao.persist(event);
} else {
// Mark the existing volume record as corrupted
volume.setStatus(AsyncInstanceCreateStatus.Corrupted);
volume.setDestroyed(true);
_volsDao.update(volume.getId(), volume);
}
txn.commit();
} catch (Exception e) {
s_logger.error("Unhandled exception while saving volume " + volumeName, e);
}
return createdVolume;
return allocatedVolume;
}
@Override

View File

@@ -314,10 +314,21 @@ public class UserVmManagerImpl implements UserVmManager {
if (volume.getState().equals(Volume.State.Allocated)) {
/*Need to create the volume*/
VMTemplateVO rootDiskTmplt = _templateDao.findById(vm.getTemplateId());
DataCenterVO dcVO = _dcDao.findById(vm.getDataCenterId());
HostPodVO pod = _podDao.findById(vm.getPodId());
StoragePoolVO rootDiskPool = _storagePoolDao.findById(rootVolumeOfVm.getPoolId());
ServiceOfferingVO svo = _serviceOfferingDao.findById(vm.getServiceOfferingId());
DiskOfferingVO diskVO = _diskOfferingDao.findById(volume.getDiskOfferingId());
HypervisorType rootDiskHyperType = _volsDao.getHypervisorType(rootVolumeOfVm.getId());
volume = _storageMgr.createVolume(volumeId, _volsDao.getHypervisorType(rootVolumeOfVm.getId()));
volume = _storageMgr.createVolume(volume, vm, rootDiskTmplt, dcVO, pod, rootDiskPool.getClusterId(), svo, diskVO, new ArrayList<StoragePoolVO>(), volume.getSize(), rootDiskHyperType);
if (volume == null) {
throw new InternalErrorException("Failed to create volume when attaching it to VM: " + vm.getName());
}
}
List<VolumeVO> vols = _volsDao.findByInstance(vmId);
if( deviceId != null ) {
if( deviceId.longValue() > 15 || deviceId.longValue() == 0 || deviceId.longValue() == 3) {