mirror of https://github.com/apache/cloudstack.git
Merge pull request #1813 from priyankparihar/CLOUDSTACK-9604
CLOUDSTACK-9604: Root disk resize support for VMware and XenServer.
commit 45f62c3483
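The root disk size travels through three units in this change: the API takes rootdisksize in GB, the management server compares against the template size in bytes (rootDiskSize << 30), and the VMware layer programs the virtual disk in KB (bytes / 1024). A minimal, self-contained sketch of those conversions follows; the class and method names are illustrative and not part of the patch.

// Illustrative unit conversions used by the root-disk-resize patch.
// The names below are examples only; the patch does the same arithmetic
// inline (rootDiskSize << 30, size / 1024).
public final class DiskSizeUnits {
    private DiskSizeUnits() {}

    // API input: rootdisksize is given in GB.
    public static long gbToBytes(long sizeGb) {
        return sizeGb << 30;              // 10 GB -> 10737418240 bytes
    }

    // VMware's VirtualDisk capacity is programmed in KB.
    public static long bytesToKb(long sizeBytes) {
        return sizeBytes / 1024;
    }

    public static void main(String[] args) {
        long requestedGb = 10;
        long bytes = gbToBytes(requestedGb);
        System.out.println(bytes);            // 10737418240
        System.out.println(bytesToKb(bytes)); // 10485760 KB
    }
}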
@@ -702,7 +702,17 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
"Please re-try when virtual disk is attached to a VM using SCSI controller.");
}
if (vdisk.second() != null && !vdisk.second().toLowerCase().startsWith("scsi"))
{
s_logger.error("Unsupported disk device bus "+ vdisk.second());
throw new Exception("Unsupported disk device bus "+ vdisk.second());
}
VirtualDisk disk = vdisk.first();
if ((VirtualDiskFlatVer2BackingInfo)disk.getBacking() != null && ((VirtualDiskFlatVer2BackingInfo)disk.getBacking()).getParent() != null)
{
s_logger.error("Resize is not supported because Disk device has Parent "+ ((VirtualDiskFlatVer2BackingInfo)disk.getBacking()).getParent().getUuid());
throw new Exception("Resize is not supported because Disk device has Parent "+ ((VirtualDiskFlatVer2BackingInfo)disk.getBacking()).getParent().getUuid());
}
String vmdkAbsFile = getAbsoluteVmdkFile(disk);
if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);

@@ -1515,7 +1525,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
String vmNameOnVcenter = names.second();
String dataDiskController = vmSpec.getDetails().get(VmDetailConstants.DATA_DISK_CONTROLLER);
String rootDiskController = vmSpec.getDetails().get(VmDetailConstants.ROOT_DISK_CONTROLLER);
DiskTO rootDiskTO = null;
// If root disk controller is scsi, then data disk controller would also be scsi instead of using 'osdefault'
// This helps avoid mix of different scsi subtype controllers in instance.
if (DiskControllerType.lsilogic == DiskControllerType.getType(rootDiskController)) {

@@ -1888,6 +1898,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
volumeDsDetails.first(),
(controllerKey == vmMo.getIDEControllerKey(ideUnitNumber)) ? ((ideUnitNumber++) % VmwareHelper.MAX_IDE_CONTROLLER_COUNT) : scsiUnitNumber++, i + 1);
if (vol.getType() == Volume.Type.ROOT)
rootDiskTO = vol;
deviceConfigSpecArray[i].setDevice(device);
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);

@@ -2022,6 +2034,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
hyperHost.setRestartPriorityForVM(vmMo, DasVmPriority.HIGH.value());
}
//For resizing root disk.
if (rootDiskTO != null && !hasSnapshot) {
resizeRootDisk(vmMo, rootDiskTO, hyperHost, context);
}
//
// Post Configuration
//

@@ -2081,6 +2098,43 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
}
private void resizeRootDisk(VirtualMachineMO vmMo, DiskTO rootDiskTO, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception
{
Pair<VirtualDisk, String> vdisk = getVirtualDiskInfo(vmMo, rootDiskTO.getPath() + ".vmdk");
assert(vdisk != null);
Long reqSize=((VolumeObjectTO)rootDiskTO.getData()).getSize()/1024;
VirtualDisk disk = vdisk.first();
if (reqSize > disk.getCapacityInKB())
{
VirtualMachineDiskInfo diskInfo = getMatchingExistingDisk(vmMo.getDiskInfoBuilder(), rootDiskTO, hyperHost, context);
assert (diskInfo != null);
String[] diskChain = diskInfo.getDiskChain();
if (diskChain != null && diskChain.length>1)
{
s_logger.error("Unsupported Disk chain length "+ diskChain.length);
throw new Exception("Unsupported Disk chain length "+ diskChain.length);
}
if (diskInfo.getDiskDeviceBusName() == null || !diskInfo.getDiskDeviceBusName().toLowerCase().startsWith("scsi"))
{
s_logger.error("Unsupported root disk device bus "+ diskInfo.getDiskDeviceBusName() );
throw new Exception("Unsupported root disk device bus "+ diskInfo.getDiskDeviceBusName());
}
disk.setCapacityInKB(reqSize);
VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec();
VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec();
deviceConfigSpec.setDevice(disk);
deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.EDIT);
vmConfigSpec.getDeviceChange().add(deviceConfigSpec);
if (!vmMo.configureVm(vmConfigSpec)) {
throw new Exception("Failed to configure VM for given root disk size. vmName: " + vmMo.getName());
}
}
}

/**
* Sets video card memory to the one provided in detail svga.vramSize (if provided) on {@code vmConfigSpec}.
* 64MB was always set before.
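Reading the hunks above together: during StartCommand the ROOT DiskTO is remembered, and when the VM has no snapshots resizeRootDisk() validates the disk (SCSI bus, single-link disk chain, no backing parent) and then submits a reconfigure with an EDIT device change. The sketch below condenses only the reconfigure step; the helper name is illustrative, and the vim25 imports are the standard vSphere SDK types already used in this file.

import com.vmware.vim25.VirtualDeviceConfigSpec;
import com.vmware.vim25.VirtualDeviceConfigSpecOperation;
import com.vmware.vim25.VirtualDisk;
import com.vmware.vim25.VirtualMachineConfigSpec;

// Illustrative only: build the reconfigure spec that grows an existing disk.
// The patch performs the same steps inline in resizeRootDisk() and submits
// the resulting spec through vmMo.configureVm(vmConfigSpec).
class GrowDiskSpecSketch {
    static VirtualMachineConfigSpec buildGrowDiskSpec(VirtualDisk disk, long newCapacityKb) {
        disk.setCapacityInKB(newCapacityKb);

        VirtualDeviceConfigSpec deviceSpec = new VirtualDeviceConfigSpec();
        deviceSpec.setDevice(disk);
        deviceSpec.setOperation(VirtualDeviceConfigSpecOperation.EDIT);

        VirtualMachineConfigSpec vmSpec = new VirtualMachineConfigSpec();
        vmSpec.getDeviceChange().add(deviceSpec);
        return vmSpec;
    }
}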
@@ -970,6 +970,13 @@ public class XenServerStorageProcessor implements StorageProcessor {
tmpltvdi = getVDIbyUuid(conn, srcData.getPath());
vdi = tmpltvdi.createClone(conn, new HashMap<String, String>());
Long virtualSize = vdi.getVirtualSize(conn);
if (volume.getSize() > virtualSize) {
s_logger.debug("Overriding provided template's size with new size " + volume.getSize() + " for volume: " + volume.getName());
vdi.resize(conn, volume.getSize());
} else {
s_logger.debug("Using templates disk size of " + virtualSize + " for volume: " + volume.getName() + " since size passed was " + volume.getSize());
}
vdi.setNameLabel(conn, volume.getName());
VDI.Record vdir;
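On XenServer the override is applied right after the template VDI is cloned: if the requested volume size exceeds the clone's virtual size, the VDI is grown through the XenAPI bindings used in the hunk above. A minimal sketch of that pattern; the helper name and the surrounding plumbing (connection setup, error translation) are illustrative assumptions.

import java.util.HashMap;

import com.xensource.xenapi.Connection;
import com.xensource.xenapi.VDI;

// Sketch: apply a root-disk-size override to a freshly cloned VDI,
// mirroring the calls used in XenServerStorageProcessor above.
class VdiResizeSketch {
    static VDI cloneAndResize(Connection conn, VDI templateVdi, long requestedBytes, String volumeName) throws Exception {
        VDI vdi = templateVdi.createClone(conn, new HashMap<String, String>());
        long virtualSize = vdi.getVirtualSize(conn);
        if (requestedBytes > virtualSize) {
            // Grow the clone to the size requested at deploy time.
            vdi.resize(conn, requestedBytes);
        }
        // Otherwise keep the template's size, as the patch does.
        vdi.setNameLabel(conn, volumeName);
        return vdi;
    }
}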
plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java (5 changes, Normal file → Executable file)
@@ -48,6 +48,11 @@ public final class CitrixResizeVolumeCommandWrapper extends CommandWrapper<Resiz
long newSize = command.getNewSize();
try {
if (command.getCurrentSize() >= newSize) {
s_logger.info("No need to resize volume: " + volId +", current size " + command.getCurrentSize() + " is same as new size " + newSize);
return new ResizeVolumeAnswer(command, true, "success", newSize);
}
if (command.isManaged()) {
resizeSr(conn, command);
}
plugins/hypervisors/xenserver/test/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixRequestWrapperTest.java (1 change, Normal file → Executable file)
@@ -436,7 +436,6 @@ public class CitrixRequestWrapperTest {
final Answer answer = wrapper.execute(resizeCommand, citrixResourceBase);
verify(citrixResourceBase, times(1)).getConnection();
assertFalse(answer.getResult());
}
@Test
@@ -868,8 +868,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
HypervisorType hypervisorType = _volsDao.getHypervisorType(volume.getId());
if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.XenServer &&
hypervisorType != HypervisorType.VMware && hypervisorType != HypervisorType.Any && hypervisorType != HypervisorType.None) {
throw new InvalidParameterValueException("CloudStack currently supports volume resize only on KVM, VMware, or XenServer.");
hypervisorType != HypervisorType.VMware && hypervisorType != HypervisorType.Any && hypervisorType != HypervisorType.None ) {
throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support rootdisksize override");
}
if (volume.getState() != Volume.State.Ready && volume.getState() != Volume.State.Allocated) {

@@ -1026,6 +1026,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
UserVmVO userVm = _userVmDao.findById(volume.getInstanceId());
if (userVm != null) {
if (volume.getVolumeType().equals(Volume.Type.ROOT) && userVm.getPowerState()!= VirtualMachine.PowerState.PowerOff && hypervisorType == HypervisorType.VMware){
s_logger.error(" For ROOT volume resize VM should be in Power Off state.");
throw new InvalidParameterValueException("VM current state is : "+userVm.getPowerState()+ ". But VM should be in "+VirtualMachine.PowerState.PowerOff+" state.");
}
// serialize VM operation
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
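The management-server guard added here is effectively two checks: the hypervisor must be one that supports volume resize, and a VMware ROOT volume can only be resized while its VM is powered off. A hedged restatement of that logic as a standalone method; the method name and messages are illustrative, and the types are the CloudStack ones referenced in the hunk above.

// Sketch only: a standalone restatement of the resize preconditions above.
// HypervisorType, Volume, VirtualMachine and InvalidParameterValueException
// are the CloudStack types already imported by VolumeApiServiceImpl.
static void checkRootResizeAllowed(HypervisorType hypervisorType, Volume.Type volumeType, VirtualMachine.PowerState powerState) {
    boolean supported = hypervisorType == HypervisorType.KVM || hypervisorType == HypervisorType.XenServer
            || hypervisorType == HypervisorType.VMware || hypervisorType == HypervisorType.Any || hypervisorType == HypervisorType.None;
    if (!supported) {
        throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support volume resize");
    }
    if (volumeType == Volume.Type.ROOT && hypervisorType == HypervisorType.VMware && powerState != VirtualMachine.PowerState.PowerOff) {
        throw new InvalidParameterValueException("ROOT volume resize on VMware requires the VM to be in the PowerOff state");
    }
}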
@@ -3511,27 +3511,17 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
}
rootDiskSize = Long.parseLong(customParameters.get("rootdisksize"));
// only KVM supports rootdisksize override
if (hypervisorType != HypervisorType.KVM) {
throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support rootdisksize override");
// only KVM, XenServer and VMware supports rootdisksize override
if (!(hypervisorType == HypervisorType.KVM || hypervisorType == HypervisorType.XenServer || hypervisorType == HypervisorType.VMware)) {
throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support rootdisksize override");
}
// rotdisksize must be larger than template
VMTemplateVO templateVO = _templateDao.findById(template.getId());
if (templateVO == null) {
throw new InvalidParameterValueException("Unable to look up template by id " + template.getId());
}
if ((rootDiskSize << 30) < templateVO.getSize()) {
Long templateVOSizeGB = templateVO.getSize() / 1024 / 1024 / 1024;
throw new InvalidParameterValueException("unsupported: rootdisksize override is smaller than template size " + templateVO.getSize()
+ "B (" + templateVOSizeGB + "GB)");
} else {
s_logger.debug("rootdisksize of " + (rootDiskSize << 30) + " was larger than template size of " + templateVO.getSize());
}
s_logger.debug("found root disk size of " + rootDiskSize);
customParameters.remove("rootdisksize");
validateRootDiskResize(hypervisorType, rootDiskSize, templateVO, vm, customParameters);
}
if (isDisplayVm != null) {

@@ -3614,6 +3604,29 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
});
}
public void validateRootDiskResize(final HypervisorType hypervisorType, Long rootDiskSize, VMTemplateVO templateVO, UserVmVO vm, final Map<String, String> customParameters) throws InvalidParameterValueException
{
// rootdisksize must be larger than template.
if ((rootDiskSize << 30) < templateVO.getSize()) {
Long templateVOSizeGB = templateVO.getSize() / 1024 / 1024 / 1024;
s_logger.error("unsupported: rootdisksize override is smaller than template size " + templateVO.getSize() + "B (" + templateVOSizeGB + "GB)");
throw new InvalidParameterValueException("unsupported: rootdisksize override is smaller than template size " + templateVO.getSize() + "B (" + templateVOSizeGB + "GB)");
} else if ((rootDiskSize << 30) > templateVO.getSize()) {
if (hypervisorType == HypervisorType.VMware && (vm.getDetails() == null || vm.getDetails().get("rootDiskController") == null)) {
s_logger.warn("If Root disk controller parameter is not overridden, then Root disk resize may fail because current Root disk controller value is NULL.");
} else if (hypervisorType == HypervisorType.VMware && !vm.getDetails().get("rootDiskController").toLowerCase().contains("scsi")) {
s_logger.error("Found unsupported root disk controller : " + vm.getDetails().get("rootDiskController"));
throw new InvalidParameterValueException("Found unsupported root disk controller :" + vm.getDetails().get("rootDiskController"));
} else {
s_logger.debug("Rootdisksize override validation successful. Template root disk size "+(templateVO.getSize() / 1024 / 1024 / 1024)+ " GB" + " Root disk size specified "+ rootDiskSize+" GB");
}
} else {
s_logger.debug("Root disk size specified is " + (rootDiskSize << 30) + " and Template root disk size is " + templateVO.getSize()+" . Both are equal so no need to override");
customParameters.remove("rootdisksize");
}
}
@Override
public void generateUsageEvent(VirtualMachine vm, boolean isDisplay, String eventType){
ServiceOfferingVO serviceOffering = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId());
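validateRootDiskResize() works in bytes: the GB override is shifted left by 30 and compared with the template's size, and when the two are exactly equal the rootdisksize custom parameter is simply dropped. A small worked example of that comparison; the 8 GB template value is illustrative, not from the patch.

// Worked example of the size comparison used by validateRootDiskResize().
public class RootDiskSizeCheckExample {
    public static void main(String[] args) {
        long rootDiskSizeGb = 10;                   // rootdisksize=10 from the API
        long overrideBytes  = rootDiskSizeGb << 30; // 10737418240 bytes
        long templateBytes  = 8L << 30;             // an assumed 8 GB template

        if (overrideBytes < templateBytes) {
            System.out.println("rejected: override smaller than template");
        } else if (overrideBytes > templateBytes) {
            System.out.println("accepted: root disk will be grown (VMware also requires a SCSI root disk controller)");
        } else {
            System.out.println("equal: the rootdisksize custom parameter is dropped, nothing to resize");
        }
    }
}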
@@ -16,7 +16,8 @@
// under the License.
package com.cloud.vm;
import static org.hamcrest.Matchers.instanceOf;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;

@@ -36,7 +37,9 @@ import static org.mockito.Mockito.when;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import com.cloud.network.element.UserDataServiceProvider;

@@ -44,6 +47,7 @@ import com.cloud.storage.Storage;
import com.cloud.user.User;
import com.cloud.event.dao.UsageEventDao;
import com.cloud.uservm.UserVm;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;

@@ -236,6 +240,7 @@ public class UserVmManagerTest {
_userVmMgr._entityMgr = _entityMgr;
_userVmMgr._storagePoolDao = _storagePoolDao;
_userVmMgr._vmSnapshotDao = _vmSnapshotDao;
_userVmMgr._configDao = _configDao;
_userVmMgr._nicDao = _nicDao;
_userVmMgr._networkModel = _networkModel;
_userVmMgr._networkDao = _networkDao;

@@ -260,6 +265,56 @@ public class UserVmManagerTest {
}
@Test
public void testValidateRootDiskResize()
{
HypervisorType hypervisorType = HypervisorType.Any;
Long rootDiskSize = Long.valueOf(10);
UserVmVO vm = Mockito.mock(UserVmVO.class);
VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class);
Map<String, String> customParameters = new HashMap<String, String>();
Map<String, String> vmDetals = new HashMap<String, String>();
vmDetals.put("rootDiskController","ide");
when(vm.getDetails()).thenReturn(vmDetals);
when(templateVO.getSize()).thenReturn((rootDiskSize<<30)+1);
//Case 1: >
try{
_userVmMgr.validateRootDiskResize(hypervisorType, rootDiskSize, templateVO, vm, customParameters);
Assert.fail("Function should throw InvalidParameterValueException");
}catch(Exception e){
assertThat(e, instanceOf(InvalidParameterValueException.class));
}
//Case 2: =
when(templateVO.getSize()).thenReturn((rootDiskSize<<30));
customParameters.put("rootdisksize","10");
_userVmMgr.validateRootDiskResize(hypervisorType, rootDiskSize, templateVO, vm, customParameters);
assert(!customParameters.containsKey("rootdisksize"));
when(templateVO.getSize()).thenReturn((rootDiskSize<<30)-1);
//Case 3: <
//Case 3.1: HypervisorType!=VMware
_userVmMgr.validateRootDiskResize(hypervisorType, rootDiskSize, templateVO, vm, customParameters);
hypervisorType = HypervisorType.VMware;
//Case 3.2: 0->(rootDiskController!=scsi)
try {
_userVmMgr.validateRootDiskResize(hypervisorType, rootDiskSize, templateVO, vm, customParameters);
Assert.fail("Function should throw InvalidParameterValueException");
}catch(Exception e) {
assertThat(e, instanceOf(InvalidParameterValueException.class));
}
//Case 3.3: 1->(rootDiskController==scsi)
vmDetals.put("rootDiskController","scsi");
_userVmMgr.validateRootDiskResize(hypervisorType, rootDiskSize, templateVO, vm, customParameters);
}
// Test restoreVm when VM state not in running/stopped case
@Test(expected = CloudRuntimeException.class)
public void testRestoreVMF1() throws ResourceAllocationException, InsufficientCapacityException, ResourceUnavailableException {
@@ -89,7 +89,8 @@ class TestResizeVolume(cloudstackTestCase):
try:
cls.hypervisor = str(get_hypervisor_type(cls.api_client)).lower()
if cls.hypervisor.lower() in ['hyperv']:
raise unittest.SkipTest("Volume resize is not supported on %s" % cls.hypervisor)
# Creating service offering with normal config
cls.service_offering = ServiceOffering.create(
cls.api_client,

(File diff suppressed because it is too large.)
@@ -19,91 +19,188 @@
#All tests inherit from cloudstackTestCase
from marvin.cloudstackTestCase import cloudstackTestCase
#Import Integration Libraries
#base - contains all resources as entities and defines create, delete, list operations on them
from marvin.lib.base import Account, VirtualMachine, ServiceOffering
from marvin.lib.base import Account, VirtualMachine, ServiceOffering,\
Configurations,StoragePool,Template
#utils - utility classes for common cleanup, external library wrappers etc
from marvin.lib.utils import cleanup_resources
#common - commonly used methods for all tests are listed here
from marvin.lib.common import get_zone, get_domain, get_template, list_volumes
from marvin.codes import FAILED
from marvin.lib.utils import cleanup_resources,validateList
from marvin.lib.common import get_zone, get_domain, get_template,\
list_volumes,list_storage_pools,list_configurations
from marvin.codes import FAILED,INVALID_INPUT
from nose.plugins.attrib import attr
from marvin.sshClient import SshClient
import time
import re
from marvin.cloudstackAPI import updateTemplate,registerTemplate
class TestData(object):
"""Test data object that is required to create resources
"""
def __init__(self):
self.testdata = {
#data to create an account
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
"password": "password",
},
#data reqd for virtual machine creation
"virtual_machine" : {
"name" : "testvm",
"displayname" : "Test VM",
},
#small service offering
"service_offering": {
"small": {
"name": "Small Instance",
"displaytext": "Small Instance",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 256,
},
},
"ostype": 'CentOS 5.3 (64-bit)',
}
class TestDeployVM(cloudstackTestCase):
class TestDeployVmRootSize(cloudstackTestCase):
"""Test deploy a VM into a user account
"""
def setUp(self):
self.testdata = TestData().testdata
self.apiclient = self.testClient.getApiClient()
self.hypervisor = self.testClient.getHypervisorInfo()
@classmethod
def setUpClass(cls):
cls.cloudstacktestclient = super(TestDeployVmRootSize,
cls).getClsTestClient()
cls.api_client = cls.cloudstacktestclient.getApiClient()
cls.hypervisor = cls.cloudstacktestclient.getHypervisorInfo().lower()
cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
# Get Zone, Domain and Default Built-in template
self.domain = get_domain(self.apiclient)
self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
self.testdata["mode"] = self.zone.networktype
self.template = get_template(self.apiclient, self.zone.id, self.testdata["ostype"])
if self.template == FAILED:
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client,
cls.cloudstacktestclient.getZoneForTests())
cls.services = cls.testClient.getParsedTestDataConfig()
cls.services["mode"] = cls.zone.networktype
cls._cleanup = []
cls.updateclone = False
cls.restartreq = False
cls.defaultdiskcontroller = "ide"
cls.template = get_template(cls.api_client, cls.zone.id)
if cls.template == FAILED:
assert False, "get_template() failed to return template "
# for testing with specific template
# self.template = get_template(self.apiclient, self.zone.id, self.testdata["ostype"], templatetype='USER', services = {"template":'31f52a4d-5681-43f7-8651-ad4aaf823618'})
#create a user account
self.account = Account.create(
self.apiclient,
self.testdata["account"],
domainid=self.domain.id
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id,admin=True
)
cls._cleanup.append(cls.account)
list_pool_resp = list_storage_pools(cls.api_client,
account=cls.account.name,
domainid=cls.domain.id)
#Identify the storage pool type and set vmware fullclone to
# true if storage is VMFS
if cls.hypervisor == 'vmware':
# please make sure url of templateregister dictionary in
# test_data.config pointing to .ova file
list_config_storage_response = list_configurations(
cls.api_client
, name=
"vmware.root.disk.controller")
cls.defaultdiskcontroller = list_config_storage_response[0].value
if list_config_storage_response[0].value == "ide" or \
list_config_storage_response[0].value == \
"osdefault":
Configurations.update(cls.api_client,
"vmware.root.disk.controller",
value="scsi")
cls.updateclone = True
cls.restartreq = True
list_config_fullclone_global_response = list_configurations(
cls.api_client
, name=
"vmware.create.full.clone")
if list_config_fullclone_global_response[0].value=="false":
Configurations.update(cls.api_client,
"vmware.create.full.clone",
value="true")
cls.updateclone = True
cls.restartreq = True
cls.tempobj = Template.register(cls.api_client,
cls.services["templateregister"],
hypervisor=cls.hypervisor,
zoneid=cls.zone.id,
account=cls.account.name,
domainid=cls.domain.id
)
cls.tempobj.download(cls.api_client)
for strpool in list_pool_resp:
if strpool.type.lower() == "vmfs" or strpool.type.lower()== "networkfilesystem":
list_config_storage_response = list_configurations(
cls.api_client
, name=
"vmware.create.full.clone",storageid=strpool.id)
res = validateList(list_config_storage_response)
if res[2]== INVALID_INPUT:
raise Exception("Failed to list configurations ")
if list_config_storage_response[0].value == "false":
Configurations.update(cls.api_client,
"vmware.create.full.clone",
value="true",
storageid=strpool.id)
cls.updateclone = True
StoragePool.update(cls.api_client,id=strpool.id,
tags="scsi")
cls.storageID = strpool.id
break
if cls.restartreq:
cls.restartServer()
#create a service offering
self.service_offering = ServiceOffering.create(
self.apiclient,
self.testdata["service_offering"]["small"]
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
#build cleanup list
self.cleanup = [
self.service_offering,
self.account
]
cls.services_offering_vmware=ServiceOffering.create(
cls.api_client,cls.services["service_offering"],tags="scsi")
cls._cleanup.extend([cls.service_offering,cls.services_offering_vmware])
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
if cls.updateclone:
Configurations.update(cls.api_client,
"vmware.create.full.clone",
value="false",storageid=cls.storageID)
Configurations.update(cls.api_client,
"vmware.create.full.clone",
value="false")
Configurations.update(cls.api_client,
"vmware.root.disk.controller",
value=cls.defaultdiskcontroller)
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.cloudstacktestclient.getApiClient()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
pass
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def restartServer(cls):
"""Restart management server"""
sshClient = SshClient(
cls.mgtSvrDetails["mgtSvrIp"],
22,
cls.mgtSvrDetails["user"],
cls.mgtSvrDetails["passwd"]
)
command = "service cloudstack-management stop"
sshClient.execute(command)
command = "service cloudstack-management start"
sshClient.execute(command)
#time.sleep(cls.services["sleep"])
time.sleep(300)
return
@attr(tags = ['advanced', 'basic', 'sg'], required_hardware="true")
def test_00_deploy_vm_root_resize(self):
@@ -114,36 +211,47 @@ class TestDeployVM(cloudstackTestCase):
# 2. root disk has new size per listVolumes
# 3. Rejects non-supported hypervisor types
"""
if(self.hypervisor.lower() == 'kvm'):
newrootsize = (self.template.size >> 30) + 2
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
rootdisksize=newrootsize
newrootsize = (self.template.size >> 30) + 2
if(self.hypervisor.lower() == 'kvm' or self.hypervisor.lower() ==
'xenserver'or self.hypervisor.lower() == 'vmware' ):
if self.hypervisor=="vmware":
self.virtual_machine = VirtualMachine.create(
self.apiclient, self.services["virtual_machine"],
zoneid=self.zone.id,
accountid=self.account.name,
domainid=self.domain.id,
serviceofferingid=self.services_offering_vmware.id,
templateid=self.tempobj.id,
rootdisksize=newrootsize
)
else:
self.virtual_machine = VirtualMachine.create(
self.apiclient, self.services["virtual_machine"],
zoneid=self.zone.id,
accountid=self.account.name,
domainid=self.domain.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
rootdisksize=newrootsize
)
list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s"\
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vms, list),
True,
"List VM response was not a valid list"
)
self.assertNotEqual(
len(list_vms),
0,
"List VM response was empty"
)
res=validateList(list_vms)
self.assertNotEqual(res[2],INVALID_INPUT," Invalid list VM "
"response")
self.cleanup.append(self.virtual_machine)
vm = list_vms[0]
self.assertEqual(
@@ -164,25 +272,27 @@ class TestDeployVM(cloudstackTestCase):
# get root vol from created vm, verify it is correct size
list_volume_response = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
res=validateList(list_volume_response)
self.assertNotEqual(res[2],INVALID_INPUT," Invalid list VM "
"response")
rootvolume = list_volume_response[0]
success = False
if rootvolume is not None and rootvolume.size == (newrootsize << 30):
success = True
self.assertEqual(
success,
True,
"Check if the root volume resized appropriately"
)
success,
True,
"Check if the root volume resized appropriately"
)
else:
self.debug("hypervisor %s unsupported for test 00, verifying it errors properly" % self.hypervisor)
newrootsize = (self.template.size >> 30) + 2
success = False
try:
@@ -193,7 +303,7 @@ class TestDeployVM(cloudstackTestCase):
zoneid=self.zone.id,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
templateid=self.tempobj.id,
rootdisksize=newrootsize
)
except Exception as ex:
@@ -208,63 +318,91 @@ class TestDeployVM(cloudstackTestCase):
def test_01_deploy_vm_root_resize(self):
"""Test proper failure to deploy virtual machine with rootdisksize of 0
"""
if (self.hypervisor.lower() == 'kvm'):
newrootsize = 0
success = False
newrootsize=0
success=False
if(self.hypervisor.lower() == 'kvm' or self.hypervisor.lower() ==
'xenserver'or self.hypervisor.lower() == 'vmware' ):
try:
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
rootdisksize=newrootsize
if self.hypervisor=="vmware":
self.virtual_machine = VirtualMachine.create(
self.apiclient, self.services["virtual_machine"],
zoneid=self.zone.id,
accountid=self.account.name,
domainid=self.domain.id,
serviceofferingid=self.services_offering_vmware.id,
templateid=self.tempobj.id,
rootdisksize=newrootsize
)
else:
self.virtual_machine = VirtualMachine.create(
self.apiclient, self.services["virtual_machine"],
zoneid=self.zone.id,
accountid=self.account.name,
domainid=self.domain.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
rootdisksize=newrootsize
)
except Exception as ex:
if "rootdisk size should be a non zero number" in str(ex):
success = True
else:
self.debug("virtual machine create did not fail appropriately. Error was actually : " + str(ex));
self.assertEqual(success, True, "Check if passing 0 as rootdisksize fails appropriately")
else:
self.debug("test 01 does not support hypervisor type " + self.hypervisor);
self.debug("test 01 does not support hypervisor type " + self.hypervisor)
@attr(tags = ['advanced', 'basic', 'sg'], required_hardware="true", BugId="CLOUDSTACK-6984")
def test_02_deploy_vm_root_resize(self):
"""Test proper failure to deploy virtual machine with rootdisksize less than template size
"""
if (self.hypervisor.lower() == 'kvm'):
newrootsize = (self.template.size >> 30) - 1
newrootsize = (self.template.size >> 30) - 1
success=False
self.assertEqual(newrootsize > 0, True, "Provided template is less than 1G in size, cannot run test")
self.assertEqual(newrootsize > 0, True, "Provided template is less than 1G in size, cannot run test")
success = False
if(self.hypervisor.lower() == 'kvm' or self.hypervisor.lower() ==
'xenserver'or self.hypervisor.lower() == 'vmware' ):
try:
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
rootdisksize=newrootsize
if self.hypervisor=="vmware":
self.virtual_machine = VirtualMachine.create(
self.apiclient, self.services["virtual_machine"],
zoneid=self.zone.id,
accountid=self.account.name,
domainid=self.domain.id,
serviceofferingid=self.services_offering_vmware.id,
templateid=self.tempobj.id,
rootdisksize=newrootsize
)
else:
self.virtual_machine = VirtualMachine.create(
self.apiclient, self.services["virtual_machine"],
zoneid=self.zone.id,
accountid=self.account.name,
domainid=self.domain.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
rootdisksize=newrootsize
)
except Exception as ex:
if "rootdisksize override is smaller than template size" in str(ex):
success = True
else:
self.debug("virtual machine create did not fail appropriately. Error was actually : " + str(ex));
if "rootdisksize override is smaller than template size" in str(ex):
success = True
else:
self.debug("virtual machine create did not fail appropriately. Error was actually : " + str(ex));
self.assertEqual(success, True, "Check if passing rootdisksize < templatesize fails appropriately")
else:
self.debug("test 01 does not support hypervisor type " + self.hypervisor);
self.debug("test 02 does not support hypervisor type " +
self.hypervisor)
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@@ -3888,7 +3888,7 @@ class Configurations:
"""Manage Configuration"""
@classmethod
def update(cls, apiclient, name, value=None, zoneid=None):
def update(cls, apiclient, name, value=None, zoneid=None, clusterid=None, storageid=None):
"""Updates the specified configuration"""
cmd = updateConfiguration.updateConfigurationCmd()

@@ -3897,8 +3897,13 @@ class Configurations:
if zoneid:
cmd.zoneid = zoneid
if clusterid:
cmd.clusterid = clusterid
if storageid:
cmd.storageid=storageid
apiclient.updateConfiguration(cmd)
@classmethod
def list(cls, apiclient, **kwargs):
"""Lists configurations"""
@@ -1548,17 +1548,20 @@
createForm: {
title: 'label.action.resize.volume',
preFilter: function(args) {
if (args.context.volumes != null && args.context.volumes[0].type == 'ROOT') {
var vol;
if (args.context.volumes != null) vol = args.context.volumes[0];
if (vol.type == "ROOT" && (vol.hypervisor == "XenServer" || vol.hypervisor == "KVM" || vol.hypervisor == "VMware")) {
args.$form.find('.form-item[rel=newdiskoffering]').hide();
selectedDiskOfferingObj = null;
args.$form.find('.form-item[rel=newsize]').css('display', 'inline-block');
} else {
args.$form.find('.form-item[rel=newdiskoffering]').css('display', 'inline-block');
args.$form.find('.form-item[rel=newsize]').hide();
}
},
fields: {
newdiskoffering: {
label: 'label.resize.new.offering.id',
isHidden: true,
select: function(args) {
if (args.context.volumes != null && args.context.volumes[0].type == 'ROOT') {
args.response.success({
@@ -1586,6 +1589,11 @@
});
args.$select.change(function() {
if(args.context.volumes[0].type == "ROOT") {
selectedDiskOfferingObj = null;
return;
}
var diskOfferingId = $(this).val();
$(diskofferingObjs).each(function() {
if (this.id == diskOfferingId) {
@@ -1636,7 +1644,8 @@
shrinkok: {
label: 'label.resize.shrink.ok',
isBoolean: true,
isChecked: false
isChecked: false,
isHidden: true
},
minIops: {
label: 'label.disk.iops.min',
@@ -1658,38 +1667,47 @@
},
action: function(args) {
var array1 = [];
if(args.$form.find('.form-item[rel=shrinkok]').css("display") != "none") {
array1.push("&shrinkok=" + (args.data.shrinkok == "on"));
}
var newDiskOffering = args.data.newdiskoffering;
var newSize;
if (selectedDiskOfferingObj == null || selectedDiskOfferingObj.iscustomized == true) {
newSize = args.data.newsize;
}
if (newDiskOffering != null && newDiskOffering.length > 0) {
array1.push("&diskofferingid=" + todb(newDiskOffering));
}
if (newSize != null && newSize.length > 0) {
array1.push("&size=" + todb(newSize));
if (newSize != null && newSize.length > 0) {
array1.push("&size=" + todb(newSize));
}
} else {
if(args.$form.find('.form-item[rel=shrinkok]').css("display") != "none") {
array1.push("&shrinkok=" + (args.data.shrinkok == "on"));
}
var newDiskOffering = args.data.newdiskoffering;
if (selectedDiskOfferingObj.iscustomized == true) {
newSize = args.data.newsize;
}
if (newDiskOffering != null && newDiskOffering.length > 0) {
array1.push("&diskofferingid=" + todb(newDiskOffering));
}
if (newSize != null && newSize.length > 0) {
array1.push("&size=" + todb(newSize));
}
var minIops;
var maxIops
if (selectedDiskOfferingObj.iscustomizediops == true) {
minIops = args.data.minIops;
maxIops = args.data.maxIops;
}
if (minIops != null && minIops.length > 0) {
array1.push("&miniops=" + todb(minIops));
}
if (maxIops != null && maxIops.length > 0) {
array1.push("&maxiops=" + todb(maxIops));
}
}
var minIops;
var maxIops;
if (selectedDiskOfferingObj != null && selectedDiskOfferingObj.iscustomizediops == true) {
minIops = args.data.minIops;
maxIops = args.data.maxIops;
}
if (minIops != null && minIops.length > 0) {
array1.push("&miniops=" + todb(minIops));
}
if (maxIops != null && maxIops.length > 0) {
array1.push("&maxiops=" + todb(maxIops));
}
$.ajax({
url: createURL("resizeVolume&id=" + args.context.volumes[0].id + array1.join("")),
@@ -2708,7 +2726,7 @@
}
}
if (jsonObj.state == "Ready" || jsonObj.state == "Allocated") {
if ((jsonObj.type == "DATADISK" || jsonObj.type == "ROOT") && (jsonObj.state == "Ready" || jsonObj.state == "Allocated")) {
allowedActions.push("resize");
}
@@ -2718,6 +2736,8 @@
}
}
if (jsonObj.type == "ROOT" || jsonObj.type == "DATADISK") {
if (jsonObj.state == "Ready" && isAdmin() && jsonObj.virtualmachineid != null) {
allowedActions.push("migrateVolume");
@@ -435,22 +435,22 @@
var $target = $(this);
var val = $target.val();
var item = null;
if (item == null) {
if (item == null && args.data.templates.featuredtemplates != undefined) {
item = $.grep(args.data.templates.featuredtemplates, function(elem) {
return elem.id == val;
})[0];
}
if (item == null) {
if (item == null && args.data.templates.communitytemplates != undefined) {
item = $.grep(args.data.templates.communitytemplates, function(elem) {
return elem.id == val;
})[0];
}
if (item == null) {
if (item == null && args.data.templates.mytemplates!=undefined) {
item = $.grep(args.data.templates.mytemplates, function(elem) {
return elem.id == val;
})[0];
}
if (item == null) {
if (item == null && args.data.templates.sharedtemplates!=undefined) {
item = $.grep(args.data.templates.sharedtemplates, function(elem) {
return elem.id == val;
})[0];

@@ -459,7 +459,7 @@
if (!item) return true;
var hypervisor = item['hypervisor'];
if (hypervisor == 'KVM') {
if (hypervisor == 'KVM' || hypervisor == 'XenServer' || hypervisor == 'VMware') {
$step.find('.section.custom-size').show();
$step.addClass('custom-disk-size');
} else {