mirror of https://github.com/apache/cloudstack.git
Merge branch 'main' of https://github.com/apache/cloudstack into nsx-integration
This commit is contained in:
commit
b37a9f7ee8
|
|
@ -56,6 +56,10 @@ public interface IpAddressManager {
|
|||
"Set placement of vrouter ips in redundant mode in vpc tiers, this can be 3 value: `first` to use first ips in tiers, `last` to use last ips in tiers and `random` to take random ips in tiers.",
|
||||
true, ConfigKey.Scope.Account, null, null, null, null, null, ConfigKey.Kind.Select, "first,last,random");
|
||||
|
||||
ConfigKey<Boolean> AllowUserListAvailableIpsOnSharedNetwork = new ConfigKey<Boolean>("Advanced", Boolean.class, "allow.user.list.available.ips.on.shared.network", "false",
|
||||
"Determines whether users can list available IPs on shared networks",
|
||||
true, ConfigKey.Scope.Global);
|
||||
|
||||
/**
|
||||
* Assigns a new public ip address.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -1906,8 +1906,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host));
|
||||
}
|
||||
} else {
|
||||
// This might impact other managed storages, grant access for PowerFlex storage pool only
|
||||
if (pool.getPoolType() == Storage.StoragePoolType.PowerFlex) {
|
||||
// This might impact other managed storages, grant access for PowerFlex and Iscsi/Solidfire storage pool only
|
||||
if (pool.getPoolType() == Storage.StoragePoolType.PowerFlex || pool.getPoolType() == Storage.StoragePoolType.Iscsi) {
|
||||
try {
|
||||
volService.grantAccess(volFactory.getVolume(vol.getId()), host, (DataStore)pool);
|
||||
} catch (Exception e) {
|
||||
|
|
|
|||
|
|
@ -236,6 +236,8 @@ public class LibvirtReplugNicCommandWrapperTest {
|
|||
bridgeVifDriver.configure(params);
|
||||
ovsVifDriver.configure(params);
|
||||
}
|
||||
|
||||
LibvirtVMDef.setGlobalLibvirtVersion(6400000L);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@ -246,6 +248,10 @@ public class LibvirtReplugNicCommandWrapperTest {
|
|||
+ "<target dev='vnet10'/>\n"
|
||||
+ "<mac address='02:00:7c:98:00:02'/>\n"
|
||||
+ "<model type='virtio'/>\n"
|
||||
+ "<bandwidth>\n"
|
||||
+ "<inbound average='25600' peak='25600'/>\n"
|
||||
+ "<outbound average='25600' peak='25600'/>\n"
|
||||
+ "</bandwidth>\n"
|
||||
+ "<link state='up'/>\n"
|
||||
+ "</interface>\n";
|
||||
final String expectedAttachXml =
|
||||
|
|
|
|||
|
|
@ -1107,12 +1107,25 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
return "snapshots/" + accountId + "/" + volumeId;
|
||||
}
|
||||
|
||||
protected boolean isManagedStorageDatastorePath(final String datastorePath) {
|
||||
// ex. [-iqn.2010-01.com.solidfire:3p53.data-9999.97-0] i-2-9999-VM
|
||||
return datastorePath != null && datastorePath.startsWith("[-iqn.");
|
||||
}
|
||||
|
||||
protected String getManagedDatastoreName(final String datastorePath) {
|
||||
// ex. [-iqn.2010-01.com.solidfire:3p53.data-9999.97-0]
|
||||
return datastorePath == null ? datastorePath : datastorePath.split(" ")[0];
|
||||
}
|
||||
|
||||
private long getVMSnapshotChainSize(VmwareContext context, VmwareHypervisorHost hyperHost, String fileName, ManagedObjectReference morDs,
|
||||
String exceptFileName, String vmName) throws Exception {
|
||||
long size = 0;
|
||||
DatastoreMO dsMo = new DatastoreMO(context, morDs);
|
||||
HostDatastoreBrowserMO browserMo = dsMo.getHostDatastoreBrowserMO();
|
||||
String datastorePath = (new DatastoreFile(dsMo.getName(), vmName)).getPath();
|
||||
if (isManagedStorageDatastorePath(datastorePath)) {
|
||||
datastorePath = getManagedDatastoreName(datastorePath);
|
||||
}
|
||||
HostDatastoreBrowserSearchSpec searchSpec = new HostDatastoreBrowserSearchSpec();
|
||||
FileQueryFlags fqf = new FileQueryFlags();
|
||||
fqf.setFileSize(true);
|
||||
|
|
@ -1241,11 +1254,9 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
String vmdkName = null;
|
||||
|
||||
// if this is managed storage
|
||||
if (fullPath.startsWith("[-iqn.")) { // ex. [-iqn.2010-01.com.company:3y8w.vol-10.64-0] -iqn.2010-01.com.company:3y8w.vol-10.64-0-000001.vmdk
|
||||
baseName = fullPath.split(" ")[0]; // ex. [-iqn.2010-01.com.company:3y8w.vol-10.64-0]
|
||||
|
||||
// remove '[' and ']'
|
||||
baseName = baseName.substring(1, baseName.length() - 1);
|
||||
if (isManagedStorageDatastorePath(fullPath)) {
|
||||
baseName = getManagedDatastoreName(fullPath);
|
||||
baseName = baseName.substring(1, baseName.length() - 1); // remove '[' and ']'
|
||||
|
||||
vmdkName = fullPath; // for managed storage, vmdkName == fullPath
|
||||
} else {
|
||||
|
|
@ -1288,12 +1299,9 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
}
|
||||
} else {
|
||||
Map<String, String> mapNewDisk = getNewDiskMap(vmMo);
|
||||
// if this is managed storage
|
||||
if (path.startsWith("[-iqn.")) { // ex. [-iqn.2010-01.com.company:3y8w.vol-10.64-0] -iqn.2010-01.com.company:3y8w.vol-10.64-0-000001.vmdk
|
||||
path = path.split(" ")[0]; // ex. [-iqn.2010-01.com.company:3y8w.vol-10.64-0]
|
||||
|
||||
// remove '[' and ']'
|
||||
baseName = path.substring(1, path.length() - 1);
|
||||
if (isManagedStorageDatastorePath(path)) {
|
||||
path = getManagedDatastoreName(path);
|
||||
baseName = path.substring(1, path.length() - 1); // remove '[' and ']'
|
||||
} else {
|
||||
baseName = VmwareHelper.trimSnapshotDeltaPostfix(path);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -949,6 +949,11 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
|
|||
ManagedObjectReference morDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, VmwareResource.getDatastoreName(iScsiName));
|
||||
DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDS);
|
||||
|
||||
if (path.startsWith("[-iqn.")) {
|
||||
// Rescan 1:1 LUN that VMware may not know the LUN was recently resized
|
||||
_storageProcessor.rescanAllHosts(context, lstHosts, true, true);
|
||||
}
|
||||
|
||||
_storageProcessor.expandDatastore(hostDatastoreSystem, dsMo);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -2825,7 +2825,15 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
|
||||
morDs = firstHostDatastoreSystemMO.findDatastoreByName(datastoreName);
|
||||
if (morDs == null) {
|
||||
morDs = firstHostDatastoreSystemMO.createVmfsDatastore(datastoreName, hostScsiDisk);
|
||||
final String hostVersion = firstHostMO.getProductVersion();
|
||||
if (hostVersion.compareTo(VmwareHelper.MIN_VERSION_VMFS6) >= 0) {
|
||||
morDs = firstHostDatastoreSystemMO.createVmfs6Datastore(datastoreName, hostScsiDisk);
|
||||
} else {
|
||||
morDs = firstHostDatastoreSystemMO.createVmfs5Datastore(datastoreName, hostScsiDisk);
|
||||
}
|
||||
} else {
|
||||
// in case of iSCSI/solidfire 1:1 VMFS datastore could be inaccessible
|
||||
mountVmfsDatastore(new DatastoreMO(context, morDs), lstHosts);
|
||||
}
|
||||
|
||||
if (morDs != null) {
|
||||
|
|
@ -3364,7 +3372,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
}
|
||||
}
|
||||
|
||||
private void rescanAllHosts(VmwareContext context, List<Pair<ManagedObjectReference, String>> lstHostPairs, boolean rescanHba, boolean rescanVmfs) throws Exception {
|
||||
public void rescanAllHosts(VmwareContext context, List<Pair<ManagedObjectReference, String>> lstHostPairs, boolean rescanHba, boolean rescanVmfs) throws Exception {
|
||||
List<HostMO> hosts = new ArrayList<>(lstHostPairs.size());
|
||||
|
||||
for (Pair<ManagedObjectReference, String> hostPair : lstHostPairs) {
|
||||
|
|
|
|||
|
|
@ -116,4 +116,15 @@ public class VmwareStorageManagerImplTest {
|
|||
public void testSetVolumeToPathAndSizeDatastoreClusterDifferentChildStore() {
|
||||
testCommon(Storage.StoragePoolType.PreSetup, Storage.StoragePoolType.DatastoreCluster, true);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testIsManagedStorageDatastorePath() {
|
||||
Assert.assertTrue("Test if [-iqn... is a managed storage", storageManager.isManagedStorageDatastorePath("[-iqn.2010-01.com.solidfire:3p53.data-9999.97-0] i-2-9999-VM.vmdk"));
|
||||
Assert.assertFalse("Test if [SomeDS] is not a managed storage", storageManager.isManagedStorageDatastorePath("[SomeDS] i-2-9999-VM/disk.vmdk"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetManagedDatastoreName() {
|
||||
Assert.assertEquals("[-iqn.2010-01.com.solidfire:3p53.data-9999.97-0]", storageManager.getManagedDatastoreName("[-iqn.2010-01.com.solidfire:3p53.data-9999.97-0] i-2-9999-VM.vmdk"));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -53,6 +53,7 @@ public class ListTungstenFabricAddressGroupCmdTest {
|
|||
ReflectionTestUtils.setField(listTungstenFabricAddressGroupCmd, "addressGroupUuid", "test");
|
||||
ReflectionTestUtils.setField(listTungstenFabricAddressGroupCmd, "page", 1);
|
||||
ReflectionTestUtils.setField(listTungstenFabricAddressGroupCmd, "pageSize", 10);
|
||||
ReflectionTestUtils.setField(listTungstenFabricAddressGroupCmd, "s_maxPageSize", -1L);
|
||||
}
|
||||
|
||||
@After
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@
|
|||
package org.apache.cloudstack.storage.datastore.driver;
|
||||
|
||||
import java.text.NumberFormat;
|
||||
import java.util.Arrays;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
|
|
@ -85,6 +86,8 @@ import com.cloud.user.dao.AccountDao;
|
|||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.db.GlobalLock;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
||||
|
|
@ -111,6 +114,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
@Inject private PrimaryDataStoreDao storagePoolDao;
|
||||
@Inject private StoragePoolDetailsDao storagePoolDetailsDao;
|
||||
@Inject private VMTemplatePoolDao vmTemplatePoolDao;
|
||||
@Inject private VMInstanceDao vmDao;
|
||||
@Inject private VolumeDao volumeDao;
|
||||
@Inject private VolumeDetailsDao volumeDetailsDao;
|
||||
@Inject private VolumeDataFactory volumeFactory;
|
||||
|
|
@ -187,13 +191,33 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
}
|
||||
}
|
||||
|
||||
private boolean isRevokeAccessNotNeeded(DataObject dataObject) {
|
||||
// Workaround: don't unplug iscsi lun when volume is attached to a VM
|
||||
// This is regression workaround from upper layers which are calling
|
||||
// a releaseVmResources() method that calls the revoke on an attached disk
|
||||
if (dataObject.getType() == DataObjectType.VOLUME) {
|
||||
Volume volume = volumeDao.findById(dataObject.getId());
|
||||
if (volume.getInstanceId() != null) {
|
||||
VirtualMachine vm = vmDao.findById(volume.getInstanceId());
|
||||
if (vm != null && !Arrays.asList(VirtualMachine.State.Destroyed, VirtualMachine.State.Expunging, VirtualMachine.State.Error).contains(vm.getState())) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore)
|
||||
{
|
||||
public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) {
|
||||
if (dataObject == null || host == null || dataStore == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (isRevokeAccessNotNeeded(dataObject)) {
|
||||
LOGGER.debug("Skipping revoke access for Solidfire data object type:" + dataObject.getType() + " id:" + dataObject.getId());
|
||||
return;
|
||||
}
|
||||
|
||||
long sfVolumeId = getSolidFireVolumeId(dataObject, false);
|
||||
long clusterId = host.getClusterId();
|
||||
long storagePoolId = dataStore.getId();
|
||||
|
|
@ -210,6 +234,8 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
|
||||
LOGGER.debug("Revoking access for Solidfire data object type:" + dataObject.getType() + " id:" + dataObject.getId());
|
||||
|
||||
try {
|
||||
SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);
|
||||
|
||||
|
|
|
|||
|
|
@ -910,7 +910,7 @@ public class ApiResponseHelper implements ResponseGenerator {
|
|||
Long networkId = vlan.getNetworkId();
|
||||
if (networkId != null) {
|
||||
Network network = _ntwkModel.getNetwork(networkId);
|
||||
if (network != null) {
|
||||
if (network != null && TrafficType.Guest.equals(network.getTrafficType())) {
|
||||
Long accountId = network.getAccountId();
|
||||
populateAccount(vlanResponse, accountId);
|
||||
populateDomain(vlanResponse, ApiDBUtils.findAccountById(accountId).getDomainId());
|
||||
|
|
|
|||
|
|
@ -5414,10 +5414,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
|
||||
// Check if any of the Public IP addresses is allocated to another
|
||||
// account
|
||||
boolean forSystemVms = false;
|
||||
final List<IPAddressVO> ips = _publicIpAddressDao.listByVlanId(vlanDbId);
|
||||
for (final IPAddressVO ip : ips) {
|
||||
forSystemVms = ip.isForSystemVms();
|
||||
if (ip.isForSystemVms()) {
|
||||
throw new InvalidParameterValueException(ip.getAddress() + " Public IP address in range is dedicated to system vms ");
|
||||
}
|
||||
final Long allocatedToAccountId = ip.getAllocatedToAccountId();
|
||||
if (allocatedToAccountId != null) {
|
||||
if (vlanOwner != null && allocatedToAccountId != vlanOwner.getId()) {
|
||||
|
|
@ -5442,7 +5443,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NET_IP_ASSIGN, vlanOwner.getId(), ip.getDataCenterId(), ip.getId(), ip.getAddress().toString(), ip.isSourceNat(),
|
||||
vlan.getVlanType().toString(), ip.getSystem(), usageHidden, ip.getClass().getName(), ip.getUuid());
|
||||
}
|
||||
} else if (domain != null && !forSystemVms) {
|
||||
} else if (domain != null) {
|
||||
// Create an DomainVlanMapVO entry
|
||||
DomainVlanMapVO domainVlanMapVO = new DomainVlanMapVO(domain.getId(), vlan.getId());
|
||||
_domainVlanMapDao.persist(domainVlanMapVO);
|
||||
|
|
@ -7277,7 +7278,6 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
|
|||
@Override
|
||||
public Domain getVlanDomain(long vlanId) {
|
||||
Vlan vlan = _vlanDao.findById(vlanId);
|
||||
Long domainId = null;
|
||||
|
||||
// if vlan is Virtual Domain specific, get vlan information from the
|
||||
// accountVlanMap; otherwise get account information
|
||||
|
|
|
|||
|
|
@ -334,19 +334,10 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
|
|||
@DB
|
||||
private IPAddressVO assignAndAllocateIpAddressEntry(final Account owner, final VlanType vlanUse, final Long guestNetworkId,
|
||||
final boolean sourceNat, final boolean allocate, final boolean isSystem,
|
||||
final Long vpcId, final Boolean displayIp, final boolean fetchFromDedicatedRange,
|
||||
final Long vpcId, final Boolean displayIp,
|
||||
final List<IPAddressVO> addressVOS) throws CloudRuntimeException {
|
||||
return Transaction.execute((TransactionCallbackWithException<IPAddressVO, CloudRuntimeException>) status -> {
|
||||
IPAddressVO finalAddress = null;
|
||||
if (!fetchFromDedicatedRange && VlanType.VirtualNetwork.equals(vlanUse)) {
|
||||
// Check that the maximum number of public IPs for the given accountId will not be exceeded
|
||||
try {
|
||||
_resourceLimitMgr.checkResourceLimit(owner, ResourceType.public_ip);
|
||||
} catch (ResourceAllocationException ex) {
|
||||
s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner);
|
||||
throw new AccountLimitException("Maximum number of public IP addresses for account: " + owner.getAccountName() + " has been exceeded.");
|
||||
}
|
||||
}
|
||||
|
||||
for (IPAddressVO possibleAddr : addressVOS) {
|
||||
if (possibleAddr.getState() != State.Free) {
|
||||
|
|
@ -496,9 +487,8 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
|
|||
AssignIpAddressSearch.and("dc", AssignIpAddressSearch.entity().getDataCenterId(), Op.EQ);
|
||||
AssignIpAddressSearch.and("allocated", AssignIpAddressSearch.entity().getAllocatedTime(), Op.NULL);
|
||||
AssignIpAddressSearch.and("vlanId", AssignIpAddressSearch.entity().getVlanId(), Op.IN);
|
||||
if (SystemVmPublicIpReservationModeStrictness.value()) {
|
||||
AssignIpAddressSearch.and("forSystemVms", AssignIpAddressSearch.entity().isForSystemVms(), Op.EQ);
|
||||
}
|
||||
AssignIpAddressSearch.and("forSystemVms", AssignIpAddressSearch.entity().isForSystemVms(), Op.EQ);
|
||||
|
||||
SearchBuilder<VlanVO> vlanSearch = _vlanDao.createSearchBuilder();
|
||||
vlanSearch.and("type", vlanSearch.entity().getVlanType(), Op.EQ);
|
||||
vlanSearch.and("networkId", vlanSearch.entity().getNetworkId(), Op.EQ);
|
||||
|
|
@ -827,6 +817,10 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
|
|||
throws InsufficientAddressCapacityException {
|
||||
List<IPAddressVO> addrs = listAvailablePublicIps(dcId, podId, vlanDbIds, owner, vlanUse, guestNetworkId, sourceNat, assign, allocate, requestedIp, requestedGateway, isSystem, vpcId, displayIp, forSystemVms, true);
|
||||
IPAddressVO addr = addrs.get(0);
|
||||
if (assign) {
|
||||
addr = assignAndAllocateIpAddressEntry(owner, vlanUse, guestNetworkId, sourceNat, allocate,
|
||||
isSystem,vpcId, displayIp, addrs);
|
||||
}
|
||||
if (vlanUse == VlanType.VirtualNetwork) {
|
||||
_firewallMgr.addSystemFirewallRules(addr, owner);
|
||||
}
|
||||
|
|
@ -838,128 +832,99 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
|
|||
public List<IPAddressVO> listAvailablePublicIps(final long dcId, final Long podId, final List<Long> vlanDbIds, final Account owner, final VlanType vlanUse, final Long guestNetworkId,
|
||||
final boolean sourceNat, final boolean assign, final boolean allocate, final String requestedIp, final String requestedGateway, final boolean isSystem,
|
||||
final Long vpcId, final Boolean displayIp, final boolean forSystemVms, final boolean lockOneRow) throws InsufficientAddressCapacityException {
|
||||
return Transaction.execute(new TransactionCallbackWithException<List<IPAddressVO>, InsufficientAddressCapacityException>() {
|
||||
@Override
|
||||
public List<IPAddressVO> doInTransaction(TransactionStatus status) throws InsufficientAddressCapacityException {
|
||||
StringBuilder errorMessage = new StringBuilder("Unable to get ip address in ");
|
||||
boolean fetchFromDedicatedRange = false;
|
||||
List<Long> dedicatedVlanDbIds = new ArrayList<Long>();
|
||||
List<Long> nonDedicatedVlanDbIds = new ArrayList<Long>();
|
||||
DataCenter zone = _entityMgr.findById(DataCenter.class, dcId);
|
||||
|
||||
SearchCriteria<IPAddressVO> sc = null;
|
||||
if (podId != null) {
|
||||
sc = AssignIpAddressFromPodVlanSearch.create();
|
||||
sc.setJoinParameters("podVlanMapSB", "podId", podId);
|
||||
errorMessage.append(" pod id=" + podId);
|
||||
} else {
|
||||
sc = AssignIpAddressSearch.create();
|
||||
errorMessage.append(" zone id=" + dcId);
|
||||
}
|
||||
StringBuilder errorMessage = new StringBuilder("Unable to get ip address in ");
|
||||
boolean fetchFromDedicatedRange = false;
|
||||
List<Long> dedicatedVlanDbIds = new ArrayList<Long>();
|
||||
List<Long> nonDedicatedVlanDbIds = new ArrayList<Long>();
|
||||
DataCenter zone = _entityMgr.findById(DataCenter.class, dcId);
|
||||
|
||||
// If owner has dedicated Public IP ranges, fetch IP from the dedicated range
|
||||
// Otherwise fetch IP from the system pool
|
||||
Network network = _networksDao.findById(guestNetworkId);
|
||||
//Checking if network is null in the case of system VM's. At the time of allocation of IP address to systemVm, no network is present.
|
||||
if(network == null || !(network.getGuestType() == GuestType.Shared && zone.getNetworkType() == NetworkType.Advanced)) {
|
||||
List<AccountVlanMapVO> maps = _accountVlanMapDao.listAccountVlanMapsByAccount(owner.getId());
|
||||
for (AccountVlanMapVO map : maps) {
|
||||
if (vlanDbIds == null || vlanDbIds.contains(map.getVlanDbId()))
|
||||
dedicatedVlanDbIds.add(map.getVlanDbId());
|
||||
}
|
||||
}
|
||||
List<DomainVlanMapVO> domainMaps = _domainVlanMapDao.listDomainVlanMapsByDomain(owner.getDomainId());
|
||||
for (DomainVlanMapVO map : domainMaps) {
|
||||
SearchCriteria<IPAddressVO> sc = null;
|
||||
if (podId != null) {
|
||||
sc = AssignIpAddressFromPodVlanSearch.create();
|
||||
sc.setJoinParameters("podVlanMapSB", "podId", podId);
|
||||
errorMessage.append(" pod id=" + podId);
|
||||
} else {
|
||||
sc = AssignIpAddressSearch.create();
|
||||
errorMessage.append(" zone id=" + dcId);
|
||||
}
|
||||
|
||||
sc.setParameters("dc", dcId);
|
||||
|
||||
// for direct network take ip addresses only from the vlans belonging to the network
|
||||
if (vlanUse == VlanType.DirectAttached) {
|
||||
sc.setJoinParameters("vlan", "networkId", guestNetworkId);
|
||||
errorMessage.append(", network id=" + guestNetworkId);
|
||||
}
|
||||
if (requestedGateway != null) {
|
||||
sc.setJoinParameters("vlan", "vlanGateway", requestedGateway);
|
||||
errorMessage.append(", requested gateway=" + requestedGateway);
|
||||
}
|
||||
sc.setJoinParameters("vlan", "type", vlanUse);
|
||||
|
||||
Network network = _networksDao.findById(guestNetworkId);
|
||||
String routerIpAddress = null;
|
||||
if (network != null) {
|
||||
NetworkDetailVO routerIpDetail = _networkDetailsDao.findDetail(network.getId(), ApiConstants.ROUTER_IP);
|
||||
routerIpAddress = routerIpDetail != null ? routerIpDetail.getValue() : null;
|
||||
}
|
||||
if (requestedIp != null) {
|
||||
sc.addAnd("address", SearchCriteria.Op.EQ, requestedIp);
|
||||
errorMessage.append(": requested ip " + requestedIp + " is not available");
|
||||
} else if (routerIpAddress != null) {
|
||||
sc.addAnd("address", Op.NEQ, routerIpAddress);
|
||||
}
|
||||
|
||||
boolean ascOrder = ! forSystemVms;
|
||||
Filter filter = new Filter(IPAddressVO.class, "forSystemVms", ascOrder, 0l, 1l);
|
||||
|
||||
filter.addOrderBy(IPAddressVO.class,"vlanId", true);
|
||||
|
||||
List<IPAddressVO> addrs = new ArrayList<>();
|
||||
|
||||
if (forSystemVms) {
|
||||
// Get Public IPs for system vms in dedicated ranges
|
||||
sc.setParameters("forSystemVms", true);
|
||||
if (lockOneRow) {
|
||||
addrs = _ipAddressDao.lockRows(sc, filter, true);
|
||||
} else {
|
||||
addrs = new ArrayList<>(_ipAddressDao.search(sc, null));
|
||||
}
|
||||
}
|
||||
if ((!lockOneRow || (lockOneRow && CollectionUtils.isEmpty(addrs))) &&
|
||||
!(forSystemVms && SystemVmPublicIpReservationModeStrictness.value())) {
|
||||
sc.setParameters("forSystemVms", false);
|
||||
// If owner has dedicated Public IP ranges, fetch IP from the dedicated range
|
||||
// Otherwise fetch IP from the system pool
|
||||
// Checking if network is null in the case of system VM's. At the time of allocation of IP address to systemVm, no network is present.
|
||||
if (network == null || !(network.getGuestType() == GuestType.Shared && zone.getNetworkType() == NetworkType.Advanced)) {
|
||||
List<AccountVlanMapVO> maps = _accountVlanMapDao.listAccountVlanMapsByAccount(owner.getId());
|
||||
for (AccountVlanMapVO map : maps) {
|
||||
if (vlanDbIds == null || vlanDbIds.contains(map.getVlanDbId()))
|
||||
dedicatedVlanDbIds.add(map.getVlanDbId());
|
||||
}
|
||||
List<VlanVO> nonDedicatedVlans = _vlanDao.listZoneWideNonDedicatedVlans(dcId);
|
||||
for (VlanVO nonDedicatedVlan : nonDedicatedVlans) {
|
||||
if (vlanDbIds == null || vlanDbIds.contains(nonDedicatedVlan.getId()))
|
||||
nonDedicatedVlanDbIds.add(nonDedicatedVlan.getId());
|
||||
}
|
||||
|
||||
if (vlanUse == VlanType.VirtualNetwork) {
|
||||
if (!dedicatedVlanDbIds.isEmpty()) {
|
||||
fetchFromDedicatedRange = true;
|
||||
sc.setParameters("vlanId", dedicatedVlanDbIds.toArray());
|
||||
errorMessage.append(", vlanId id=" + Arrays.toString(dedicatedVlanDbIds.toArray()));
|
||||
} else if (!nonDedicatedVlanDbIds.isEmpty()) {
|
||||
sc.setParameters("vlanId", nonDedicatedVlanDbIds.toArray());
|
||||
errorMessage.append(", vlanId id=" + Arrays.toString(nonDedicatedVlanDbIds.toArray()));
|
||||
} else {
|
||||
if (podId != null) {
|
||||
InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Insufficient address capacity", Pod.class, podId);
|
||||
ex.addProxyObject(ApiDBUtils.findPodById(podId).getUuid());
|
||||
throw ex;
|
||||
}
|
||||
s_logger.warn(errorMessage.toString());
|
||||
InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Insufficient address capacity", DataCenter.class, dcId);
|
||||
ex.addProxyObject(ApiDBUtils.findZoneById(dcId).getUuid());
|
||||
throw ex;
|
||||
}
|
||||
}
|
||||
|
||||
sc.setParameters("dc", dcId);
|
||||
|
||||
// for direct network take ip addresses only from the vlans belonging to the network
|
||||
if (vlanUse == VlanType.DirectAttached) {
|
||||
sc.setJoinParameters("vlan", "networkId", guestNetworkId);
|
||||
errorMessage.append(", network id=" + guestNetworkId);
|
||||
}
|
||||
if (requestedGateway != null) {
|
||||
sc.setJoinParameters("vlan", "vlanGateway", requestedGateway);
|
||||
errorMessage.append(", requested gateway=" + requestedGateway);
|
||||
}
|
||||
sc.setJoinParameters("vlan", "type", vlanUse);
|
||||
String routerIpAddress = null;
|
||||
if (network != null) {
|
||||
NetworkDetailVO routerIpDetail = _networkDetailsDao.findDetail(network.getId(), ApiConstants.ROUTER_IP);
|
||||
routerIpAddress = routerIpDetail != null ? routerIpDetail.getValue() : null;
|
||||
}
|
||||
if (requestedIp != null) {
|
||||
sc.addAnd("address", SearchCriteria.Op.EQ, requestedIp);
|
||||
errorMessage.append(": requested ip " + requestedIp + " is not available");
|
||||
} else if (routerIpAddress != null) {
|
||||
sc.addAnd("address", Op.NEQ, routerIpAddress);
|
||||
}
|
||||
|
||||
boolean ascOrder = ! forSystemVms;
|
||||
Filter filter = new Filter(IPAddressVO.class, "forSystemVms", ascOrder, 0l, 1l);
|
||||
if (SystemVmPublicIpReservationModeStrictness.value()) {
|
||||
sc.setParameters("forSystemVms", forSystemVms);
|
||||
}
|
||||
|
||||
filter.addOrderBy(IPAddressVO.class,"vlanId", true);
|
||||
|
||||
List<IPAddressVO> addrs;
|
||||
|
||||
if (lockOneRow) {
|
||||
addrs = _ipAddressDao.lockRows(sc, filter, true);
|
||||
}
|
||||
List<DomainVlanMapVO> domainMaps = _domainVlanMapDao.listDomainVlanMapsByDomain(owner.getDomainId());
|
||||
for (DomainVlanMapVO map : domainMaps) {
|
||||
if (vlanDbIds == null || vlanDbIds.contains(map.getVlanDbId()))
|
||||
dedicatedVlanDbIds.add(map.getVlanDbId());
|
||||
}
|
||||
List<VlanVO> nonDedicatedVlans = _vlanDao.listZoneWideNonDedicatedVlans(dcId);
|
||||
for (VlanVO nonDedicatedVlan : nonDedicatedVlans) {
|
||||
if (vlanDbIds == null || vlanDbIds.contains(nonDedicatedVlan.getId()))
|
||||
nonDedicatedVlanDbIds.add(nonDedicatedVlan.getId());
|
||||
}
|
||||
if (vlanUse == VlanType.VirtualNetwork) {
|
||||
if (!dedicatedVlanDbIds.isEmpty()) {
|
||||
fetchFromDedicatedRange = true;
|
||||
sc.setParameters("vlanId", dedicatedVlanDbIds.toArray());
|
||||
errorMessage.append(", vlanId id=" + Arrays.toString(dedicatedVlanDbIds.toArray()));
|
||||
} else if (!nonDedicatedVlanDbIds.isEmpty()) {
|
||||
sc.setParameters("vlanId", nonDedicatedVlanDbIds.toArray());
|
||||
errorMessage.append(", vlanId id=" + Arrays.toString(nonDedicatedVlanDbIds.toArray()));
|
||||
} else {
|
||||
addrs = new ArrayList<>(_ipAddressDao.search(sc, null));
|
||||
}
|
||||
|
||||
// If all the dedicated IPs of the owner are in use fetch an IP from the system pool
|
||||
if ((!lockOneRow || (lockOneRow && addrs.size() == 0)) && fetchFromDedicatedRange && vlanUse == VlanType.VirtualNetwork) {
|
||||
// Verify if account is allowed to acquire IPs from the system
|
||||
boolean useSystemIps = UseSystemPublicIps.valueIn(owner.getId());
|
||||
if (useSystemIps && !nonDedicatedVlanDbIds.isEmpty()) {
|
||||
fetchFromDedicatedRange = false;
|
||||
sc.setParameters("vlanId", nonDedicatedVlanDbIds.toArray());
|
||||
errorMessage.append(", vlanId id=" + Arrays.toString(nonDedicatedVlanDbIds.toArray()));
|
||||
if (lockOneRow) {
|
||||
addrs = _ipAddressDao.lockRows(sc, filter, true);
|
||||
} else {
|
||||
addrs.addAll(_ipAddressDao.search(sc, null));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (lockOneRow && addrs.size() == 0) {
|
||||
if (podId != null) {
|
||||
InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Insufficient address capacity", Pod.class, podId);
|
||||
// for now, we hardcode the table names, but we should ideally do a lookup for the tablename from the VO object.
|
||||
ex.addProxyObject(ApiDBUtils.findPodById(podId).getUuid());
|
||||
throw ex;
|
||||
}
|
||||
|
|
@ -968,17 +933,58 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
|
|||
ex.addProxyObject(ApiDBUtils.findZoneById(dcId).getUuid());
|
||||
throw ex;
|
||||
}
|
||||
|
||||
if (lockOneRow) {
|
||||
assert (addrs.size() == 1) : "Return size is incorrect: " + addrs.size();
|
||||
}
|
||||
if (assign) {
|
||||
assignAndAllocateIpAddressEntry(owner, vlanUse, guestNetworkId, sourceNat, allocate,
|
||||
isSystem,vpcId, displayIp, fetchFromDedicatedRange, addrs);
|
||||
}
|
||||
return addrs;
|
||||
}
|
||||
});
|
||||
if (lockOneRow) {
|
||||
addrs = _ipAddressDao.lockRows(sc, filter, true);
|
||||
} else {
|
||||
addrs = new ArrayList<>(_ipAddressDao.search(sc, null));
|
||||
}
|
||||
|
||||
// If all the dedicated IPs of the owner are in use fetch an IP from the system pool
|
||||
if ((!lockOneRow || (lockOneRow && addrs.size() == 0)) && fetchFromDedicatedRange && vlanUse == VlanType.VirtualNetwork) {
|
||||
// Verify if account is allowed to acquire IPs from the system
|
||||
boolean useSystemIps = UseSystemPublicIps.valueIn(owner.getId());
|
||||
if (useSystemIps && !nonDedicatedVlanDbIds.isEmpty()) {
|
||||
fetchFromDedicatedRange = false;
|
||||
sc.setParameters("vlanId", nonDedicatedVlanDbIds.toArray());
|
||||
errorMessage.append(", vlanId id=" + Arrays.toString(nonDedicatedVlanDbIds.toArray()));
|
||||
if (lockOneRow) {
|
||||
addrs = _ipAddressDao.lockRows(sc, filter, true);
|
||||
} else {
|
||||
addrs.addAll(_ipAddressDao.search(sc, null));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (lockOneRow && addrs.size() == 0) {
|
||||
if (podId != null) {
|
||||
InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Insufficient address capacity", Pod.class, podId);
|
||||
// for now, we hardcode the table names, but we should ideally do a lookup for the tablename from the VO object.
|
||||
ex.addProxyObject(ApiDBUtils.findPodById(podId).getUuid());
|
||||
throw ex;
|
||||
}
|
||||
s_logger.warn(errorMessage.toString());
|
||||
InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Insufficient address capacity", DataCenter.class, dcId);
|
||||
ex.addProxyObject(ApiDBUtils.findZoneById(dcId).getUuid());
|
||||
throw ex;
|
||||
}
|
||||
|
||||
if (lockOneRow) {
|
||||
assert (addrs.size() == 1) : "Return size is incorrect: " + addrs.size();
|
||||
}
|
||||
|
||||
if (assign && !fetchFromDedicatedRange && VlanType.VirtualNetwork.equals(vlanUse)) {
|
||||
// Check that the maximum number of public IPs for the given accountId will not be exceeded
|
||||
try {
|
||||
_resourceLimitMgr.checkResourceLimit(owner, ResourceType.public_ip);
|
||||
} catch (ResourceAllocationException ex) {
|
||||
s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner);
|
||||
throw new AccountLimitException("Maximum number of public IP addresses for account: " + owner.getAccountName() + " has been exceeded.");
|
||||
}
|
||||
}
|
||||
|
||||
return addrs;
|
||||
}
|
||||
|
||||
@DB
|
||||
|
|
@ -2341,7 +2347,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
|
|||
|
||||
@Override
|
||||
public ConfigKey<?>[] getConfigKeys() {
|
||||
return new ConfigKey<?>[] {UseSystemPublicIps, RulesContinueOnError, SystemVmPublicIpReservationModeStrictness, VrouterRedundantTiersPlacement};
|
||||
return new ConfigKey<?>[] {UseSystemPublicIps, RulesContinueOnError, SystemVmPublicIpReservationModeStrictness, VrouterRedundantTiersPlacement, AllowUserListAvailableIpsOnSharedNetwork};
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -21,6 +21,7 @@ import java.util.Arrays;
|
|||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
|
|
@ -35,6 +36,7 @@ import org.apache.cloudstack.framework.config.ConfigKey;
|
|||
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
|
||||
import org.apache.cloudstack.network.router.deployment.RouterDeploymentDefinition;
|
||||
import org.apache.cloudstack.utils.CloudStackVersion;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.agent.AgentManager;
|
||||
|
|
@ -42,6 +44,7 @@ import com.cloud.agent.api.Answer;
|
|||
import com.cloud.agent.api.to.NicTO;
|
||||
import com.cloud.agent.manager.Commands;
|
||||
import com.cloud.alert.AlertManager;
|
||||
import com.cloud.capacity.CapacityManager;
|
||||
import com.cloud.configuration.Config;
|
||||
import com.cloud.dc.ClusterVO;
|
||||
import com.cloud.dc.DataCenter;
|
||||
|
|
@ -167,6 +170,8 @@ public class NetworkHelperImpl implements NetworkHelper {
|
|||
RouterHealthCheckResultDao _routerHealthCheckResultDao;
|
||||
@Inject
|
||||
Ipv6Service ipv6Service;
|
||||
@Inject
|
||||
CapacityManager capacityMgr;
|
||||
|
||||
protected final Map<HypervisorType, ConfigKey<String>> hypervisorsMap = new HashMap<>();
|
||||
|
||||
|
|
@ -502,12 +507,12 @@ public class NetworkHelperImpl implements NetworkHelper {
|
|||
// failed both times, throw the exception up
|
||||
final List<HypervisorType> hypervisors = getHypervisors(routerDeploymentDefinition);
|
||||
|
||||
int allocateRetry = 0;
|
||||
int startRetry = 0;
|
||||
DomainRouterVO router = null;
|
||||
for (final Iterator<HypervisorType> iter = hypervisors.iterator(); iter.hasNext();) {
|
||||
final HypervisorType hType = iter.next();
|
||||
try {
|
||||
checkIfZoneHasCapacity(routerDeploymentDefinition.getDest().getDataCenter(), hType, routerOffering);
|
||||
|
||||
final long id = _routerDao.getNextInSequence(Long.class, "id");
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug(String.format("Allocating the VR with id=%s in datacenter %s with the hypervisor type %s", id, routerDeploymentDefinition.getDest()
|
||||
|
|
@ -548,14 +553,12 @@ public class NetworkHelperImpl implements NetworkHelper {
|
|||
reallocateRouterNetworks(routerDeploymentDefinition, router, template, null);
|
||||
router = _routerDao.findById(router.getId());
|
||||
} catch (final InsufficientCapacityException ex) {
|
||||
if (allocateRetry < 2 && iter.hasNext()) {
|
||||
if (iter.hasNext()) {
|
||||
s_logger.debug("Failed to allocate the VR with hypervisor type " + hType + ", retrying one more time");
|
||||
continue;
|
||||
} else {
|
||||
throw ex;
|
||||
}
|
||||
} finally {
|
||||
allocateRetry++;
|
||||
}
|
||||
|
||||
if (startRouter) {
|
||||
|
|
@ -563,7 +566,7 @@ public class NetworkHelperImpl implements NetworkHelper {
|
|||
router = startVirtualRouter(router, _accountMgr.getSystemUser(), _accountMgr.getSystemAccount(), routerDeploymentDefinition.getParams());
|
||||
break;
|
||||
} catch (final InsufficientCapacityException ex) {
|
||||
if (startRetry < 2 && iter.hasNext()) {
|
||||
if (iter.hasNext()) {
|
||||
s_logger.debug("Failed to start the VR " + router + " with hypervisor type " + hType + ", " + "destroying it and recreating one more time");
|
||||
// destroy the router
|
||||
destroyRouter(router.getId(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM), User.UID_SYSTEM);
|
||||
|
|
@ -571,8 +574,6 @@ public class NetworkHelperImpl implements NetworkHelper {
|
|||
} else {
|
||||
throw ex;
|
||||
}
|
||||
} finally {
|
||||
startRetry++;
|
||||
}
|
||||
} else {
|
||||
// return stopped router
|
||||
|
|
@ -583,6 +584,25 @@ public class NetworkHelperImpl implements NetworkHelper {
|
|||
return router;
|
||||
}
|
||||
|
||||
private void checkIfZoneHasCapacity(final DataCenter zone, final HypervisorType hypervisorType, final ServiceOfferingVO routerOffering) throws InsufficientServerCapacityException {
|
||||
List <HostVO> hosts = _hostDao.listByDataCenterIdAndHypervisorType(zone.getId(), hypervisorType);
|
||||
if (CollectionUtils.isEmpty(hosts)) {
|
||||
String msg = String.format("Zone %s has no %s host available which is enabled and in Up state", zone.getName(), hypervisorType);
|
||||
s_logger.debug(msg);
|
||||
throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId());
|
||||
}
|
||||
for (HostVO host : hosts) {
|
||||
Pair<Boolean, Boolean> cpuCapabilityAndCapacity = capacityMgr.checkIfHostHasCpuCapabilityAndCapacity(host, routerOffering, false);
|
||||
if (cpuCapabilityAndCapacity.first() && cpuCapabilityAndCapacity.second()) {
|
||||
s_logger.debug("Host " + host + " has enough capacity for the router");
|
||||
return;
|
||||
}
|
||||
}
|
||||
String msg = String.format("Zone %s has no %s host which has enough capacity", zone.getName(), hypervisorType);
|
||||
s_logger.debug(msg);
|
||||
throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId());
|
||||
}
|
||||
|
||||
protected void filterSupportedHypervisors(final List<HypervisorType> hypervisors) {
|
||||
// For non vpc we keep them all assuming all types in the list are
|
||||
// supported
|
||||
|
|
@ -619,7 +639,7 @@ public class NetworkHelperImpl implements NetworkHelper {
|
|||
throw new InsufficientServerCapacityException("Unable to create virtual router, there are no clusters in the zone." + getNoHypervisorsErrMsgDetails(),
|
||||
DataCenter.class, dest.getDataCenter().getId());
|
||||
}
|
||||
return hypervisors;
|
||||
return new ArrayList(new LinkedHashSet<>(hypervisors));
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -2322,6 +2322,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
isAllocated = Boolean.TRUE;
|
||||
}
|
||||
}
|
||||
boolean isAllocatedTemp = isAllocated;
|
||||
|
||||
VlanType vlanType = null;
|
||||
if (forVirtualNetwork != null) {
|
||||
|
|
@ -2332,6 +2333,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
|
||||
final Account caller = getCaller();
|
||||
List<IPAddressVO> addrs = new ArrayList<>();
|
||||
NetworkVO network = null; // shared network
|
||||
|
||||
if (vlanType == VlanType.DirectAttached && networkId == null && ipId == null) { // only root admin can list public ips in all shared networks
|
||||
if (caller.getType() != Account.Type.ADMIN) {
|
||||
|
|
@ -2340,7 +2342,6 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
} else if (vlanType == VlanType.DirectAttached) {
|
||||
// list public ip address on shared network
|
||||
// access control. admin: all Ips, domain admin/user: all Ips in shared network in the domain/sub-domain/user
|
||||
NetworkVO network = null;
|
||||
if (networkId == null) {
|
||||
IPAddressVO ip = _publicIpAddressDao.findById(ipId);
|
||||
if (ip == null) {
|
||||
|
|
@ -2474,7 +2475,20 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
for (IPAddressVO addr: freeAddrs) {
|
||||
freeAddrIds.add(addr.getId());
|
||||
}
|
||||
} else if (vlanType == VlanType.DirectAttached && network != null && !isAllocatedTemp && isAllocated) {
|
||||
if (caller.getType() != Account.Type.ADMIN && !IpAddressManager.AllowUserListAvailableIpsOnSharedNetwork.value()) {
|
||||
s_logger.debug("Non-admin users are not allowed to list available IPs on shared networks");
|
||||
} else {
|
||||
final SearchBuilder<IPAddressVO> searchBuilder = _publicIpAddressDao.createSearchBuilder();
|
||||
buildParameters(searchBuilder, cmd, false);
|
||||
|
||||
SearchCriteria<IPAddressVO> searchCriteria = searchBuilder.create();
|
||||
setParameters(searchCriteria, cmd, vlanType, false);
|
||||
searchCriteria.setParameters("state", IpAddress.State.Free.name());
|
||||
addrs.addAll(_publicIpAddressDao.search(searchCriteria, searchFilter)); // Free IPs on shared network
|
||||
}
|
||||
}
|
||||
|
||||
if (freeAddrIds.size() > 0) {
|
||||
final SearchBuilder<IPAddressVO> sb2 = _publicIpAddressDao.createSearchBuilder();
|
||||
buildParameters(sb2, cmd, false);
|
||||
|
|
|
|||
|
|
@ -2772,13 +2772,18 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
final List<String> userReadOnlySettings = Stream.of(QueryService.UserVMReadOnlyDetails.value().split(","))
|
||||
.map(item -> (item).trim())
|
||||
.collect(Collectors.toList());
|
||||
List<UserVmDetailVO> existingDetails = userVmDetailsDao.listDetails(id);
|
||||
if (cleanupDetails){
|
||||
if (caller != null && caller.getType() == Account.Type.ADMIN) {
|
||||
userVmDetailsDao.removeDetails(id);
|
||||
for (final UserVmDetailVO detail : existingDetails) {
|
||||
if (detail != null && detail.isDisplay()) {
|
||||
userVmDetailsDao.removeDetail(id, detail.getName());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (final UserVmDetailVO detail : userVmDetailsDao.listDetails(id)) {
|
||||
for (final UserVmDetailVO detail : existingDetails) {
|
||||
if (detail != null && !userDenyListedSettings.contains(detail.getName())
|
||||
&& !userReadOnlySettings.contains(detail.getName())) {
|
||||
&& !userReadOnlySettings.contains(detail.getName()) && detail.isDisplay()) {
|
||||
userVmDetailsDao.removeDetail(id, detail.getName());
|
||||
}
|
||||
}
|
||||
|
|
@ -2798,15 +2803,25 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
if (userReadOnlySettings.contains(detailName)) {
|
||||
throw new InvalidParameterValueException("You're not allowed to add or edit the read-only setting: " + detailName);
|
||||
}
|
||||
if (existingDetails.stream().anyMatch(d -> Objects.equals(d.getName(), detailName) && !d.isDisplay())){
|
||||
throw new InvalidParameterValueException("You're not allowed to add or edit the non-displayable setting: " + detailName);
|
||||
}
|
||||
}
|
||||
// Add any hidden/denied or read-only detail
|
||||
for (final UserVmDetailVO detail : userVmDetailsDao.listDetails(id)) {
|
||||
// Add any existing user denied or read-only details. We do it here because admins would already provide these (or can delete them).
|
||||
for (final UserVmDetailVO detail : existingDetails) {
|
||||
if (userDenyListedSettings.contains(detail.getName()) || userReadOnlySettings.contains(detail.getName())) {
|
||||
details.put(detail.getName(), detail.getValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ensure details marked as non-displayable are maintained, regardless of admin or not
|
||||
for (final UserVmDetailVO existingDetail : existingDetails) {
|
||||
if (!existingDetail.isDisplay()) {
|
||||
details.put(existingDetail.getName(), existingDetail.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
verifyVmLimits(vmInstance, details);
|
||||
vmInstance.setDetails(details);
|
||||
_vmDao.saveDetails(vmInstance);
|
||||
|
|
|
|||
|
|
@ -162,7 +162,7 @@ public class UserVmManagerImplTest {
|
|||
private EntityManager entityManager;
|
||||
|
||||
@Mock
|
||||
private UserVmDetailsDao userVmDetailVO;
|
||||
private UserVmDetailsDao userVmDetailsDao;
|
||||
|
||||
@Mock
|
||||
private UserVmVO userVmVoMock;
|
||||
|
|
@ -333,7 +333,7 @@ public class UserVmManagerImplTest {
|
|||
verifyMethodsThatAreAlwaysExecuted();
|
||||
|
||||
Mockito.verify(userVmManagerImpl).updateDisplayVmFlag(false, vmId, userVmVoMock);
|
||||
Mockito.verify(userVmDetailVO, Mockito.times(0)).removeDetails(vmId);
|
||||
Mockito.verify(userVmDetailsDao, Mockito.times(0)).removeDetail(anyLong(), anyString());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@ -343,12 +343,15 @@ public class UserVmManagerImplTest {
|
|||
Mockito.when(_serviceOfferingDao.findById(Mockito.anyLong(), Mockito.anyLong())).thenReturn((ServiceOfferingVO) offering);
|
||||
Mockito.when(updateVmCommand.isCleanupDetails()).thenReturn(true);
|
||||
Mockito.lenient().doNothing().when(userVmManagerImpl).updateDisplayVmFlag(false, vmId, userVmVoMock);
|
||||
Mockito.doNothing().when(userVmDetailVO).removeDetails(vmId);
|
||||
|
||||
Mockito.when(updateVmCommand.getUserdataId()).thenReturn(null);
|
||||
|
||||
prepareExistingDetails(vmId, "userdetail");
|
||||
|
||||
userVmManagerImpl.updateVirtualMachine(updateVmCommand);
|
||||
verifyMethodsThatAreAlwaysExecuted();
|
||||
Mockito.verify(userVmDetailVO).removeDetails(vmId);
|
||||
Mockito.verify(userVmDetailsDao).removeDetail(vmId, "userdetail");
|
||||
Mockito.verify(userVmDetailsDao, Mockito.times(0)).removeDetail(vmId, "systemdetail");
|
||||
Mockito.verify(userVmManagerImpl, Mockito.times(0)).updateDisplayVmFlag(false, vmId, userVmVoMock);
|
||||
}
|
||||
|
||||
|
|
@ -373,6 +376,16 @@ public class UserVmManagerImplTest {
|
|||
prepareAndExecuteMethodDealingWithDetails(false, false);
|
||||
}
|
||||
|
||||
private List<UserVmDetailVO> prepareExistingDetails(Long vmId, String... existingDetailKeys) {
|
||||
List<UserVmDetailVO> existingDetails = new ArrayList<>();
|
||||
for (String detail : existingDetailKeys) {
|
||||
existingDetails.add(new UserVmDetailVO(vmId, detail, "foo", true));
|
||||
}
|
||||
existingDetails.add(new UserVmDetailVO(vmId, "systemdetail", "bar", false));
|
||||
Mockito.when(userVmDetailsDao.listDetails(vmId)).thenReturn(existingDetails);
|
||||
return existingDetails;
|
||||
}
|
||||
|
||||
private void prepareAndExecuteMethodDealingWithDetails(boolean cleanUpDetails, boolean isDetailsEmpty) throws ResourceUnavailableException, InsufficientCapacityException {
|
||||
configureDoNothingForMethodsThatWeDoNotWantToTest();
|
||||
|
||||
|
|
@ -393,8 +406,9 @@ public class UserVmManagerImplTest {
|
|||
lenient().doNothing().when(_networkMgr).saveExtraDhcpOptions(anyString(), anyLong(), anyMap());
|
||||
HashMap<String, String> details = new HashMap<>();
|
||||
if(!isDetailsEmpty) {
|
||||
details.put("", "");
|
||||
details.put("newdetail", "foo");
|
||||
}
|
||||
prepareExistingDetails(vmId, "existingdetail");
|
||||
Mockito.when(updateVmCommand.getUserdataId()).thenReturn(null);
|
||||
Mockito.when(updateVmCommand.getDetails()).thenReturn(details);
|
||||
Mockito.when(updateVmCommand.isCleanupDetails()).thenReturn(cleanUpDetails);
|
||||
|
|
@ -404,14 +418,15 @@ public class UserVmManagerImplTest {
|
|||
verifyMethodsThatAreAlwaysExecuted();
|
||||
|
||||
Mockito.verify(userVmVoMock, Mockito.times(cleanUpDetails || isDetailsEmpty ? 0 : 1)).setDetails(details);
|
||||
Mockito.verify(userVmDetailVO, Mockito.times(cleanUpDetails ? 1: 0)).removeDetails(vmId);
|
||||
Mockito.verify(userVmDetailsDao, Mockito.times(cleanUpDetails ? 1 : 0)).removeDetail(vmId, "existingdetail");
|
||||
Mockito.verify(userVmDetailsDao, Mockito.times(0)).removeDetail(vmId, "systemdetail");
|
||||
Mockito.verify(userVmDao, Mockito.times(cleanUpDetails || isDetailsEmpty ? 0 : 1)).saveDetails(userVmVoMock);
|
||||
Mockito.verify(userVmManagerImpl, Mockito.times(0)).updateDisplayVmFlag(false, vmId, userVmVoMock);
|
||||
}
|
||||
|
||||
private void configureDoNothingForDetailsMethod() {
|
||||
Mockito.lenient().doNothing().when(userVmManagerImpl).updateDisplayVmFlag(false, vmId, userVmVoMock);
|
||||
Mockito.doNothing().when(userVmDetailVO).removeDetails(vmId);
|
||||
Mockito.doNothing().when(userVmDetailsDao).removeDetail(anyLong(), anyString());
|
||||
Mockito.doNothing().when(userVmDao).saveDetails(userVmVoMock);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -15,11 +15,11 @@
|
|||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from marvin.cloudstackTestCase import cloudstackTestCase
|
||||
from marvin.lib.base import Account, VirtualMachine, ServiceOffering
|
||||
from marvin.lib.utils import cleanup_resources
|
||||
from marvin.lib.utils import (validateList, cleanup_resources)
|
||||
from marvin.lib.common import get_zone, get_domain, get_template
|
||||
from marvin.codes import PASS
|
||||
from nose.plugins.attrib import attr
|
||||
|
||||
class TestData(object):
|
||||
|
|
@ -59,6 +59,7 @@ class TestUpdateVirtualMachine(cloudstackTestCase):
|
|||
def setUp(self):
|
||||
self.testdata = TestData().testdata
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
self.dbclient = self.testClient.getDbConnection()
|
||||
|
||||
# Get Zone, Domain and Default Built-in template
|
||||
self.domain = get_domain(self.apiclient)
|
||||
|
|
@ -106,18 +107,7 @@ class TestUpdateVirtualMachine(cloudstackTestCase):
|
|||
templateid=self.template.id
|
||||
)
|
||||
|
||||
list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
|
||||
self.assertEqual(
|
||||
isinstance(list_vms, list),
|
||||
True,
|
||||
"List VM response was not a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_vms),
|
||||
0,
|
||||
"List VM response was empty"
|
||||
)
|
||||
vm = list_vms[0]
|
||||
vm = self.listVmById(self.virtual_machine.id)
|
||||
|
||||
self.debug(
|
||||
"VirtualMachine launched with id, name, displayname: %s %s %s"\
|
||||
|
|
@ -152,6 +142,112 @@ class TestUpdateVirtualMachine(cloudstackTestCase):
|
|||
self.assertEqual(vmnew.displayname, vmnewstarted.displayname,
|
||||
msg="display name changed on start, displayname is %s" % vmnewstarted.displayname)
|
||||
|
||||
@attr(tags=['advanced', 'simulator', 'basic', 'sg', 'details'], required_hardware="false")
|
||||
def test_update_vm_details_admin(self):
|
||||
"""Test Update VirtualMachine Details
|
||||
|
||||
# Set up a VM
|
||||
# Set up hidden detail in DB for VM
|
||||
|
||||
# Validate the following:
|
||||
# 1. Can add two details (detail1, detail2)
|
||||
# 2. Can fetch new details on VM
|
||||
# 3. Can delete detail1
|
||||
# 4. Hidden detail not removed
|
||||
# 6. The detail2 remains
|
||||
# 7. Ensure cleanup parameter doesn't remove hidden details
|
||||
"""
|
||||
hidden_detail_name = "configDriveLocation"
|
||||
detail1 = "detail1"
|
||||
detail2 = "detail2"
|
||||
|
||||
# set up a VM
|
||||
self.virtual_machine = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
self.testdata["virtual_machine"],
|
||||
accountid=self.account.name,
|
||||
zoneid=self.zone.id,
|
||||
domainid=self.account.domainid,
|
||||
serviceofferingid=self.service_offering.id,
|
||||
templateid=self.template.id
|
||||
)
|
||||
self.cleanup.append(self.virtual_machine)
|
||||
|
||||
vm = self.listVmById(self.virtual_machine.id)
|
||||
|
||||
self.debug(
|
||||
"VirtualMachine launched with id, name, displayname: %s %s %s" \
|
||||
% (self.virtual_machine.id, vm.name, vm.displayname)
|
||||
)
|
||||
|
||||
# set up a hidden detail
|
||||
dbresult = self.dbclient.execute("select id from vm_instance where uuid='%s'" % vm.id)
|
||||
self.assertEqual(validateList(dbresult)[0], PASS, "sql query returned invalid response")
|
||||
vm_db_id = dbresult[0][0]
|
||||
self.debug("VM has database id %d" % vm_db_id)
|
||||
|
||||
self.dbclient.execute("insert into user_vm_details (vm_id, name, value, display) values (%d,'%s','HOST', 0)" % (vm_db_id, hidden_detail_name))
|
||||
|
||||
vm = self.listVmById(self.virtual_machine.id)
|
||||
self.debug("VirtualMachine fetched with details: %s of type %s" % (vm.details, type(vm.details)))
|
||||
|
||||
self.assertIsNone(vm.details[hidden_detail_name], "hidden detail should be hidden")
|
||||
|
||||
# add two details by appending to what was returned via API
|
||||
updating_vm_details = vm.details.__dict__
|
||||
updating_vm_details[detail1] = "foo"
|
||||
updating_vm_details[detail2] = "bar"
|
||||
|
||||
self.debug("Updating VM to new details: %s" % updating_vm_details)
|
||||
vm = self.virtual_machine.update(self.apiclient, details=[updating_vm_details])
|
||||
|
||||
self.assertIsNotNone(vm.details[detail1], "Expect " + detail1)
|
||||
self.assertIsNotNone(vm.details[detail2], "Expect " + detail2)
|
||||
self.assertIsNone(vm.details[hidden_detail_name], "hidden detail should be hidden")
|
||||
self.assertTrue(self.detailInDatabase(vm_db_id, hidden_detail_name), "hidden detail should still exist in db")
|
||||
|
||||
# delete one detail
|
||||
updating_vm_details = vm.details.__dict__
|
||||
del updating_vm_details["detail1"]
|
||||
|
||||
self.debug("Deleting one detail by updating details: %s" % updating_vm_details)
|
||||
vm = self.virtual_machine.update(self.apiclient, details=[updating_vm_details])
|
||||
|
||||
self.assertIsNone(vm.details[detail1], "Do not expect " + detail1)
|
||||
self.assertIsNotNone(vm.details[detail2], "Expect " + detail2)
|
||||
self.assertIsNone(vm.details[hidden_detail_name], "hidden detail should be hidden")
|
||||
self.assertTrue(self.detailInDatabase(vm_db_id, hidden_detail_name), "hidden detail should still exist in db")
|
||||
|
||||
# cleanup, ensure hidden detail is not deleted
|
||||
vm = self.virtual_machine.update(self.apiclient, cleanupdetails="true")
|
||||
self.assertIsNone(vm.details[detail1], "Do not expect " + detail1)
|
||||
self.assertIsNone(vm.details[detail2], "Do not expect " + detail2)
|
||||
self.assertIsNone(vm.details[hidden_detail_name], "hidden detail should be hidden")
|
||||
self.assertTrue(self.detailInDatabase(vm_db_id, hidden_detail_name), "hidden detail should still exist in db")
|
||||
|
||||
|
||||
def detailInDatabase(self, vm_id, detail_name):
|
||||
dbresult = self.dbclient.execute("select id from user_vm_details where vm_id=%s and name='%s'" % (vm_id, detail_name))
|
||||
self.debug("Detail %s for VM %s: %s" % (detail_name, vm_id, dbresult))
|
||||
if validateList(dbresult)[0] == PASS:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def listVmById(self, id):
|
||||
list_vms = VirtualMachine.list(self.apiclient, id=id)
|
||||
self.assertEqual(
|
||||
isinstance(list_vms, list),
|
||||
True,
|
||||
"List VM response was not a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_vms),
|
||||
0,
|
||||
"List VM response was empty"
|
||||
)
|
||||
return list_vms[0]
|
||||
|
||||
def tearDown(self):
|
||||
try:
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
|
|
|
|||
|
|
@ -128,9 +128,8 @@ class TestDedicatePublicIPRange(cloudstackTestCase):
|
|||
id=self.public_ip_range.vlan.id
|
||||
)
|
||||
public_ip_response = list_public_ip_range_response[0]
|
||||
self.assertEqual(
|
||||
self.assertIsNone(
|
||||
public_ip_response.account,
|
||||
"system",
|
||||
"Check account name is system account in listVlanIpRanges"
|
||||
)
|
||||
return
|
||||
|
|
|
|||
|
|
@ -1942,6 +1942,7 @@
|
|||
"label.suspend.project": "Suspend project",
|
||||
"label.switch.type": "Switch type",
|
||||
"label.sync.storage": "Sync storage pool",
|
||||
"label.system.ip.pool": "System Pool",
|
||||
"label.system.offering": "System offering",
|
||||
"label.system.offerings": "System offerings",
|
||||
"label.system.service.offering": "System service offering",
|
||||
|
|
|
|||
|
|
@ -109,7 +109,7 @@ export default {
|
|||
(state, getters) => getters.headerNotices,
|
||||
(newValue, oldValue) => {
|
||||
if (oldValue !== newValue && newValue !== undefined) {
|
||||
this.notices = newValue.reverse()
|
||||
this.notices = newValue
|
||||
}
|
||||
}
|
||||
)
|
||||
|
|
|
|||
|
|
@ -90,9 +90,8 @@ export default {
|
|||
getNextPage()
|
||||
}
|
||||
}).finally(() => {
|
||||
this.projects = _.orderBy(projects, ['displaytext'], ['asc'])
|
||||
this.projects.unshift({ name: this.$t('label.default.view') })
|
||||
this.loading = false
|
||||
this.$store.commit('RELOAD_ALL_PROJECTS', projects)
|
||||
})
|
||||
}
|
||||
getNextPage()
|
||||
|
|
@ -113,6 +112,17 @@ export default {
|
|||
filterProject (input, option) {
|
||||
return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0
|
||||
}
|
||||
},
|
||||
mounted () {
|
||||
this.$store.watch(
|
||||
(state, getters) => getters.allProjects,
|
||||
(newValue, oldValue) => {
|
||||
if (oldValue !== newValue && newValue !== undefined) {
|
||||
this.projects = _.orderBy(newValue, ['displaytext'], ['asc'])
|
||||
this.projects.unshift({ name: this.$t('label.default.view') })
|
||||
}
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
</script>
|
||||
|
|
|
|||
|
|
@@ -32,9 +32,9 @@ export default {
  permission: ['listVirtualMachinesMetrics'],
  resourceType: 'UserVm',
  params: () => {
    var params = {}
    var params = { details: 'servoff,tmpl,nics' }
    if (store.getters.metrics) {
      params = { state: 'running' }
      params = { details: 'servoff,tmpl,nics,stats' }
    }
    return params
  },
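As a rough illustration (not part of the commit; the helper name below is invented), the params factory above now always requests a details projection and only adds stats when the metrics toggle is on:

// Hypothetical stand-in for the params() factory above; store.getters.metrics is modelled as a boolean argument.
const paramsFor = (metricsEnabled) => {
  var params = { details: 'servoff,tmpl,nics' }
  if (metricsEnabled) {
    params = { details: 'servoff,tmpl,nics,stats' }
  }
  return params
}

console.log(paramsFor(false)) // { details: 'servoff,tmpl,nics' }
console.log(paramsFor(true))  // { details: 'servoff,tmpl,nics,stats' }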
@@ -49,6 +49,7 @@ const getters = {
  twoFaProvider: state => state.user.twoFaProvider,
  twoFaIssuer: state => state.user.twoFaIssuer,
  loginFlag: state => state.user.loginFlag,
  allProjects: (state) => state.app.allProjects,
  customHypervisorName: state => state.user.customHypervisorName
}
@@ -30,7 +30,8 @@ import {
  USE_BROWSER_TIMEZONE,
  SERVER_MANAGER,
  VUE_VERSION,
  CUSTOM_COLUMNS
  CUSTOM_COLUMNS,
  RELOAD_ALL_PROJECTS
} from '@/store/mutation-types'

const app = {
@@ -50,7 +51,8 @@ const app = {
    metrics: false,
    listAllProjects: false,
    server: '',
    vueVersion: ''
    vueVersion: '',
    allProjects: []
  },
  mutations: {
    SET_SIDEBAR_TYPE: (state, type) => {
@@ -122,6 +124,10 @@ const app = {
      vueProps.$localStorage.set(CUSTOM_COLUMNS, customColumns)
      state.customColumns = customColumns
    },
    RELOAD_ALL_PROJECTS: (state, allProjects = []) => {
      vueProps.$localStorage.set(RELOAD_ALL_PROJECTS, allProjects)
      state.allProjects = allProjects
    },
    SET_SHUTDOWN_TRIGGERED: (state, shutdownTriggered) => {
      state.shutdownTriggered = shutdownTriggered
    }
@@ -181,6 +187,9 @@ const app = {
    SetCustomColumns ({ commit }, bool) {
      commit('SET_CUSTOM_COLUMNS', bool)
    },
    ReloadAllProjects ({ commit, allProjects }) {
      commit('RELOAD_ALL_PROJECTS', allProjects)
    },
    SetShutdownTriggered ({ commit }, bool) {
      commit('SET_SHUTDOWN_TRIGGERED', bool)
    }
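A minimal standalone sketch of how the new allProjects state, getter, mutation and action fit together, assuming a plain Vuex 4 store; the persistence via vueProps.$localStorage is omitted, the sample project data is invented, and the action is written here with the project list as an explicit payload:

import { createStore } from 'vuex'

// Simplified store shape mirroring the hunks above (allProjects state/getter + RELOAD_ALL_PROJECTS mutation).
const store = createStore({
  state: { allProjects: [] },
  getters: { allProjects: state => state.allProjects },
  mutations: {
    RELOAD_ALL_PROJECTS: (state, allProjects = []) => { state.allProjects = allProjects }
  },
  actions: {
    ReloadAllProjects ({ commit }, allProjects) { commit('RELOAD_ALL_PROJECTS', allProjects) }
  }
})

// A view can push a freshly fetched project list into the store...
store.commit('RELOAD_ALL_PROJECTS', [{ id: 'p1', displaytext: 'team-a' }])

// ...and any watcher registered on the getter (as in the project selector's mounted() hook) reacts to it.
store.watch(
  (state, getters) => getters.allProjects,
  (newValue) => console.log('projects reloaded:', newValue.length)
)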
@@ -365,9 +365,13 @@ const user = {
      if (noticeIdx === -1) {
        noticeArray.push(noticeJson)
      } else {
        const existingNotice = noticeArray[noticeIdx]
        noticeJson.timestamp = existingNotice.timestamp
        noticeArray[noticeIdx] = noticeJson
      }

      noticeArray.sort(function (a, b) {
        return new Date(b.timestamp) - new Date(a.timestamp)
      })
      commit('SET_HEADER_NOTICES', noticeArray)
    },
    ProjectView ({ commit }, projectid) {
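To make the new behaviour concrete, a small self-contained sketch (the key-based lookup, helper name and sample data are assumptions; only the timestamp handling and sort mirror the hunk above): an updated notice keeps its original timestamp and the array stays ordered newest first:

// Illustrative only; loosely mirrors the header-notice handling above.
function upsertNotice (noticeArray, noticeJson) {
  const noticeIdx = noticeArray.findIndex(n => n.key === noticeJson.key)
  if (noticeIdx === -1) {
    noticeArray.push(noticeJson)
  } else {
    noticeJson.timestamp = noticeArray[noticeIdx].timestamp
    noticeArray[noticeIdx] = noticeJson
  }
  noticeArray.sort((a, b) => new Date(b.timestamp) - new Date(a.timestamp))
  return noticeArray
}

const notices = [{ key: 'job-1', status: 'progress', timestamp: new Date('2024-01-01') }]
upsertNotice(notices, { key: 'job-1', status: 'done', timestamp: new Date('2024-01-02') })
console.log(notices[0].timestamp.toISOString()) // still 2024-01-01, the original timestamp is kept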
@@ -37,6 +37,7 @@ export const DOMAIN_STORE = 'DOMAIN_STORE'
export const DARK_MODE = 'DARK_MODE'
export const VUE_VERSION = 'VUE_VERSION'
export const CUSTOM_COLUMNS = 'CUSTOM_COLUMNS'
export const RELOAD_ALL_PROJECTS = 'RELOAD_ALL_PROJECTS'

export const CONTENT_WIDTH_TYPE = {
  Fluid: 'Fluid',
@@ -65,7 +65,8 @@ export const pollJobPlugin = {
        key: jobId,
        title,
        description,
        status: 'progress'
        status: 'progress',
        timestamp: new Date()
      })

      eventBus.on('update-job-details', (args) => {
@@ -107,7 +108,8 @@ export const pollJobPlugin = {
        title,
        description,
        status: 'done',
        duration: 2
        duration: 2,
        timestamp: new Date()
      })
      eventBus.emit('update-job-details', { jobId, resourceId })
      // Ensure we refresh on the same / parent page
@@ -157,7 +159,8 @@ export const pollJobPlugin = {
        title,
        description: desc,
        status: 'failed',
        duration: 2
        duration: 2,
        timestamp: new Date()
      })
      eventBus.emit('update-job-details', { jobId, resourceId })
      // Ensure we refresh on the same / parent page
@@ -192,8 +192,12 @@ const sourceToken = {
  },
  cancel: () => {
    if (!source) sourceToken.init()
    source.cancel()
    source = null
    if (source) {
      source.cancel()
      source = null
    } else {
      console.log('Source token failed to be cancelled')
    }
  }
}
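For context, a minimal standalone version of such a cancel-token holder, assuming the axios CancelToken API; the real sourceToken lives in @/utils/request, and the getToken helper below is an invented name:

import axios from 'axios'

// Illustrative holder; the actual implementation in the UI may differ in details.
let source = null
const sourceToken = {
  init: () => { source = axios.CancelToken.source() },
  getToken: () => source && source.token,
  cancel: () => {
    // Guard against cancelling before init(), mirroring the fix above.
    if (source) {
      source.cancel()
      source = null
    } else {
      console.log('Source token failed to be cancelled')
    }
  }
}

sourceToken.init()
// axios.get('/api', { cancelToken: sourceToken.getToken() })  // pass the token to in-flight requests
sourceToken.cancel() // aborts requests created with this token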
@@ -448,6 +448,7 @@ import { ref, reactive, toRaw } from 'vue'
import { api } from '@/api'
import { mixinDevice } from '@/utils/mixin.js'
import { genericCompare } from '@/utils/sort.js'
import { sourceToken } from '@/utils/request'
import store from '@/store'
import eventBus from '@/config/eventBus'
@@ -627,6 +628,9 @@ export default {
    next()
  },
  beforeRouteLeave (to, from, next) {
    console.log('DEBUG - Due to route change, ignoring results for any on-going API request', this.apiName)
    sourceToken.cancel()
    sourceToken.init()
    this.currentPath = this.$route.fullPath
    next()
  },
@@ -718,8 +722,10 @@ export default {
      }
      api('listProjects', { id: projectId, listall: true, details: 'min' }).then(json => {
        if (!json || !json.listprojectsresponse || !json.listprojectsresponse.project) return
        const projects = json.listprojectsresponse.project
        const project = json.listprojectsresponse.project[0]
        this.$store.dispatch('SetProject', project)
        this.$store.commit('RELOAD_ALL_PROJECTS', projects)
        this.$store.dispatch('ToggleTheme', project.id === undefined ? 'light' : 'dark')
        this.$message.success(`${this.$t('message.switch.to')} "${project.name}"`)
        const query = Object.assign({}, this.$route.query)
@@ -882,6 +888,10 @@ export default {
        delete params.showunique
      }

      if (['listVirtualMachinesMetrics'].includes(this.apiName) && this.dataView) {
        delete params.details
      }

      this.loading = true
      if (this.$route.params && this.$route.params.id) {
        params.id = this.$route.params.id
@@ -944,25 +954,37 @@ export default {
            break
          }
        }
        this.itemCount = 0
        var apiItemCount = 0
        for (const key in json[responseName]) {
          if (key === 'count') {
            this.itemCount = json[responseName].count
            apiItemCount = json[responseName].count
            continue
          }
          objectName = key
          break
        }

        if ('id' in this.$route.params && this.$route.params.id !== params.id) {
          console.log('DEBUG - Discarding API response as its `id` does not match the uuid on the browser path')
          return
        }
        if (this.dataView && apiItemCount > 1) {
          console.log('DEBUG - Discarding API response as got more than one item in data view', this.$route.params, this.items)
          return
        }

        this.items = json[responseName][objectName]
        if (!this.items || this.items.length === 0) {
          this.items = []
        }
        this.itemCount = apiItemCount

        if (['listTemplates', 'listIsos'].includes(this.apiName) && this.items.length > 1) {
          this.items = [...new Map(this.items.map(x => [x.id, x])).values()]
        }

        if (this.apiName === 'listProjects' && this.items.length > 0) {
          this.$store.commit('RELOAD_ALL_PROJECTS', this.items)
          this.columns.map(col => {
            if (col.title === 'Account') {
              col.title = this.$t('label.project.owner')
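A tiny illustration of the Map-based de-duplication used above for listTemplates/listIsos results (the sample records are invented): keying by id keeps one entry per id, with later entries winning:

// Illustrative data; the real items come from the listTemplates/listIsos responses.
const items = [
  { id: 'a', zone: 'zone1' },
  { id: 'b', zone: 'zone1' },
  { id: 'a', zone: 'zone2' } // duplicate id, e.g. the same template registered in another zone
]

const unique = [...new Map(items.map(x => [x.id, x])).values()]
console.log(unique.map(x => `${x.id}/${x.zone}`)) // [ 'a/zone2', 'b/zone1' ]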
@@ -1005,6 +1027,10 @@ export default {
          }
        }
      }).catch(error => {
        if (!error || !error.message) {
          console.log('API request likely got cancelled due to route change:', this.apiName)
          return
        }
        if ([401].includes(error.response.status)) {
          return
        }
@@ -1815,11 +1815,17 @@ export default {
        if (template) {
          var size = template.size / (1024 * 1024 * 1024) || 0 // bytes to GB
          this.dataPreFill.minrootdisksize = Math.ceil(size)
          this.defaultBootType = this.template?.details?.UEFI ? 'UEFI' : ''
          this.fetchBootModes(this.defaultBootType)
          this.defaultBootMode = this.template?.details?.UEFI
          this.updateTemplateLinkedUserData(this.template.userdataid)
          this.userdataDefaultOverridePolicy = this.template.userdatapolicy
          this.updateTemplateLinkedUserData(template.userdataid)
          this.userdataDefaultOverridePolicy = template.userdatapolicy
          this.form.dynamicscalingenabled = template.isdynamicallyscalable
          this.defaultBootType = template.details?.UEFI ? 'UEFI' : 'BIOS'
          this.form.boottype = this.defaultBootType
          this.fetchBootModes(this.form.boottype)
          this.defaultBootMode = template.details?.UEFI || this.options.bootModes?.[0]?.id || undefined
          this.form.bootmode = this.defaultBootMode
          this.form.iothreadsenabled = template.details && Object.prototype.hasOwnProperty.call(template.details, 'iothreads')
          this.form.iodriverpolicy = template.details?.['io.policy']
          this.form.keyboard = template.details?.keyboard
        }
      } else if (name === 'isoid') {
        this.templateConfigurations = []
@@ -48,21 +48,21 @@
          {{ record.endip || record.endipv6 }}
        </template>
        <template v-if="column.key === 'account' && !basicGuestNetwork">
          <a-button @click="() => handleOpenAccountModal(record)">{{ `[${record.domain}] ${record.account === undefined ? '' : record.account}` }}</a-button>
          <a-button @click="() => handleOpenAccountModal(record)">{{ record.domain === undefined ? `${$t('label.system.ip.pool')}` : `[ ${record.domain}] ${record.account === undefined ? '' : record.account}` }}</a-button>
        </template>
        <template v-if="column.key === 'actions'">
          <div
            class="actions"
            style="text-align: right" >
            <tooltip-button
              v-if="record.account === 'system' && !basicGuestNetwork && record.gateway && !record.ip6gateway"
              v-if="!record.domain && !basicGuestNetwork && record.gateway && !record.ip6gateway"
              tooltipPlacement="bottom"
              :tooltip="$t('label.add.account')"
              icon="user-add-outlined"
              @onClick="() => handleOpenAddAccountModal(record)"
              :disabled="!('dedicatePublicIpRange' in $store.getters.apis)" />
            <tooltip-button
              v-if="record.account !== 'system' && !basicGuestNetwork"
              v-if="record.domain && !basicGuestNetwork"
              tooltipPlacement="bottom"
              :tooltip="$t('label.release.account')"
              icon="user-delete-outlined"
@@ -199,18 +199,21 @@ public class HostDatastoreSystemMO extends BaseMO {
        return _context.getService().queryAvailableDisksForVmfs(_mor, null);
    }

    public ManagedObjectReference createVmfsDatastore(String datastoreName, HostScsiDisk hostScsiDisk) throws Exception {
        // just grab the first instance of VmfsDatastoreOption
        VmfsDatastoreOption vmfsDatastoreOption = _context.getService().queryVmfsDatastoreCreateOptions(_mor, hostScsiDisk.getDevicePath(), 5).get(0);

    public ManagedObjectReference createVmfsDatastore(String datastoreName, HostScsiDisk hostScsiDisk, Integer vmfsVersion) throws Exception {
        VmfsDatastoreOption vmfsDatastoreOption = _context.getService().queryVmfsDatastoreCreateOptions(_mor, hostScsiDisk.getDevicePath(), vmfsVersion).get(0);
        VmfsDatastoreCreateSpec vmfsDatastoreCreateSpec = (VmfsDatastoreCreateSpec)vmfsDatastoreOption.getSpec();

        // set the name of the datastore to be created
        vmfsDatastoreCreateSpec.getVmfs().setVolumeName(datastoreName);

        return _context.getService().createVmfsDatastore(_mor, vmfsDatastoreCreateSpec);
    }

    public ManagedObjectReference createVmfs5Datastore(String datastoreName, HostScsiDisk hostScsiDisk) throws Exception {
        return createVmfsDatastore(datastoreName, hostScsiDisk, 5);
    }

    public ManagedObjectReference createVmfs6Datastore(String datastoreName, HostScsiDisk hostScsiDisk) throws Exception {
        return createVmfsDatastore(datastoreName, hostScsiDisk, 6);
    }

    public boolean deleteDatastore(String name) throws Exception {
        ManagedObjectReference morDatastore = findDatastore(name);
        if (morDatastore != null) {
@@ -98,6 +98,7 @@ public class VmwareHelper {
    public static final int MAX_SUPPORTED_DEVICES_SCSI_CONTROLLER = MAX_ALLOWED_DEVICES_SCSI_CONTROLLER - 1; // One device node is unavailable for hard disks or SCSI devices
    public static final int MAX_USABLE_SCSI_CONTROLLERS = 2;
    public static final String MIN_VERSION_UEFI_LEGACY = "5.5";
    public static final String MIN_VERSION_VMFS6 = "6.5";

    public static boolean isReservedScsiDeviceNumber(int deviceNumber) {
        // The SCSI controller is assigned to virtual device node (z:7), so that device node is unavailable for hard disks or SCSI devices.