mirror of https://github.com/apache/cloudstack.git
Merge branch '4.20' of https://github.com/apache/cloudstack
Commit 3a28a87483
@@ -24,6 +24,7 @@ public class MigrationOptions implements Serializable {
     private String srcPoolUuid;
     private Storage.StoragePoolType srcPoolType;
+    private Long srcPoolClusterId;
     private Type type;
     private ScopeType scopeType;
     private String srcBackingFilePath;

@@ -38,21 +39,23 @@ public class MigrationOptions implements Serializable {
     public MigrationOptions() {
     }

-    public MigrationOptions(String srcPoolUuid, Storage.StoragePoolType srcPoolType, String srcBackingFilePath, boolean copySrcTemplate, ScopeType scopeType) {
+    public MigrationOptions(String srcPoolUuid, Storage.StoragePoolType srcPoolType, String srcBackingFilePath, boolean copySrcTemplate, ScopeType scopeType, Long srcPoolClusterId) {
         this.srcPoolUuid = srcPoolUuid;
         this.srcPoolType = srcPoolType;
         this.type = Type.LinkedClone;
         this.scopeType = scopeType;
         this.srcBackingFilePath = srcBackingFilePath;
         this.copySrcTemplate = copySrcTemplate;
+        this.srcPoolClusterId = srcPoolClusterId;
     }

-    public MigrationOptions(String srcPoolUuid, Storage.StoragePoolType srcPoolType, String srcVolumeUuid, ScopeType scopeType) {
+    public MigrationOptions(String srcPoolUuid, Storage.StoragePoolType srcPoolType, String srcVolumeUuid, ScopeType scopeType, Long srcPoolClusterId) {
         this.srcPoolUuid = srcPoolUuid;
         this.srcPoolType = srcPoolType;
         this.type = Type.FullClone;
         this.scopeType = scopeType;
         this.srcVolumeUuid = srcVolumeUuid;
+        this.srcPoolClusterId = srcPoolClusterId;
     }

     public String getSrcPoolUuid() {

@@ -63,6 +66,10 @@ public class MigrationOptions implements Serializable {
         return srcPoolType;
     }

+    public Long getSrcPoolClusterId() {
+        return srcPoolClusterId;
+    }
+
     public ScopeType getScopeType() { return scopeType; }

     public String getSrcBackingFilePath() {
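For orientation, a minimal sketch of how the two constructors and the new getter relate. This is not part of the commit; the UUID, path and cluster id below are placeholder values and imports are omitted.

    // The 6-argument constructor (backing file + copySrcTemplate) produces Type.LinkedClone,
    // the 5-argument constructor (volume UUID) produces Type.FullClone; both now carry the
    // source pool's cluster id.
    MigrationOptions linked = new MigrationOptions("src-pool-uuid", Storage.StoragePoolType.NetworkFilesystem,
            "/mnt/primary/base-template.qcow2", true, ScopeType.CLUSTER, 5L);
    MigrationOptions full = new MigrationOptions("src-pool-uuid", Storage.StoragePoolType.NetworkFilesystem,
            "src-volume-uuid", ScopeType.CLUSTER, 5L);

    Long clusterId = linked.getSrcPoolClusterId(); // 5L for both instances in this example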
@@ -77,19 +77,19 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
         processReport(hostId, translatedInfo, force);
     }

-    private void updateAndPublishVmPowerStates(long hostId, Map<Long, VirtualMachine.PowerState> instancePowerStates,
-            Date updateTime) {
+    protected void updateAndPublishVmPowerStates(long hostId, Map<Long, VirtualMachine.PowerState> instancePowerStates,
+            Date updateTime) {
         if (instancePowerStates.isEmpty()) {
             return;
         }
         Set<Long> vmIds = instancePowerStates.keySet();
-        Map<Long, VirtualMachine.PowerState> notUpdated = _instanceDao.updatePowerState(instancePowerStates, hostId,
-                updateTime);
+        Map<Long, VirtualMachine.PowerState> notUpdated =
+                _instanceDao.updatePowerState(instancePowerStates, hostId, updateTime);
         if (notUpdated.size() > vmIds.size()) {
             return;
         }
         for (Long vmId : vmIds) {
-            if (!notUpdated.isEmpty() && !notUpdated.containsKey(vmId)) {
+            if (!notUpdated.containsKey(vmId)) {
                 logger.debug("VM state report is updated. {}, {}, power state: {}",
                         () -> hostCache.get(hostId), () -> vmCache.get(vmId), () -> instancePowerStates.get(vmId));
                 _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE,
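The functional change in this hunk is the loop predicate. A minimal sketch, with placeholder data not taken from the patch, of how the old and new conditions differ when the DAO reports that every row was updated, i.e. notUpdated is empty:

    Map<Long, VirtualMachine.PowerState> notUpdated = new HashMap<>(); // every VM's row was updated
    long vmId = 1L;

    boolean oldCondition = !notUpdated.isEmpty() && !notUpdated.containsKey(vmId); // false -> no event published
    boolean newCondition = !notUpdated.containsKey(vmId);                          // true  -> VM_POWER_STATE published

The unit tests added below pin exactly this behavior: one event per VM whose power state was actually updated, and none for VMs whose state did not change.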
@@ -0,0 +1,107 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.vm;

import java.util.Date;
import java.util.HashMap;
import java.util.Map;

import org.apache.cloudstack.framework.messagebus.MessageBus;
import org.apache.cloudstack.framework.messagebus.PublishScope;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;

import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.vm.dao.VMInstanceDao;

@RunWith(MockitoJUnitRunner.class)
public class VirtualMachinePowerStateSyncImplTest {
    @Mock
    MessageBus messageBus;
    @Mock
    VMInstanceDao instanceDao;
    @Mock
    HostDao hostDao;

    @InjectMocks
    VirtualMachinePowerStateSyncImpl virtualMachinePowerStateSync = new VirtualMachinePowerStateSyncImpl();

    @Before
    public void setup() {
        Mockito.lenient().when(instanceDao.findById(Mockito.anyLong())).thenReturn(Mockito.mock(VMInstanceVO.class));
        Mockito.lenient().when(hostDao.findById(Mockito.anyLong())).thenReturn(Mockito.mock(HostVO.class));
    }

    @Test
    public void test_updateAndPublishVmPowerStates_emptyStates() {
        virtualMachinePowerStateSync.updateAndPublishVmPowerStates(1L, new HashMap<>(), new Date());
        Mockito.verify(instanceDao, Mockito.never()).updatePowerState(Mockito.anyMap(), Mockito.anyLong(),
                Mockito.any(Date.class));
    }

    @Test
    public void test_updateAndPublishVmPowerStates_moreNotUpdated() {
        Map<Long, VirtualMachine.PowerState> powerStates = new HashMap<>();
        powerStates.put(1L, VirtualMachine.PowerState.PowerOff);
        Map<Long, VirtualMachine.PowerState> notUpdated = new HashMap<>(powerStates);
        notUpdated.put(2L, VirtualMachine.PowerState.PowerOn);
        Mockito.when(instanceDao.updatePowerState(Mockito.anyMap(), Mockito.anyLong(),
                Mockito.any(Date.class))).thenReturn(notUpdated);
        virtualMachinePowerStateSync.updateAndPublishVmPowerStates(1L, powerStates, new Date());
        Mockito.verify(messageBus, Mockito.never()).publish(Mockito.nullable(String.class), Mockito.anyString(),
                Mockito.any(PublishScope.class), Mockito.anyLong());
    }

    @Test
    public void test_updateAndPublishVmPowerStates_allUpdated() {
        Map<Long, VirtualMachine.PowerState> powerStates = new HashMap<>();
        powerStates.put(1L, VirtualMachine.PowerState.PowerOff);
        Mockito.when(instanceDao.updatePowerState(Mockito.anyMap(), Mockito.anyLong(),
                Mockito.any(Date.class))).thenReturn(new HashMap<>());
        virtualMachinePowerStateSync.updateAndPublishVmPowerStates(1L, powerStates, new Date());
        Mockito.verify(messageBus, Mockito.times(1)).publish(null,
                VirtualMachineManager.Topics.VM_POWER_STATE,
                PublishScope.GLOBAL,
                1L);
    }

    @Test
    public void test_updateAndPublishVmPowerStates_partialUpdated() {
        Map<Long, VirtualMachine.PowerState> powerStates = new HashMap<>();
        powerStates.put(1L, VirtualMachine.PowerState.PowerOn);
        powerStates.put(2L, VirtualMachine.PowerState.PowerOff);
        Map<Long, VirtualMachine.PowerState> notUpdated = new HashMap<>();
        notUpdated.put(2L, VirtualMachine.PowerState.PowerOff);
        Mockito.when(instanceDao.updatePowerState(Mockito.anyMap(), Mockito.anyLong(),
                Mockito.any(Date.class))).thenReturn(notUpdated);
        virtualMachinePowerStateSync.updateAndPublishVmPowerStates(1L, powerStates, new Date());
        Mockito.verify(messageBus, Mockito.times(1)).publish(null,
                VirtualMachineManager.Topics.VM_POWER_STATE,
                PublishScope.GLOBAL,
                1L);
        Mockito.verify(messageBus, Mockito.never()).publish(null,
                VirtualMachineManager.Topics.VM_POWER_STATE,
                PublishScope.GLOBAL,
                2L);
    }
}
@@ -215,7 +215,7 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot
         }

         VMTemplateStoragePoolVO sourceVolumeTemplateStoragePoolVO = vmTemplatePoolDao.findByPoolTemplate(destStoragePool.getId(), srcVolumeInfo.getTemplateId(), null);
-        if (sourceVolumeTemplateStoragePoolVO == null && (isStoragePoolTypeInList(destStoragePool.getPoolType(), StoragePoolType.Filesystem, StoragePoolType.SharedMountPoint))) {
+        if (sourceVolumeTemplateStoragePoolVO == null && (isStoragePoolTypeInList(destStoragePool.getPoolType(), StoragePoolType.NetworkFilesystem, StoragePoolType.Filesystem, StoragePoolType.SharedMountPoint))) {
             DataStore sourceTemplateDataStore = dataStoreManagerImpl.getRandomImageStore(srcVolumeInfo.getDataCenterId());
             if (sourceTemplateDataStore != null) {
                 TemplateInfo sourceTemplateInfo = templateDataFactory.getTemplate(srcVolumeInfo.getTemplateId(), sourceTemplateDataStore);
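The only functional change here is that NFS-backed destination pools now also take the template-copy path. As a hypothetical illustration, a varargs membership helper of the kind the call above suggests could look like the sketch below; the body is an assumption, not copied from the CloudStack source.

    // Assumed shape of the membership check used above (illustrative only).
    static boolean isStoragePoolTypeInList(Storage.StoragePoolType type, Storage.StoragePoolType... types) {
        return java.util.Arrays.asList(types).contains(type);
    }

    // Effect of the patch for an NFS destination pool:
    //   before: isStoragePoolTypeInList(NetworkFilesystem, Filesystem, SharedMountPoint)                    -> false
    //   after:  isStoragePoolTypeInList(NetworkFilesystem, NetworkFilesystem, Filesystem, SharedMountPoint) -> true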
@@ -1949,18 +1949,26 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
     /**
     * Return expected MigrationOptions for a linked clone volume live storage migration
     */
-    protected MigrationOptions createLinkedCloneMigrationOptions(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, String srcVolumeBackingFile, String srcPoolUuid, Storage.StoragePoolType srcPoolType) {
+    protected MigrationOptions createLinkedCloneMigrationOptions(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, String srcVolumeBackingFile, StoragePoolVO srcPool) {
+        String srcPoolUuid = srcPool.getUuid();
+        Storage.StoragePoolType srcPoolType = srcPool.getPoolType();
+        Long srcPoolClusterId = srcPool.getClusterId();
         VMTemplateStoragePoolVO ref = templatePoolDao.findByPoolTemplate(destVolumeInfo.getPoolId(), srcVolumeInfo.getTemplateId(), null);
         boolean updateBackingFileReference = ref == null;
         String backingFile = !updateBackingFileReference ? ref.getInstallPath() : srcVolumeBackingFile;
-        return new MigrationOptions(srcPoolUuid, srcPoolType, backingFile, updateBackingFileReference, srcVolumeInfo.getDataStore().getScope().getScopeType());
+        ScopeType scopeType = srcVolumeInfo.getDataStore().getScope().getScopeType();
+        return new MigrationOptions(srcPoolUuid, srcPoolType, backingFile, updateBackingFileReference, scopeType, srcPoolClusterId);
     }

     /**
     * Return expected MigrationOptions for a full clone volume live storage migration
     */
-    protected MigrationOptions createFullCloneMigrationOptions(VolumeInfo srcVolumeInfo, VirtualMachineTO vmTO, Host srcHost, String srcPoolUuid, Storage.StoragePoolType srcPoolType) {
-        return new MigrationOptions(srcPoolUuid, srcPoolType, srcVolumeInfo.getPath(), srcVolumeInfo.getDataStore().getScope().getScopeType());
+    protected MigrationOptions createFullCloneMigrationOptions(VolumeInfo srcVolumeInfo, VirtualMachineTO vmTO, Host srcHost, StoragePoolVO srcPool) {
+        String srcPoolUuid = srcPool.getUuid();
+        Storage.StoragePoolType srcPoolType = srcPool.getPoolType();
+        Long srcPoolClusterId = srcPool.getClusterId();
+        ScopeType scopeType = srcVolumeInfo.getDataStore().getScope().getScopeType();
+        return new MigrationOptions(srcPoolUuid, srcPoolType, srcVolumeInfo.getPath(), scopeType, srcPoolClusterId);
     }

     /**
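Both factory methods now take the resolved StoragePoolVO instead of a separately passed UUID and pool type, so the uuid/type/cluster-id triple always comes from the same entity. A short usage sketch with the variable names from the call site shown in the next hunk; the assertion is illustrative only and not part of the patch.

    MigrationOptions linked = createLinkedCloneMigrationOptions(srcVolumeInfo, destVolumeInfo, srcVolumeBackingFile, srcPool);
    MigrationOptions full = createFullCloneMigrationOptions(srcVolumeInfo, vmTO, srcHost, srcPool);

    // Both options objects carry the same source pool metadata, including the cluster id.
    assert java.util.Objects.equals(linked.getSrcPoolClusterId(), full.getSrcPoolClusterId());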
@@ -1983,9 +1991,9 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {

         MigrationOptions migrationOptions;
         if (MigrationOptions.Type.LinkedClone.equals(migrationType)) {
-            migrationOptions = createLinkedCloneMigrationOptions(srcVolumeInfo, destVolumeInfo, srcVolumeBackingFile, srcPoolUuid, srcPoolType);
+            migrationOptions = createLinkedCloneMigrationOptions(srcVolumeInfo, destVolumeInfo, srcVolumeBackingFile, srcPool);
         } else {
-            migrationOptions = createFullCloneMigrationOptions(srcVolumeInfo, vmTO, srcHost, srcPoolUuid, srcPoolType);
+            migrationOptions = createFullCloneMigrationOptions(srcVolumeInfo, vmTO, srcHost, srcPool);
         }
         migrationOptions.setTimeout(StorageManager.KvmStorageOnlineMigrationWait.value());
         destVolumeInfo.setMigrationOptions(migrationOptions);
@@ -331,6 +331,16 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
     public static final String UBUNTU_WINDOWS_GUEST_CONVERSION_SUPPORTED_CHECK_CMD = "dpkg -l virtio-win";
     public static final String UBUNTU_NBDKIT_PKG_CHECK_CMD = "dpkg -l nbdkit";

+    public static final int LIBVIRT_CGROUP_CPU_SHARES_MIN = 2;
+    public static final int LIBVIRT_CGROUP_CPU_SHARES_MAX = 262144;
+    /**
+     * The minimal value for the LIBVIRT_CGROUPV2_WEIGHT_MIN is actually 1.
+     * However, due to an old libvirt bug, it is raised to 2.
+     * See: https://github.com/libvirt/libvirt/commit/38af6497610075e5fe386734b87186731d4c17ac
+     */
+    public static final int LIBVIRT_CGROUPV2_WEIGHT_MIN = 2;
+    public static final int LIBVIRT_CGROUPV2_WEIGHT_MAX = 10000;
+
     private String modifyVlanPath;
     private String versionStringPath;
     private String patchScriptPath;
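The new constants encode the ranges libvirt accepts: cgroup v1 cpu.shares between 2 and 262144, and cgroup v2 cpu.weight between 1 and 10000, with the minimum raised to 2 because of the old libvirt bug referenced in the comment. A small illustrative sketch of clamping into those ranges, assuming the constants above; these helper methods are not part of the patch, and the converted-shares hunk further down performs the same bounds checks inline.

    // Hypothetical clamp helpers mirroring the new constants.
    static int clampToCgroupV1Range(int shares) {
        return Math.max(LIBVIRT_CGROUP_CPU_SHARES_MIN, Math.min(shares, LIBVIRT_CGROUP_CPU_SHARES_MAX));
    }

    static int clampToCgroupV2Range(int weight) {
        return Math.max(LIBVIRT_CGROUPV2_WEIGHT_MIN, Math.min(weight, LIBVIRT_CGROUPV2_WEIGHT_MAX));
    }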
@@ -512,8 +522,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv

     private static int hostCpuMaxCapacity = 0;

-    private static final int CGROUP_V2_UPPER_LIMIT = 10000;
-
     private static final String COMMAND_GET_CGROUP_HOST_VERSION = "stat -fc %T /sys/fs/cgroup/";

     public static final String CGROUP_V2 = "cgroup2fs";
@@ -641,6 +649,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
         return libvirtUtilitiesHelper;
     }

+    public String getClusterId() {
+        return clusterId;
+    }
+
     public CPUStat getCPUStat() {
         return cpuStat;
     }
@@ -2821,14 +2833,24 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
         int requestedCpuShares = vCpus * cpuSpeed;
         int hostCpuMaxCapacity = getHostCpuMaxCapacity();
+
+        // cgroup v2 is in use
         if (hostCpuMaxCapacity > 0) {
-            int updatedCpuShares = (int) Math.ceil((requestedCpuShares * CGROUP_V2_UPPER_LIMIT) / (double) hostCpuMaxCapacity);
-            LOGGER.debug(String.format("This host utilizes cgroupv2 (as the max shares value is [%s]), thus, the VM requested shares of [%s] will be converted to " +
-                    "consider the host limits; the new CPU shares value is [%s].", hostCpuMaxCapacity, requestedCpuShares, updatedCpuShares));
+            int updatedCpuShares = (int) Math.ceil((requestedCpuShares * LIBVIRT_CGROUPV2_WEIGHT_MAX) / (double) hostCpuMaxCapacity);
+            LOGGER.debug("This host utilizes cgroupv2 (as the max shares value is [{}]), thus, the VM requested shares of [{}] will be converted to " +
+                    "consider the host limits; the new CPU shares value is [{}].", hostCpuMaxCapacity, requestedCpuShares, updatedCpuShares);
+
+            if (updatedCpuShares < LIBVIRT_CGROUPV2_WEIGHT_MIN) updatedCpuShares = LIBVIRT_CGROUPV2_WEIGHT_MIN;
+            if (updatedCpuShares > LIBVIRT_CGROUPV2_WEIGHT_MAX) updatedCpuShares = LIBVIRT_CGROUPV2_WEIGHT_MAX;
             return updatedCpuShares;
         }
-        LOGGER.debug(String.format("This host does not have a maximum CPU shares set; therefore, this host utilizes cgroupv1 and the VM requested CPU shares [%s] will not be " +
-                "converted.", requestedCpuShares));
+
+        // cgroup v1 is in use
+        LOGGER.debug("This host does not have a maximum CPU shares set; therefore, this host utilizes cgroupv1 and the VM requested CPU shares [{}] will not be " +
+                "converted.", requestedCpuShares);
+
+        if (requestedCpuShares < LIBVIRT_CGROUP_CPU_SHARES_MIN) requestedCpuShares = LIBVIRT_CGROUP_CPU_SHARES_MIN;
+        if (requestedCpuShares > LIBVIRT_CGROUP_CPU_SHARES_MAX) requestedCpuShares = LIBVIRT_CGROUP_CPU_SHARES_MAX;
         return requestedCpuShares;
     }
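A worked example of the conversion, using placeholder host and VM figures: on cgroup v2 the requested shares are rescaled against the host's maximum capacity into the 10000-point weight range and then clamped.

    // Placeholder numbers, illustrating the formula used above.
    int vCpus = 4, cpuSpeed = 2000;                 // MHz
    int requestedCpuShares = vCpus * cpuSpeed;      // 8000
    int hostCpuMaxCapacity = 64000;                 // e.g. 32 cores * 2000 MHz

    int updatedCpuShares = (int) Math.ceil((requestedCpuShares * LIBVIRT_CGROUPV2_WEIGHT_MAX) / (double) hostCpuMaxCapacity);
    // 8000 * 10000 / 64000 = 1250, already within [2, 10000], so no clamping is applied.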
@@ -2660,6 +2660,12 @@ public class KVMStorageProcessor implements StorageProcessor {
             return localPool;
         }

+        if (migrationOptions.getScopeType().equals(ScopeType.CLUSTER)
+                && migrationOptions.getSrcPoolClusterId() != null
+                && !migrationOptions.getSrcPoolClusterId().toString().equals(resource.getClusterId())) {
+            return localPool;
+        }
+
         return storagePoolMgr.getStoragePool(migrationOptions.getSrcPoolType(), migrationOptions.getSrcPoolUuid());
     }
 }
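Restated with named booleans, a sketch of the new guard; the logic is the same as the hunk above and the variable names are illustrative only. A cluster-scoped source pool is looked up only when this host belongs to the same cluster, otherwise the method keeps returning the local pool.

    boolean srcPoolIsClusterScoped = ScopeType.CLUSTER.equals(migrationOptions.getScopeType());
    boolean hostInDifferentCluster = migrationOptions.getSrcPoolClusterId() != null
            && !migrationOptions.getSrcPoolClusterId().toString().equals(resource.getClusterId());

    if (srcPoolIsClusterScoped && hostInDifferentCluster) {
        return localPool; // the source pool is not reachable from this host's cluster
    }
    return storagePoolMgr.getStoragePool(migrationOptions.getSrcPoolType(), migrationOptions.getSrcPoolUuid());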
@@ -1208,6 +1208,11 @@ export default {
      if (item.value) {
        query[item.param] = this.resource[item.value]
      } else {
+        if (item.name === 'template') {
+          query.templatefilter = 'self'
+          query.filter = 'self'
+        }
+
        if (item.param === 'account') {
          query[item.param] = this.resource.name
          query.domainid = this.resource.domainid
@@ -160,6 +160,10 @@
          <a-tag>static-nat</a-tag>
        </span>
+        <span v-if="record.issystem">
+          <a-tag>system</a-tag>
+        </span>
      </template>
      <template v-if="column.key === 'ip6address'" href="javascript:;">
        <span>{{ ipV6Address(text, record) }}</span>

@@ -421,8 +425,8 @@
          <status :text="record.enabled ? record.enabled.toString() : 'false'" />
          {{ record.enabled ? 'Enabled' : 'Disabled' }}
        </template>
-        <template v-if="['created', 'sent', 'removed', 'effectiveDate', 'endDate'].includes(column.key) || (['startdate'].includes(column.key) && ['webhook'].includes($route.path.split('/')[1])) || (column.key === 'allocated' && ['asnumbers', 'publicip', 'ipv4subnets'].includes($route.meta.name) && text)">
-          {{ $toLocaleDate(text) }}
+        <template v-if="['created', 'sent', 'removed', 'effectiveDate', 'endDate', 'allocated'].includes(column.key) || (['startdate'].includes(column.key) && ['webhook'].includes($route.path.split('/')[1])) || (column.key === 'allocated' && ['asnumbers', 'publicip', 'ipv4subnets'].includes($route.meta.name) && text)">
+          {{ text && $toLocaleDate(text) }}
        </template>
        <template v-if="['startdate', 'enddate'].includes(column.key) && ['vm', 'vnfapp'].includes($route.path.split('/')[1])">
          {{ getDateAtTimeZone(text, record.timezone) }}
@@ -840,10 +840,13 @@ export default {
        message: 'message.action.release.ip',
        docHelp: 'adminguide/networking_and_traffic.html#releasing-an-ip-address-alloted-to-a-vpc',
        dataView: true,
-        show: (record) => { return record.state === 'Allocated' && !record.issourcenat },
+        show: (record) => { return record.state === 'Allocated' && !record.issourcenat && !record.issystem },
        groupAction: true,
        popup: true,
-        groupMap: (selection) => { return selection.map(x => { return { id: x } }) }
+        groupMap: (selection) => { return selection.map(x => { return { id: x } }) },
+        groupShow: (selectedIps) => {
+          return selectedIps.every((ip) => ip.state === 'Allocated' && !ip.issourcenat && !ip.issystem)
+        }
      },
      {
        api: 'reserveIpAddress',

@@ -863,7 +866,10 @@ export default {
        show: (record) => { return record.state === 'Reserved' },
        groupAction: true,
        popup: true,
-        groupMap: (selection) => { return selection.map(x => { return { id: x } }) }
+        groupMap: (selection) => { return selection.map(x => { return { id: x } }) },
+        groupShow: (selectedIps) => {
+          return selectedIps.every((ip) => ip.state === 'Reserved')
+        }
      }
    ]
  },