mirror of https://github.com/apache/cloudstack.git
Merge branch 'main' into cks-enhancements-upstream
This commit is contained in:
commit
e904b1b60c
|
|
@ -24,6 +24,7 @@ public class MigrationOptions implements Serializable {
|
|||
|
||||
private String srcPoolUuid;
|
||||
private Storage.StoragePoolType srcPoolType;
|
||||
private Long srcPoolClusterId;
|
||||
private Type type;
|
||||
private ScopeType scopeType;
|
||||
private String srcBackingFilePath;
|
||||
|
|
@ -38,21 +39,23 @@ public class MigrationOptions implements Serializable {
|
|||
public MigrationOptions() {
|
||||
}
|
||||
|
||||
public MigrationOptions(String srcPoolUuid, Storage.StoragePoolType srcPoolType, String srcBackingFilePath, boolean copySrcTemplate, ScopeType scopeType) {
|
||||
public MigrationOptions(String srcPoolUuid, Storage.StoragePoolType srcPoolType, String srcBackingFilePath, boolean copySrcTemplate, ScopeType scopeType, Long srcPoolClusterId) {
|
||||
this.srcPoolUuid = srcPoolUuid;
|
||||
this.srcPoolType = srcPoolType;
|
||||
this.type = Type.LinkedClone;
|
||||
this.scopeType = scopeType;
|
||||
this.srcBackingFilePath = srcBackingFilePath;
|
||||
this.copySrcTemplate = copySrcTemplate;
|
||||
this.srcPoolClusterId = srcPoolClusterId;
|
||||
}
|
||||
|
||||
public MigrationOptions(String srcPoolUuid, Storage.StoragePoolType srcPoolType, String srcVolumeUuid, ScopeType scopeType) {
|
||||
public MigrationOptions(String srcPoolUuid, Storage.StoragePoolType srcPoolType, String srcVolumeUuid, ScopeType scopeType, Long srcPoolClusterId) {
|
||||
this.srcPoolUuid = srcPoolUuid;
|
||||
this.srcPoolType = srcPoolType;
|
||||
this.type = Type.FullClone;
|
||||
this.scopeType = scopeType;
|
||||
this.srcVolumeUuid = srcVolumeUuid;
|
||||
this.srcPoolClusterId = srcPoolClusterId;
|
||||
}
|
||||
|
||||
public String getSrcPoolUuid() {
|
||||
|
|
@ -63,6 +66,10 @@ public class MigrationOptions implements Serializable {
|
|||
return srcPoolType;
|
||||
}
|
||||
|
||||
public Long getSrcPoolClusterId() {
|
||||
return srcPoolClusterId;
|
||||
}
|
||||
|
||||
public ScopeType getScopeType() { return scopeType; }
|
||||
|
||||
public String getSrcBackingFilePath() {
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@ import org.apache.cloudstack.api.ApiErrorCode;
|
|||
import org.apache.cloudstack.api.BaseCmd;
|
||||
import org.apache.cloudstack.api.Parameter;
|
||||
import org.apache.cloudstack.api.ServerApiException;
|
||||
import org.apache.cloudstack.api.response.BackupScheduleResponse;
|
||||
import org.apache.cloudstack.api.response.SuccessResponse;
|
||||
import org.apache.cloudstack.api.response.UserVmResponse;
|
||||
import org.apache.cloudstack.backup.BackupManager;
|
||||
|
|
@ -54,10 +55,16 @@ public class DeleteBackupScheduleCmd extends BaseCmd {
|
|||
@Parameter(name = ApiConstants.VIRTUAL_MACHINE_ID,
|
||||
type = CommandType.UUID,
|
||||
entityType = UserVmResponse.class,
|
||||
required = true,
|
||||
description = "ID of the VM")
|
||||
private Long vmId;
|
||||
|
||||
@Parameter(name = ApiConstants.ID,
|
||||
type = CommandType.UUID,
|
||||
entityType = BackupScheduleResponse.class,
|
||||
description = "ID of the schedule",
|
||||
since = "4.20.1")
|
||||
private Long id;
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////////// Accessors ///////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
@ -66,6 +73,9 @@ public class DeleteBackupScheduleCmd extends BaseCmd {
|
|||
return vmId;
|
||||
}
|
||||
|
||||
public Long getId() { return id; }
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////// API Implementation///////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
@ -73,7 +83,7 @@ public class DeleteBackupScheduleCmd extends BaseCmd {
|
|||
@Override
|
||||
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
|
||||
try {
|
||||
boolean result = backupManager.deleteBackupSchedule(getVmId());
|
||||
boolean result = backupManager.deleteBackupSchedule(this);
|
||||
if (result) {
|
||||
SuccessResponse response = new SuccessResponse(getCommandName());
|
||||
response.setResponseName(getCommandName());
|
||||
|
|
|
|||
|
|
@ -57,10 +57,6 @@ public class BackupRepositoryResponse extends BaseResponse {
|
|||
@Param(description = "backup type")
|
||||
private String type;
|
||||
|
||||
@SerializedName(ApiConstants.MOUNT_OPTIONS)
|
||||
@Param(description = "mount options for the backup repository")
|
||||
private String mountOptions;
|
||||
|
||||
@SerializedName(ApiConstants.CAPACITY_BYTES)
|
||||
@Param(description = "capacity of the backup repository")
|
||||
private Long capacityBytes;
|
||||
|
|
@ -112,14 +108,6 @@ public class BackupRepositoryResponse extends BaseResponse {
|
|||
this.address = address;
|
||||
}
|
||||
|
||||
public String getMountOptions() {
|
||||
return mountOptions;
|
||||
}
|
||||
|
||||
public void setMountOptions(String mountOptions) {
|
||||
this.mountOptions = mountOptions;
|
||||
}
|
||||
|
||||
public String getProviderName() {
|
||||
return providerName;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@ import com.cloud.exception.ResourceAllocationException;
|
|||
import org.apache.cloudstack.api.command.admin.backup.ImportBackupOfferingCmd;
|
||||
import org.apache.cloudstack.api.command.admin.backup.UpdateBackupOfferingCmd;
|
||||
import org.apache.cloudstack.api.command.user.backup.CreateBackupScheduleCmd;
|
||||
import org.apache.cloudstack.api.command.user.backup.DeleteBackupScheduleCmd;
|
||||
import org.apache.cloudstack.api.command.user.backup.ListBackupOfferingsCmd;
|
||||
import org.apache.cloudstack.api.command.user.backup.ListBackupsCmd;
|
||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||
|
|
@ -192,10 +193,10 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer
|
|||
|
||||
/**
|
||||
* Deletes VM backup schedule for a VM
|
||||
* @param vmId
|
||||
* @param cmd
|
||||
* @return
|
||||
*/
|
||||
boolean deleteBackupSchedule(Long vmId);
|
||||
boolean deleteBackupSchedule(DeleteBackupScheduleCmd cmd);
|
||||
|
||||
/**
|
||||
* Creates backup of a VM
|
||||
|
|
|
|||
|
|
@ -642,6 +642,11 @@
|
|||
<artifactId>cloud-plugin-storage-object-ceph</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-plugin-storage-object-cloudian</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-plugin-storage-object-simulator</artifactId>
|
||||
|
|
|
|||
|
|
@ -1473,7 +1473,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
|
|||
}
|
||||
if (!BooleanUtils.toBoolean(EnableKVMAutoEnableDisable.valueIn(host.getClusterId()))) {
|
||||
logger.debug("{} is disabled for the cluster {}, cannot process the health check result " +
|
||||
"received for the host {}", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host);
|
||||
"received for {}", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -6071,6 +6071,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
@Override
|
||||
public Map<Long, Boolean> getDiskOfferingSuitabilityForVm(long vmId, List<Long> diskOfferingIds) {
|
||||
VMInstanceVO vm = _vmDao.findById(vmId);
|
||||
if (userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.DEPLOY_VM) != null) {
|
||||
return new HashMap<>();
|
||||
}
|
||||
VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
|
||||
Pair<Long, Long> clusterAndHost = findClusterAndHostIdForVm(vm, false);
|
||||
Long clusterId = clusterAndHost.first();
|
||||
|
|
|
|||
|
|
@ -77,19 +77,19 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
|
|||
processReport(hostId, translatedInfo, force);
|
||||
}
|
||||
|
||||
private void updateAndPublishVmPowerStates(long hostId, Map<Long, VirtualMachine.PowerState> instancePowerStates,
|
||||
Date updateTime) {
|
||||
protected void updateAndPublishVmPowerStates(long hostId, Map<Long, VirtualMachine.PowerState> instancePowerStates,
|
||||
Date updateTime) {
|
||||
if (instancePowerStates.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
Set<Long> vmIds = instancePowerStates.keySet();
|
||||
Map<Long, VirtualMachine.PowerState> notUpdated = _instanceDao.updatePowerState(instancePowerStates, hostId,
|
||||
updateTime);
|
||||
Map<Long, VirtualMachine.PowerState> notUpdated =
|
||||
_instanceDao.updatePowerState(instancePowerStates, hostId, updateTime);
|
||||
if (notUpdated.size() > vmIds.size()) {
|
||||
return;
|
||||
}
|
||||
for (Long vmId : vmIds) {
|
||||
if (!notUpdated.isEmpty() && !notUpdated.containsKey(vmId)) {
|
||||
if (!notUpdated.containsKey(vmId)) {
|
||||
logger.debug("VM state report is updated. {}, {}, power state: {}",
|
||||
() -> hostCache.get(hostId), () -> vmCache.get(vmId), () -> instancePowerStates.get(vmId));
|
||||
_messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,107 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.vm;
|
||||
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.cloudstack.framework.messagebus.MessageBus;
|
||||
import org.apache.cloudstack.framework.messagebus.PublishScope;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class VirtualMachinePowerStateSyncImplTest {
|
||||
@Mock
|
||||
MessageBus messageBus;
|
||||
@Mock
|
||||
VMInstanceDao instanceDao;
|
||||
@Mock
|
||||
HostDao hostDao;
|
||||
|
||||
@InjectMocks
|
||||
VirtualMachinePowerStateSyncImpl virtualMachinePowerStateSync = new VirtualMachinePowerStateSyncImpl();
|
||||
|
||||
@Before
|
||||
public void setup() {
|
||||
Mockito.lenient().when(instanceDao.findById(Mockito.anyLong())).thenReturn(Mockito.mock(VMInstanceVO.class));
|
||||
Mockito.lenient().when(hostDao.findById(Mockito.anyLong())).thenReturn(Mockito.mock(HostVO.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void test_updateAndPublishVmPowerStates_emptyStates() {
|
||||
virtualMachinePowerStateSync.updateAndPublishVmPowerStates(1L, new HashMap<>(), new Date());
|
||||
Mockito.verify(instanceDao, Mockito.never()).updatePowerState(Mockito.anyMap(), Mockito.anyLong(),
|
||||
Mockito.any(Date.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void test_updateAndPublishVmPowerStates_moreNotUpdated() {
|
||||
Map<Long, VirtualMachine.PowerState> powerStates = new HashMap<>();
|
||||
powerStates.put(1L, VirtualMachine.PowerState.PowerOff);
|
||||
Map<Long, VirtualMachine.PowerState> notUpdated = new HashMap<>(powerStates);
|
||||
notUpdated.put(2L, VirtualMachine.PowerState.PowerOn);
|
||||
Mockito.when(instanceDao.updatePowerState(Mockito.anyMap(), Mockito.anyLong(),
|
||||
Mockito.any(Date.class))).thenReturn(notUpdated);
|
||||
virtualMachinePowerStateSync.updateAndPublishVmPowerStates(1L, powerStates, new Date());
|
||||
Mockito.verify(messageBus, Mockito.never()).publish(Mockito.nullable(String.class), Mockito.anyString(),
|
||||
Mockito.any(PublishScope.class), Mockito.anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void test_updateAndPublishVmPowerStates_allUpdated() {
|
||||
Map<Long, VirtualMachine.PowerState> powerStates = new HashMap<>();
|
||||
powerStates.put(1L, VirtualMachine.PowerState.PowerOff);
|
||||
Mockito.when(instanceDao.updatePowerState(Mockito.anyMap(), Mockito.anyLong(),
|
||||
Mockito.any(Date.class))).thenReturn(new HashMap<>());
|
||||
virtualMachinePowerStateSync.updateAndPublishVmPowerStates(1L, powerStates, new Date());
|
||||
Mockito.verify(messageBus, Mockito.times(1)).publish(null,
|
||||
VirtualMachineManager.Topics.VM_POWER_STATE,
|
||||
PublishScope.GLOBAL,
|
||||
1L);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void test_updateAndPublishVmPowerStates_partialUpdated() {
|
||||
Map<Long, VirtualMachine.PowerState> powerStates = new HashMap<>();
|
||||
powerStates.put(1L, VirtualMachine.PowerState.PowerOn);
|
||||
powerStates.put(2L, VirtualMachine.PowerState.PowerOff);
|
||||
Map<Long, VirtualMachine.PowerState> notUpdated = new HashMap<>();
|
||||
notUpdated.put(2L, VirtualMachine.PowerState.PowerOff);
|
||||
Mockito.when(instanceDao.updatePowerState(Mockito.anyMap(), Mockito.anyLong(),
|
||||
Mockito.any(Date.class))).thenReturn(notUpdated);
|
||||
virtualMachinePowerStateSync.updateAndPublishVmPowerStates(1L, powerStates, new Date());
|
||||
Mockito.verify(messageBus, Mockito.times(1)).publish(null,
|
||||
VirtualMachineManager.Topics.VM_POWER_STATE,
|
||||
PublishScope.GLOBAL,
|
||||
1L);
|
||||
Mockito.verify(messageBus, Mockito.never()).publish(null,
|
||||
VirtualMachineManager.Topics.VM_POWER_STATE,
|
||||
PublishScope.GLOBAL,
|
||||
2L);
|
||||
}
|
||||
}
|
||||
|
|
@ -215,7 +215,7 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot
|
|||
}
|
||||
|
||||
VMTemplateStoragePoolVO sourceVolumeTemplateStoragePoolVO = vmTemplatePoolDao.findByPoolTemplate(destStoragePool.getId(), srcVolumeInfo.getTemplateId(), null);
|
||||
if (sourceVolumeTemplateStoragePoolVO == null && (isStoragePoolTypeInList(destStoragePool.getPoolType(), StoragePoolType.Filesystem, StoragePoolType.SharedMountPoint))) {
|
||||
if (sourceVolumeTemplateStoragePoolVO == null && (isStoragePoolTypeInList(destStoragePool.getPoolType(), StoragePoolType.NetworkFilesystem, StoragePoolType.Filesystem, StoragePoolType.SharedMountPoint))) {
|
||||
DataStore sourceTemplateDataStore = dataStoreManagerImpl.getRandomImageStore(srcVolumeInfo.getDataCenterId());
|
||||
if (sourceTemplateDataStore != null) {
|
||||
TemplateInfo sourceTemplateInfo = templateDataFactory.getTemplate(srcVolumeInfo.getTemplateId(), sourceTemplateDataStore);
|
||||
|
|
|
|||
|
|
@ -1949,18 +1949,26 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
|||
/**
|
||||
* Return expected MigrationOptions for a linked clone volume live storage migration
|
||||
*/
|
||||
protected MigrationOptions createLinkedCloneMigrationOptions(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, String srcVolumeBackingFile, String srcPoolUuid, Storage.StoragePoolType srcPoolType) {
|
||||
protected MigrationOptions createLinkedCloneMigrationOptions(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, String srcVolumeBackingFile, StoragePoolVO srcPool) {
|
||||
String srcPoolUuid = srcPool.getUuid();
|
||||
Storage.StoragePoolType srcPoolType = srcPool.getPoolType();
|
||||
Long srcPoolClusterId = srcPool.getClusterId();
|
||||
VMTemplateStoragePoolVO ref = templatePoolDao.findByPoolTemplate(destVolumeInfo.getPoolId(), srcVolumeInfo.getTemplateId(), null);
|
||||
boolean updateBackingFileReference = ref == null;
|
||||
String backingFile = !updateBackingFileReference ? ref.getInstallPath() : srcVolumeBackingFile;
|
||||
return new MigrationOptions(srcPoolUuid, srcPoolType, backingFile, updateBackingFileReference, srcVolumeInfo.getDataStore().getScope().getScopeType());
|
||||
ScopeType scopeType = srcVolumeInfo.getDataStore().getScope().getScopeType();
|
||||
return new MigrationOptions(srcPoolUuid, srcPoolType, backingFile, updateBackingFileReference, scopeType, srcPoolClusterId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return expected MigrationOptions for a full clone volume live storage migration
|
||||
*/
|
||||
protected MigrationOptions createFullCloneMigrationOptions(VolumeInfo srcVolumeInfo, VirtualMachineTO vmTO, Host srcHost, String srcPoolUuid, Storage.StoragePoolType srcPoolType) {
|
||||
return new MigrationOptions(srcPoolUuid, srcPoolType, srcVolumeInfo.getPath(), srcVolumeInfo.getDataStore().getScope().getScopeType());
|
||||
protected MigrationOptions createFullCloneMigrationOptions(VolumeInfo srcVolumeInfo, VirtualMachineTO vmTO, Host srcHost, StoragePoolVO srcPool) {
|
||||
String srcPoolUuid = srcPool.getUuid();
|
||||
Storage.StoragePoolType srcPoolType = srcPool.getPoolType();
|
||||
Long srcPoolClusterId = srcPool.getClusterId();
|
||||
ScopeType scopeType = srcVolumeInfo.getDataStore().getScope().getScopeType();
|
||||
return new MigrationOptions(srcPoolUuid, srcPoolType, srcVolumeInfo.getPath(), scopeType, srcPoolClusterId);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -1983,9 +1991,9 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
|
|||
|
||||
MigrationOptions migrationOptions;
|
||||
if (MigrationOptions.Type.LinkedClone.equals(migrationType)) {
|
||||
migrationOptions = createLinkedCloneMigrationOptions(srcVolumeInfo, destVolumeInfo, srcVolumeBackingFile, srcPoolUuid, srcPoolType);
|
||||
migrationOptions = createLinkedCloneMigrationOptions(srcVolumeInfo, destVolumeInfo, srcVolumeBackingFile, srcPool);
|
||||
} else {
|
||||
migrationOptions = createFullCloneMigrationOptions(srcVolumeInfo, vmTO, srcHost, srcPoolUuid, srcPoolType);
|
||||
migrationOptions = createFullCloneMigrationOptions(srcVolumeInfo, vmTO, srcHost, srcPool);
|
||||
}
|
||||
migrationOptions.setTimeout(StorageManager.KvmStorageOnlineMigrationWait.value());
|
||||
destVolumeInfo.setMigrationOptions(migrationOptions);
|
||||
|
|
|
|||
|
|
@ -114,6 +114,7 @@ Requires: iproute
|
|||
Requires: ipset
|
||||
Requires: perl
|
||||
Requires: rsync
|
||||
Requires: cifs-utils
|
||||
Requires: (python3-libvirt or python3-libvirt-python)
|
||||
Requires: (qemu-img or qemu-tools)
|
||||
Requires: qemu-kvm
|
||||
|
|
|
|||
|
|
@ -219,6 +219,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co
|
|||
restoreCommand.setBackupPath(backup.getExternalId());
|
||||
restoreCommand.setBackupRepoType(backupRepository.getType());
|
||||
restoreCommand.setBackupRepoAddress(backupRepository.getAddress());
|
||||
restoreCommand.setMountOptions(backupRepository.getMountOptions());
|
||||
restoreCommand.setVmName(vm.getName());
|
||||
restoreCommand.setVolumePaths(getVolumePaths(volumes));
|
||||
restoreCommand.setVmExists(vm.getRemoved() == null);
|
||||
|
|
@ -287,6 +288,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co
|
|||
restoreCommand.setVmName(vmNameAndState.first());
|
||||
restoreCommand.setVolumePaths(Collections.singletonList(String.format("%s/%s", dataStore.getLocalPath(), volumeUUID)));
|
||||
restoreCommand.setDiskType(volume.getVolumeType().name().toLowerCase(Locale.ROOT));
|
||||
restoreCommand.setMountOptions(backupRepository.getMountOptions());
|
||||
restoreCommand.setVmExists(null);
|
||||
restoreCommand.setVmState(vmNameAndState.second());
|
||||
restoreCommand.setRestoreVolumeUUID(volumeUuid);
|
||||
|
|
@ -372,8 +374,12 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co
|
|||
Long vmBackupSize = 0L;
|
||||
Long vmBackupProtectedSize = 0L;
|
||||
for (final Backup backup: backupDao.listByVmId(null, vm.getId())) {
|
||||
vmBackupSize += backup.getSize();
|
||||
vmBackupProtectedSize += backup.getProtectedSize();
|
||||
if (Objects.nonNull(backup.getSize())) {
|
||||
vmBackupSize += backup.getSize();
|
||||
}
|
||||
if (Objects.nonNull(backup.getProtectedSize())) {
|
||||
vmBackupProtectedSize += backup.getProtectedSize();
|
||||
}
|
||||
}
|
||||
Backup.Metric vmBackupMetric = new Backup.Metric(vmBackupSize,vmBackupProtectedSize);
|
||||
LOG.debug("Metrics for VM {} is [backup size: {}, data size: {}].", vm, vmBackupMetric.getBackupSize(), vmBackupMetric.getDataSize());
|
||||
|
|
|
|||
|
|
@ -38,6 +38,7 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
|||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
public class KVMInvestigator extends AdapterBase implements Investigator {
|
||||
|
|
@ -81,7 +82,7 @@ public class KVMInvestigator extends AdapterBase implements Investigator {
|
|||
return haManager.getHostStatus(agent);
|
||||
}
|
||||
|
||||
List<StoragePoolVO> clusterPools = _storagePoolDao.listPoolsByCluster(agent.getClusterId());
|
||||
List<StoragePoolVO> clusterPools = _storagePoolDao.findPoolsInClusters(Arrays.asList(agent.getClusterId()), null);
|
||||
boolean storageSupportHA = storageSupportHa(clusterPools);
|
||||
if (!storageSupportHA) {
|
||||
List<StoragePoolVO> zonePools = _storagePoolDao.findZoneWideStoragePoolsByHypervisor(agent.getDataCenterId(), agent.getHypervisorType());
|
||||
|
|
@ -89,7 +90,7 @@ public class KVMInvestigator extends AdapterBase implements Investigator {
|
|||
}
|
||||
if (!storageSupportHA) {
|
||||
logger.warn("Agent investigation was requested on host {}, but host does not support investigation because it has no NFS storage. Skipping investigation.", agent);
|
||||
return Status.Disconnected;
|
||||
return null;
|
||||
}
|
||||
|
||||
Status hostStatus = null;
|
||||
|
|
|
|||
|
|
@ -331,6 +331,16 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
public static final String UBUNTU_WINDOWS_GUEST_CONVERSION_SUPPORTED_CHECK_CMD = "dpkg -l virtio-win";
|
||||
public static final String UBUNTU_NBDKIT_PKG_CHECK_CMD = "dpkg -l nbdkit";
|
||||
|
||||
public static final int LIBVIRT_CGROUP_CPU_SHARES_MIN = 2;
|
||||
public static final int LIBVIRT_CGROUP_CPU_SHARES_MAX = 262144;
|
||||
/**
|
||||
* The minimal value for the LIBVIRT_CGROUPV2_WEIGHT_MIN is actually 1.
|
||||
* However, due to an old libvirt bug, it is raised to 2.
|
||||
* See: https://github.com/libvirt/libvirt/commit/38af6497610075e5fe386734b87186731d4c17ac
|
||||
*/
|
||||
public static final int LIBVIRT_CGROUPV2_WEIGHT_MIN = 2;
|
||||
public static final int LIBVIRT_CGROUPV2_WEIGHT_MAX = 10000;
|
||||
|
||||
private String modifyVlanPath;
|
||||
private String versionStringPath;
|
||||
private String patchScriptPath;
|
||||
|
|
@ -512,8 +522,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
|
||||
private static int hostCpuMaxCapacity = 0;
|
||||
|
||||
private static final int CGROUP_V2_UPPER_LIMIT = 10000;
|
||||
|
||||
private static final String COMMAND_GET_CGROUP_HOST_VERSION = "stat -fc %T /sys/fs/cgroup/";
|
||||
|
||||
public static final String CGROUP_V2 = "cgroup2fs";
|
||||
|
|
@ -641,6 +649,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
return libvirtUtilitiesHelper;
|
||||
}
|
||||
|
||||
public String getClusterId() {
|
||||
return clusterId;
|
||||
}
|
||||
|
||||
public CPUStat getCPUStat() {
|
||||
return cpuStat;
|
||||
}
|
||||
|
|
@ -2821,14 +2833,24 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
int requestedCpuShares = vCpus * cpuSpeed;
|
||||
int hostCpuMaxCapacity = getHostCpuMaxCapacity();
|
||||
|
||||
// cgroup v2 is in use
|
||||
if (hostCpuMaxCapacity > 0) {
|
||||
int updatedCpuShares = (int) Math.ceil((requestedCpuShares * CGROUP_V2_UPPER_LIMIT) / (double) hostCpuMaxCapacity);
|
||||
LOGGER.debug(String.format("This host utilizes cgroupv2 (as the max shares value is [%s]), thus, the VM requested shares of [%s] will be converted to " +
|
||||
"consider the host limits; the new CPU shares value is [%s].", hostCpuMaxCapacity, requestedCpuShares, updatedCpuShares));
|
||||
|
||||
int updatedCpuShares = (int) Math.ceil((requestedCpuShares * LIBVIRT_CGROUPV2_WEIGHT_MAX) / (double) hostCpuMaxCapacity);
|
||||
LOGGER.debug("This host utilizes cgroupv2 (as the max shares value is [{}]), thus, the VM requested shares of [{}] will be converted to " +
|
||||
"consider the host limits; the new CPU shares value is [{}].", hostCpuMaxCapacity, requestedCpuShares, updatedCpuShares);
|
||||
|
||||
if (updatedCpuShares < LIBVIRT_CGROUPV2_WEIGHT_MIN) updatedCpuShares = LIBVIRT_CGROUPV2_WEIGHT_MIN;
|
||||
if (updatedCpuShares > LIBVIRT_CGROUPV2_WEIGHT_MAX) updatedCpuShares = LIBVIRT_CGROUPV2_WEIGHT_MAX;
|
||||
return updatedCpuShares;
|
||||
}
|
||||
LOGGER.debug(String.format("This host does not have a maximum CPU shares set; therefore, this host utilizes cgroupv1 and the VM requested CPU shares [%s] will not be " +
|
||||
"converted.", requestedCpuShares));
|
||||
|
||||
// cgroup v1 is in use
|
||||
LOGGER.debug("This host does not have a maximum CPU shares set; therefore, this host utilizes cgroupv1 and the VM requested CPU shares [{}] will not be " +
|
||||
"converted.", requestedCpuShares);
|
||||
|
||||
if (requestedCpuShares < LIBVIRT_CGROUP_CPU_SHARES_MIN) requestedCpuShares = LIBVIRT_CGROUP_CPU_SHARES_MIN;
|
||||
if (requestedCpuShares > LIBVIRT_CGROUP_CPU_SHARES_MAX) requestedCpuShares = LIBVIRT_CGROUP_CPU_SHARES_MAX;
|
||||
return requestedCpuShares;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -27,6 +27,7 @@ import com.cloud.agent.api.GetVmIpAddressCommand;
|
|||
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
|
||||
import com.cloud.resource.CommandWrapper;
|
||||
import com.cloud.resource.ResourceWrapper;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.net.NetUtils;
|
||||
import com.cloud.utils.script.Script;
|
||||
|
||||
|
|
@ -34,6 +35,26 @@ import com.cloud.utils.script.Script;
|
|||
public final class LibvirtGetVmIpAddressCommandWrapper extends CommandWrapper<GetVmIpAddressCommand, Answer, LibvirtComputingResource> {
|
||||
|
||||
|
||||
static String virsh_path = null;
|
||||
static String virt_win_reg_path = null;
|
||||
static String grep_path = null;
|
||||
static String awk_path = null;
|
||||
static String sed_path = null;
|
||||
static String virt_ls_path = null;
|
||||
static String virt_cat_path = null;
|
||||
static String tail_path = null;
|
||||
|
||||
static void init() {
|
||||
virt_ls_path = Script.getExecutableAbsolutePath("virt-ls");
|
||||
virt_cat_path = Script.getExecutableAbsolutePath("virt-cat");
|
||||
virt_win_reg_path = Script.getExecutableAbsolutePath("virt-win-reg");
|
||||
tail_path = Script.getExecutableAbsolutePath("tail");
|
||||
grep_path = Script.getExecutableAbsolutePath("grep");
|
||||
awk_path = Script.getExecutableAbsolutePath("awk");
|
||||
sed_path = Script.getExecutableAbsolutePath("sed");
|
||||
virsh_path = Script.getExecutableAbsolutePath("virsh");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Answer execute(final GetVmIpAddressCommand command, final LibvirtComputingResource libvirtComputingResource) {
|
||||
String ip = null;
|
||||
|
|
@ -42,65 +63,113 @@ public final class LibvirtGetVmIpAddressCommandWrapper extends CommandWrapper<Ge
|
|||
if (!NetUtils.verifyDomainNameLabel(vmName, true)) {
|
||||
return new Answer(command, result, ip);
|
||||
}
|
||||
|
||||
String sanitizedVmName = sanitizeBashCommandArgument(vmName);
|
||||
String networkCidr = command.getVmNetworkCidr();
|
||||
List<String[]> commands = new ArrayList<>();
|
||||
final String virt_ls_path = Script.getExecutableAbsolutePath("virt-ls");
|
||||
final String virt_cat_path = Script.getExecutableAbsolutePath("virt-cat");
|
||||
final String virt_win_reg_path = Script.getExecutableAbsolutePath("virt-win-reg");
|
||||
final String tail_path = Script.getExecutableAbsolutePath("tail");
|
||||
final String grep_path = Script.getExecutableAbsolutePath("grep");
|
||||
final String awk_path = Script.getExecutableAbsolutePath("awk");
|
||||
final String sed_path = Script.getExecutableAbsolutePath("sed");
|
||||
if(!command.isWindows()) {
|
||||
//List all dhcp lease files inside guestVm
|
||||
commands.add(new String[]{virt_ls_path, sanitizedVmName, "/var/lib/dhclient/"});
|
||||
commands.add(new String[]{grep_path, ".*\\*.leases"});
|
||||
String leasesList = Script.executePipedCommands(commands, 0).second();
|
||||
if(leasesList != null) {
|
||||
String[] leasesFiles = leasesList.split("\n");
|
||||
for(String leaseFile : leasesFiles){
|
||||
//Read from each dhclient lease file inside guest Vm using virt-cat libguestfs utility
|
||||
commands = new ArrayList<>();
|
||||
commands.add(new String[]{virt_cat_path, sanitizedVmName, "/var/lib/dhclient/" + leaseFile});
|
||||
commands.add(new String[]{tail_path, "-16"});
|
||||
commands.add(new String[]{grep_path, "fixed-address"});
|
||||
commands.add(new String[]{awk_path, "{print $2}"});
|
||||
commands.add(new String[]{sed_path, "-e", "s/;//"});
|
||||
String ipAddr = Script.executePipedCommands(commands, 0).second();
|
||||
// Check if the IP belongs to the network
|
||||
if((ipAddr != null) && NetUtils.isIpWithInCidrRange(ipAddr, networkCidr)) {
|
||||
ip = ipAddr;
|
||||
break;
|
||||
}
|
||||
logger.debug("GetVmIp: "+ vmName + " Ip: "+ipAddr+" does not belong to network "+networkCidr);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// For windows, read from guest Vm registry using virt-win-reg libguestfs ulitiy. Registry Path: HKEY_LOCAL_MACHINE\SYSTEM\ControlSet001\Services\Tcpip\Parameters\Interfaces\<service>\DhcpIPAddress
|
||||
commands = new ArrayList<>();
|
||||
commands.add(new String[]{virt_win_reg_path, "--unsafe-printable-strings", sanitizedVmName, "HKEY_LOCAL_MACHINE\\SYSTEM\\ControlSet001\\Services\\Tcpip\\Parameters\\Interfaces"});
|
||||
commands.add(new String[]{grep_path, "DhcpIPAddress"});
|
||||
commands.add(new String[]{awk_path, "-F", ":", "{print $2}"});
|
||||
commands.add(new String[]{sed_path, "-e", "s/^\"//", "-e", "s/\"$//"});
|
||||
String ipList = Script.executePipedCommands(commands, 0).second();
|
||||
if(ipList != null) {
|
||||
logger.debug("GetVmIp: "+ vmName + "Ips: "+ipList);
|
||||
String[] ips = ipList.split("\n");
|
||||
for (String ipAddr : ips){
|
||||
// Check if the IP belongs to the network
|
||||
if((ipAddr != null) && NetUtils.isIpWithInCidrRange(ipAddr, networkCidr)){
|
||||
ip = ipAddr;
|
||||
break;
|
||||
}
|
||||
logger.debug("GetVmIp: "+ vmName + " Ip: "+ipAddr+" does not belong to network "+networkCidr);
|
||||
}
|
||||
|
||||
ip = ipFromDomIf(sanitizedVmName, networkCidr);
|
||||
|
||||
if (ip == null) {
|
||||
if(!command.isWindows()) {
|
||||
ip = ipFromDhcpLeaseFile(sanitizedVmName, networkCidr);
|
||||
} else {
|
||||
ip = ipFromWindowsRegistry(sanitizedVmName, networkCidr);
|
||||
}
|
||||
}
|
||||
|
||||
if(ip != null){
|
||||
result = true;
|
||||
logger.debug("GetVmIp: "+ vmName + " Found Ip: "+ip);
|
||||
} else {
|
||||
logger.warn("GetVmIp: "+ vmName + " IP not found.");
|
||||
}
|
||||
|
||||
return new Answer(command, result, ip);
|
||||
}
|
||||
|
||||
private String ipFromDomIf(String sanitizedVmName, String networkCidr) {
|
||||
String ip = null;
|
||||
List<String[]> commands = new ArrayList<>();
|
||||
commands.add(new String[]{virsh_path, "domifaddr", sanitizedVmName, "--source", "agent"});
|
||||
Pair<Integer,String> response = executePipedCommands(commands, 0);
|
||||
if (response != null) {
|
||||
String output = response.second();
|
||||
String[] lines = output.split("\n");
|
||||
for (String line : lines) {
|
||||
if (line.contains("ipv4")) {
|
||||
String[] parts = line.split(" ");
|
||||
String[] ipParts = parts[parts.length-1].split("/");
|
||||
if (ipParts.length > 1) {
|
||||
if (NetUtils.isIpWithInCidrRange(ipParts[0], networkCidr)) {
|
||||
ip = ipParts[0];
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logger.error("ipFromDomIf: Command execution failed for VM: " + sanitizedVmName);
|
||||
}
|
||||
return ip;
|
||||
}
|
||||
|
||||
private String ipFromDhcpLeaseFile(String sanitizedVmName, String networkCidr) {
|
||||
String ip = null;
|
||||
List<String[]> commands = new ArrayList<>();
|
||||
commands.add(new String[]{virt_ls_path, sanitizedVmName, "/var/lib/dhclient/"});
|
||||
commands.add(new String[]{grep_path, ".*\\*.leases"});
|
||||
Pair<Integer,String> response = executePipedCommands(commands, 0);
|
||||
|
||||
if(response != null && response.second() != null) {
|
||||
String leasesList = response.second();
|
||||
String[] leasesFiles = leasesList.split("\n");
|
||||
for(String leaseFile : leasesFiles){
|
||||
commands = new ArrayList<>();
|
||||
commands.add(new String[]{virt_cat_path, sanitizedVmName, "/var/lib/dhclient/" + leaseFile});
|
||||
commands.add(new String[]{tail_path, "-16"});
|
||||
commands.add(new String[]{grep_path, "fixed-address"});
|
||||
commands.add(new String[]{awk_path, "{print $2}"});
|
||||
commands.add(new String[]{sed_path, "-e", "s/;//"});
|
||||
String ipAddr = executePipedCommands(commands, 0).second();
|
||||
if((ipAddr != null) && NetUtils.isIpWithInCidrRange(ipAddr, networkCidr)) {
|
||||
ip = ipAddr;
|
||||
break;
|
||||
}
|
||||
logger.debug("GetVmIp: "+ sanitizedVmName + " Ip: "+ipAddr+" does not belong to network "+networkCidr);
|
||||
}
|
||||
} else {
|
||||
logger.error("ipFromDhcpLeaseFile: Command execution failed for VM: " + sanitizedVmName);
|
||||
}
|
||||
return ip;
|
||||
}
|
||||
|
||||
private String ipFromWindowsRegistry(String sanitizedVmName, String networkCidr) {
|
||||
String ip = null;
|
||||
List<String[]> commands = new ArrayList<>();
|
||||
commands.add(new String[]{virt_win_reg_path, "--unsafe-printable-strings", sanitizedVmName, "HKEY_LOCAL_MACHINE\\SYSTEM\\ControlSet001\\Services\\Tcpip\\Parameters\\Interfaces"});
|
||||
commands.add(new String[]{grep_path, "DhcpIPAddress"});
|
||||
commands.add(new String[]{awk_path, "-F", ":", "{print $2}"});
|
||||
commands.add(new String[]{sed_path, "-e", "s/^\"//", "-e", "s/\"$//"});
|
||||
Pair<Integer,String> pair = executePipedCommands(commands, 0);
|
||||
if(pair != null && pair.second() != null) {
|
||||
String ipList = pair.second();
|
||||
ipList = ipList.replaceAll("\"", "");
|
||||
logger.debug("GetVmIp: "+ sanitizedVmName + "Ips: "+ipList);
|
||||
String[] ips = ipList.split("\n");
|
||||
for (String ipAddr : ips){
|
||||
if((ipAddr != null) && NetUtils.isIpWithInCidrRange(ipAddr, networkCidr)){
|
||||
ip = ipAddr;
|
||||
break;
|
||||
}
|
||||
logger.debug("GetVmIp: "+ sanitizedVmName + " Ip: "+ipAddr+" does not belong to network "+networkCidr);
|
||||
}
|
||||
} else {
|
||||
logger.error("ipFromWindowsRegistry: Command execution failed for VM: " + sanitizedVmName);
|
||||
}
|
||||
return ip;
|
||||
}
|
||||
|
||||
static Pair<Integer, String> executePipedCommands(List<String[]> commands, long timeout) {
|
||||
return Script.executePipedCommands(commands, timeout);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -67,7 +67,7 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
|
|||
int lastIndex = volumePath.lastIndexOf("/");
|
||||
newVolumeId = volumePath.substring(lastIndex + 1);
|
||||
restoreVolume(backupPath, backupRepoType, backupRepoAddress, volumePath, diskType, restoreVolumeUuid,
|
||||
new Pair<>(vmName, command.getVmState()));
|
||||
new Pair<>(vmName, command.getVmState()), mountOptions);
|
||||
} else if (Boolean.TRUE.equals(vmExists)) {
|
||||
restoreVolumesOfExistingVM(volumePaths, backupPath, backupRepoType, backupRepoAddress, mountOptions);
|
||||
} else {
|
||||
|
|
@ -80,7 +80,7 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
|
|||
private void restoreVolumesOfExistingVM(List<String> volumePaths, String backupPath,
|
||||
String backupRepoType, String backupRepoAddress, String mountOptions) {
|
||||
String diskType = "root";
|
||||
String mountDirectory = mountBackupDirectory(backupRepoAddress, backupRepoType);
|
||||
String mountDirectory = mountBackupDirectory(backupRepoAddress, backupRepoType, mountOptions);
|
||||
try {
|
||||
for (int idx = 0; idx < volumePaths.size(); idx++) {
|
||||
String volumePath = volumePaths.get(idx);
|
||||
|
|
@ -101,7 +101,7 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
|
|||
|
||||
private void restoreVolumesOfDestroyedVMs(List<String> volumePaths, String vmName, String backupPath,
|
||||
String backupRepoType, String backupRepoAddress, String mountOptions) {
|
||||
String mountDirectory = mountBackupDirectory(backupRepoAddress, backupRepoType);
|
||||
String mountDirectory = mountBackupDirectory(backupRepoAddress, backupRepoType, mountOptions);
|
||||
String diskType = "root";
|
||||
try {
|
||||
for (int i = 0; i < volumePaths.size(); i++) {
|
||||
|
|
@ -121,8 +121,8 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
|
|||
}
|
||||
|
||||
private void restoreVolume(String backupPath, String backupRepoType, String backupRepoAddress, String volumePath,
|
||||
String diskType, String volumeUUID, Pair<String, VirtualMachine.State> vmNameAndState) {
|
||||
String mountDirectory = mountBackupDirectory(backupRepoAddress, backupRepoType);
|
||||
String diskType, String volumeUUID, Pair<String, VirtualMachine.State> vmNameAndState, String mountOptions) {
|
||||
String mountDirectory = mountBackupDirectory(backupRepoAddress, backupRepoType, mountOptions);
|
||||
Pair<String, String> bkpPathAndVolUuid;
|
||||
try {
|
||||
bkpPathAndVolUuid = getBackupPath(mountDirectory, volumePath, backupPath, diskType, volumeUUID);
|
||||
|
|
@ -145,12 +145,22 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
|
|||
}
|
||||
|
||||
|
||||
private String mountBackupDirectory(String backupRepoAddress, String backupRepoType) {
|
||||
private String mountBackupDirectory(String backupRepoAddress, String backupRepoType, String mountOptions) {
|
||||
String randomChars = RandomStringUtils.random(5, true, false);
|
||||
String mountDirectory = String.format("%s.%s",BACKUP_TEMP_FILE_PREFIX , randomChars);
|
||||
try {
|
||||
mountDirectory = Files.createTempDirectory(mountDirectory).toString();
|
||||
String mountOpts = null;
|
||||
if (Objects.nonNull(mountOptions)) {
|
||||
mountOpts = mountOptions;
|
||||
if ("cifs".equals(backupRepoType)) {
|
||||
mountOpts += ",nobrl";
|
||||
}
|
||||
}
|
||||
String mount = String.format(MOUNT_COMMAND, backupRepoType, backupRepoAddress, mountDirectory);
|
||||
if (Objects.nonNull(mountOpts)) {
|
||||
mount += " -o " + mountOpts;
|
||||
}
|
||||
Script.runSimpleBashScript(mount);
|
||||
} catch (Exception e) {
|
||||
throw new CloudRuntimeException(String.format("Failed to mount %s to %s", backupRepoType, backupRepoAddress), e);
|
||||
|
|
|
|||
|
|
@ -2660,6 +2660,12 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
return localPool;
|
||||
}
|
||||
|
||||
if (migrationOptions.getScopeType().equals(ScopeType.CLUSTER)
|
||||
&& migrationOptions.getSrcPoolClusterId() != null
|
||||
&& !migrationOptions.getSrcPoolClusterId().toString().equals(resource.getClusterId())) {
|
||||
return localPool;
|
||||
}
|
||||
|
||||
return storagePoolMgr.getStoragePool(migrationOptions.getSrcPoolType(), migrationOptions.getSrcPoolUuid());
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,320 @@
|
|||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
//
|
||||
|
||||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.mockito.ArgumentMatchers.anyList;
|
||||
import static org.mockito.Mockito.mockStatic;
|
||||
import static org.mockito.ArgumentMatchers.anyLong;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.mockito.MockedStatic;
|
||||
import org.mockito.MockitoAnnotations;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.GetVmIpAddressCommand;
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.script.Script;
|
||||
|
||||
public class LibvirtGetVmIpAddressCommandWrapperTest {
|
||||
|
||||
private static String VIRSH_DOMIF_OUTPUT = " Name MAC address Protocol Address\n" + //
|
||||
"-------------------------------------------------------------------------------\n" + //
|
||||
" lo 00:00:00:00:00:70 ipv4 127.0.0.1/8\n" + //
|
||||
" eth0 02:0c:02:f9:00:80 ipv4 192.168.0.10/24\n" + //
|
||||
" net1 b2:41:19:69:a4:90 N/A N/A\n" + //
|
||||
" net2 52:a2:36:cf:d1:50 ipv4 10.244.6.93/32\n" + //
|
||||
" net3 a6:1d:d3:52:d3:40 N/A N/A\n" + //
|
||||
" net4 2e:9b:60:dc:49:30 N/A N/A\n" + //
|
||||
" lxc5b7327203b6f 92:b2:77:0b:a9:20 N/A N/A\n";
|
||||
|
||||
@Before
|
||||
public void setUp() {
|
||||
MockitoAnnotations.openMocks(this);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteWithValidVmName() {
|
||||
LibvirtComputingResource libvirtComputingResource = mock(LibvirtComputingResource.class);
|
||||
GetVmIpAddressCommand getVmIpAddressCommand = mock(GetVmIpAddressCommand.class);
|
||||
LibvirtGetVmIpAddressCommandWrapper commandWrapper = new LibvirtGetVmIpAddressCommandWrapper();
|
||||
MockedStatic<Script> scriptMock = mockStatic(Script.class);
|
||||
|
||||
when(getVmIpAddressCommand.getVmName()).thenReturn("validVmName");
|
||||
when(getVmIpAddressCommand.getVmNetworkCidr()).thenReturn("192.168.0.0/24");
|
||||
when(getVmIpAddressCommand.isWindows()).thenReturn(false);
|
||||
when(Script.executePipedCommands(anyList(), anyLong())).thenReturn(new Pair<>(0, VIRSH_DOMIF_OUTPUT));
|
||||
|
||||
Answer answer = commandWrapper.execute(getVmIpAddressCommand, libvirtComputingResource);
|
||||
|
||||
try {
|
||||
assertTrue(answer.getResult());
|
||||
assertEquals("192.168.0.10", answer.getDetails());
|
||||
} finally {
|
||||
scriptMock.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteWithInvalidVmName() {
|
||||
LibvirtComputingResource libvirtComputingResource = mock(LibvirtComputingResource.class);
|
||||
GetVmIpAddressCommand getVmIpAddressCommand = mock(GetVmIpAddressCommand.class);
|
||||
LibvirtGetVmIpAddressCommandWrapper commandWrapper = new LibvirtGetVmIpAddressCommandWrapper();
|
||||
MockedStatic<Script> scriptMock = mockStatic(Script.class);
|
||||
|
||||
when(getVmIpAddressCommand.getVmName()).thenReturn("invalidVmName!");
|
||||
when(getVmIpAddressCommand.getVmNetworkCidr()).thenReturn("192.168.0.0/24");
|
||||
when(getVmIpAddressCommand.isWindows()).thenReturn(false);
|
||||
when(Script.executePipedCommands(anyList(), anyLong())).thenReturn(new Pair<>(0, VIRSH_DOMIF_OUTPUT));
|
||||
|
||||
Answer answer = commandWrapper.execute(getVmIpAddressCommand, libvirtComputingResource);
|
||||
|
||||
try {
|
||||
assertFalse(answer.getResult());
|
||||
assertNull(answer.getDetails());
|
||||
} finally {
|
||||
scriptMock.close();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteWithWindowsVm() {
|
||||
LibvirtComputingResource libvirtComputingResource = mock(LibvirtComputingResource.class);
|
||||
GetVmIpAddressCommand getVmIpAddressCommand = mock(GetVmIpAddressCommand.class);
|
||||
LibvirtGetVmIpAddressCommandWrapper commandWrapper = new LibvirtGetVmIpAddressCommandWrapper();
|
||||
MockedStatic<Script> scriptMock = null;
|
||||
|
||||
try {
|
||||
scriptMock = mockStatic(Script.class);
|
||||
|
||||
when(getVmIpAddressCommand.getVmName()).thenReturn("validVmName");
|
||||
when(getVmIpAddressCommand.getVmNetworkCidr()).thenReturn("192.168.0.0/24");
|
||||
when(getVmIpAddressCommand.isWindows()).thenReturn(true);
|
||||
when(Script.executePipedCommands(anyList(), anyLong())).thenReturn(new Pair<>(0, "192.168.0.10"));
|
||||
|
||||
Answer answer = commandWrapper.execute(getVmIpAddressCommand, libvirtComputingResource);
|
||||
|
||||
assertTrue(answer.getResult());
|
||||
assertEquals("192.168.0.10", answer.getDetails());
|
||||
} finally {
|
||||
if (scriptMock != null)
|
||||
scriptMock.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteWithNoIpFound() {
|
||||
LibvirtComputingResource libvirtComputingResource = mock(LibvirtComputingResource.class);
|
||||
GetVmIpAddressCommand getVmIpAddressCommand = mock(GetVmIpAddressCommand.class);
|
||||
LibvirtGetVmIpAddressCommandWrapper commandWrapper = new LibvirtGetVmIpAddressCommandWrapper();
|
||||
MockedStatic<Script> scriptMock = null;
|
||||
try {
|
||||
scriptMock = mockStatic(Script.class);
|
||||
|
||||
when(getVmIpAddressCommand.getVmName()).thenReturn("validVmName");
|
||||
when(getVmIpAddressCommand.getVmNetworkCidr()).thenReturn("192.168.0.0/24");
|
||||
when(getVmIpAddressCommand.isWindows()).thenReturn(false);
|
||||
when(Script.executePipedCommands(anyList(), anyLong())).thenReturn(new Pair<>(0, ""));
|
||||
|
||||
Answer answer = commandWrapper.execute(getVmIpAddressCommand, libvirtComputingResource);
|
||||
|
||||
assertFalse(answer.getResult());
|
||||
assertNull(answer.getDetails());
|
||||
} finally {
|
||||
if (scriptMock != null)
|
||||
scriptMock.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteWithValidVmNameAndNoIpFound() {
|
||||
LibvirtComputingResource libvirtComputingResource = mock(LibvirtComputingResource.class);
|
||||
GetVmIpAddressCommand getVmIpAddressCommand = mock(GetVmIpAddressCommand.class);
|
||||
LibvirtGetVmIpAddressCommandWrapper commandWrapper = new LibvirtGetVmIpAddressCommandWrapper();
|
||||
MockedStatic<Script> scriptMock = null;
|
||||
try {
|
||||
scriptMock = mockStatic(Script.class);
|
||||
|
||||
when(getVmIpAddressCommand.getVmName()).thenReturn("validVmName");
|
||||
when(getVmIpAddressCommand.getVmNetworkCidr()).thenReturn("192.168.0.0/24");
|
||||
when(getVmIpAddressCommand.isWindows()).thenReturn(false);
|
||||
when(Script.executePipedCommands(anyList(), anyLong())).thenReturn(new Pair<>(0, ""));
|
||||
|
||||
Answer answer = commandWrapper.execute(getVmIpAddressCommand, libvirtComputingResource);
|
||||
|
||||
assertFalse(answer.getResult());
|
||||
assertNull(answer.getDetails());
|
||||
} finally {
|
||||
if (scriptMock != null)
|
||||
scriptMock.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteWithValidVmNameAndIpFromDhcpLeaseFile() {
|
||||
LibvirtComputingResource libvirtComputingResource = mock(LibvirtComputingResource.class);
|
||||
GetVmIpAddressCommand getVmIpAddressCommand = mock(GetVmIpAddressCommand.class);
|
||||
LibvirtGetVmIpAddressCommandWrapper commandWrapper = new LibvirtGetVmIpAddressCommandWrapper();
|
||||
MockedStatic<Script> scriptMock = null;
|
||||
try {
|
||||
scriptMock = mockStatic(Script.class);
|
||||
when(getVmIpAddressCommand.getVmName()).thenReturn("validVmName");
|
||||
when(getVmIpAddressCommand.getVmNetworkCidr()).thenReturn("192.168.0.0/24");
|
||||
when(getVmIpAddressCommand.isWindows()).thenReturn(false);
|
||||
when(Script.executePipedCommands(anyList(), anyLong())).thenReturn(new Pair<>(0, "192.168.0.10"));
|
||||
|
||||
Answer answer = commandWrapper.execute(getVmIpAddressCommand, libvirtComputingResource);
|
||||
|
||||
assertTrue(answer.getResult());
|
||||
assertEquals("192.168.0.10", answer.getDetails());
|
||||
} finally {
|
||||
if (scriptMock != null)
|
||||
scriptMock.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteWithValidVmNameAndIpFromWindowsRegistry() {
|
||||
LibvirtComputingResource libvirtComputingResource = mock(LibvirtComputingResource.class);
|
||||
GetVmIpAddressCommand getVmIpAddressCommand = mock(GetVmIpAddressCommand.class);
|
||||
LibvirtGetVmIpAddressCommandWrapper commandWrapper = new LibvirtGetVmIpAddressCommandWrapper();
|
||||
MockedStatic<Script> scriptMock = null;
|
||||
try {
|
||||
scriptMock = mockStatic(Script.class);
|
||||
when(getVmIpAddressCommand.getVmName()).thenReturn("validVmName");
|
||||
when(getVmIpAddressCommand.getVmNetworkCidr()).thenReturn("192.168.0.0/24");
|
||||
when(getVmIpAddressCommand.isWindows()).thenReturn(true);
|
||||
when(Script.executePipedCommands(anyList(), anyLong())).thenReturn(new Pair<>(0, "\"192.168.0.10\""));
|
||||
|
||||
Answer answer = commandWrapper.execute(getVmIpAddressCommand, libvirtComputingResource);
|
||||
|
||||
assertTrue(answer.getResult());
|
||||
assertEquals("192.168.0.10", answer.getDetails());
|
||||
} finally {
|
||||
if (scriptMock != null)
|
||||
scriptMock.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testIpFromDomIfCommandExecutionFailure() {
|
||||
LibvirtComputingResource libvirtComputingResource = mock(LibvirtComputingResource.class);
|
||||
GetVmIpAddressCommand getVmIpAddressCommand = mock(GetVmIpAddressCommand.class);
|
||||
LibvirtGetVmIpAddressCommandWrapper commandWrapper = new LibvirtGetVmIpAddressCommandWrapper();
|
||||
MockedStatic<Script> scriptMock = null;
|
||||
try {
|
||||
scriptMock = mockStatic(Script.class);
|
||||
when(getVmIpAddressCommand.getVmName()).thenReturn("testVm");
|
||||
when(getVmIpAddressCommand.getVmNetworkCidr()).thenReturn("192.168.1.0/24");
|
||||
when(Script.executePipedCommands(anyList(), anyLong())).thenReturn(null);
|
||||
|
||||
Answer answer = commandWrapper.execute(getVmIpAddressCommand, libvirtComputingResource);
|
||||
|
||||
assertFalse(answer.getResult());
|
||||
assertNull(answer.getDetails());
|
||||
} finally {
|
||||
if (scriptMock != null)
|
||||
scriptMock.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testIpFromDhcpLeaseFileCommandExecutionFailure() {
|
||||
LibvirtComputingResource libvirtComputingResource = mock(LibvirtComputingResource.class);
|
||||
GetVmIpAddressCommand getVmIpAddressCommand = mock(GetVmIpAddressCommand.class);
|
||||
LibvirtGetVmIpAddressCommandWrapper commandWrapper = new LibvirtGetVmIpAddressCommandWrapper();
|
||||
MockedStatic<Script> scriptMock = null;
|
||||
try {
|
||||
scriptMock = mockStatic(Script.class);
|
||||
when(getVmIpAddressCommand.getVmName()).thenReturn("testVm");
|
||||
when(getVmIpAddressCommand.getVmNetworkCidr()).thenReturn("192.168.1.0/24");
|
||||
when(Script.executePipedCommands(anyList(), anyLong())).thenReturn(null);
|
||||
|
||||
Answer answer = commandWrapper.execute(getVmIpAddressCommand, libvirtComputingResource);
|
||||
|
||||
assertFalse(answer.getResult());
|
||||
assertNull(answer.getDetails());
|
||||
} finally {
|
||||
if (scriptMock != null)
|
||||
scriptMock.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testIpFromWindowsRegistryCommandExecutionFailure() {
|
||||
LibvirtComputingResource libvirtComputingResource = mock(LibvirtComputingResource.class);
|
||||
GetVmIpAddressCommand getVmIpAddressCommand = mock(GetVmIpAddressCommand.class);
|
||||
LibvirtGetVmIpAddressCommandWrapper commandWrapper = new LibvirtGetVmIpAddressCommandWrapper();
|
||||
MockedStatic<Script> scriptMock = null;
|
||||
try {
|
||||
scriptMock = mockStatic(Script.class);
|
||||
when(getVmIpAddressCommand.getVmName()).thenReturn("testVm");
|
||||
when(getVmIpAddressCommand.getVmNetworkCidr()).thenReturn("192.168.1.0/24");
|
||||
when(getVmIpAddressCommand.isWindows()).thenReturn(true);
|
||||
when(Script.executePipedCommands(anyList(), anyLong())).thenReturn(null);
|
||||
|
||||
Answer answer = commandWrapper.execute(getVmIpAddressCommand, libvirtComputingResource);
|
||||
|
||||
assertFalse(answer.getResult());
|
||||
assertNull(answer.getDetails());
|
||||
} finally {
|
||||
if (scriptMock != null)
|
||||
scriptMock.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInit() {
|
||||
MockedStatic<Script> scriptMock = null;
|
||||
try {
|
||||
scriptMock = mockStatic(Script.class);
|
||||
scriptMock.when(() -> Script.getExecutableAbsolutePath("virt-ls")).thenReturn("/usr/bin/virt-ls");
|
||||
scriptMock.when(() -> Script.getExecutableAbsolutePath("virt-cat")).thenReturn("/usr/bin/virt-cat");
|
||||
scriptMock.when(() -> Script.getExecutableAbsolutePath("virt-win-reg")).thenReturn("/usr/bin/virt-win-reg");
|
||||
scriptMock.when(() -> Script.getExecutableAbsolutePath("tail")).thenReturn("/usr/bin/tail");
|
||||
scriptMock.when(() -> Script.getExecutableAbsolutePath("grep")).thenReturn("/usr/bin/grep");
|
||||
scriptMock.when(() -> Script.getExecutableAbsolutePath("awk")).thenReturn("/usr/bin/awk");
|
||||
scriptMock.when(() -> Script.getExecutableAbsolutePath("sed")).thenReturn("/usr/bin/sed");
|
||||
scriptMock.when(() -> Script.getExecutableAbsolutePath("virsh")).thenReturn("/usr/bin/virsh");
|
||||
|
||||
LibvirtGetVmIpAddressCommandWrapper.init();
|
||||
|
||||
assertEquals("/usr/bin/virt-ls", LibvirtGetVmIpAddressCommandWrapper.virt_ls_path);
|
||||
assertEquals("/usr/bin/virt-cat", LibvirtGetVmIpAddressCommandWrapper.virt_cat_path);
|
||||
assertEquals("/usr/bin/virt-win-reg", LibvirtGetVmIpAddressCommandWrapper.virt_win_reg_path);
|
||||
assertEquals("/usr/bin/tail", LibvirtGetVmIpAddressCommandWrapper.tail_path);
|
||||
assertEquals("/usr/bin/grep", LibvirtGetVmIpAddressCommandWrapper.grep_path);
|
||||
assertEquals("/usr/bin/awk", LibvirtGetVmIpAddressCommandWrapper.awk_path);
|
||||
assertEquals("/usr/bin/sed", LibvirtGetVmIpAddressCommandWrapper.sed_path);
|
||||
assertEquals("/usr/bin/virsh", LibvirtGetVmIpAddressCommandWrapper.virsh_path);
|
||||
} finally {
|
||||
if (scriptMock != null)
|
||||
scriptMock.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -18,6 +18,8 @@
|
|||
package org.apache.cloudstack.cloudian.client;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.PushbackInputStream;
|
||||
import java.net.SocketTimeoutException;
|
||||
import java.security.KeyManagementException;
|
||||
import java.security.KeyStoreException;
|
||||
|
|
@ -28,11 +30,13 @@ import java.util.Arrays;
|
|||
import java.util.List;
|
||||
|
||||
import javax.net.ssl.SSLContext;
|
||||
import javax.net.ssl.SSLException;
|
||||
import javax.net.ssl.X509TrustManager;
|
||||
|
||||
import org.apache.cloudstack.api.ApiErrorCode;
|
||||
import org.apache.cloudstack.api.ServerApiException;
|
||||
import org.apache.cloudstack.utils.security.SSLUtils;
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.HttpHost;
|
||||
import org.apache.http.HttpResponse;
|
||||
import org.apache.http.HttpStatus;
|
||||
|
|
@ -56,6 +60,7 @@ import org.apache.http.impl.auth.BasicScheme;
|
|||
import org.apache.http.impl.client.BasicAuthCache;
|
||||
import org.apache.http.impl.client.BasicCredentialsProvider;
|
||||
import org.apache.http.impl.client.HttpClientBuilder;
|
||||
import org.apache.http.util.EntityUtils;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
|
||||
|
|
@ -88,7 +93,7 @@ public class CloudianClient {
|
|||
.setSocketTimeout(timeout * 1000)
|
||||
.build();
|
||||
|
||||
if (!validateSSlCertificate) {
|
||||
if (!validateSSlCertificate && "https".equalsIgnoreCase(scheme)) {
|
||||
final SSLContext sslcontext = SSLUtils.getSSLContext();
|
||||
sslcontext.init(null, new X509TrustManager[]{new TrustAllManager()}, new SecureRandom());
|
||||
final SSLConnectionSocketFactory factory = new SSLConnectionSocketFactory(sslcontext, NoopHostnameVerifier.INSTANCE);
|
||||
|
|
@ -108,7 +113,9 @@ public class CloudianClient {
|
|||
private void checkAuthFailure(final HttpResponse response) {
|
||||
if (response != null && response.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED) {
|
||||
final Credentials credentials = httpContext.getCredentialsProvider().getCredentials(AuthScope.ANY);
|
||||
logger.error("Cloudian admin API authentication failed, please check Cloudian configuration. Admin auth principal=" + credentials.getUserPrincipal() + ", password=" + credentials.getPassword() + ", API url=" + adminApiUrl);
|
||||
// Don't dump the actual password in the log, but its useful to know the length perhaps.
|
||||
final String asteriskPassword = "*".repeat(credentials.getPassword().length());
|
||||
logger.error("Cloudian admin API authentication failed, please check Cloudian configuration. Admin auth principal=" + credentials.getUserPrincipal() + ", password=" + asteriskPassword + ", API url=" + adminApiUrl);
|
||||
throw new ServerApiException(ApiErrorCode.UNAUTHORIZED, "Cloudian backend API call unauthorized, please ask your administrator to fix integration issues.");
|
||||
}
|
||||
}
|
||||
|
|
@ -123,15 +130,54 @@ public class CloudianClient {
|
|||
}
|
||||
}
|
||||
|
||||
private boolean checkEmptyResponse(final HttpResponse response) throws IOException {
|
||||
return response != null && (response.getStatusLine().getStatusCode() == HttpStatus.SC_NO_CONTENT ||
|
||||
response.getEntity() == null ||
|
||||
response.getEntity().getContent() == null);
|
||||
/**
|
||||
* Return true if the response does not have an entity.
|
||||
* This is not the same thing as an empty body which is different and not detected here.
|
||||
* The 200 response for example should always return false even if it has no body bytes.
|
||||
* @param response the response to check
|
||||
* @return true if status code was 204 or the response does not have an entity. False otherwise.
|
||||
*/
|
||||
private boolean noResponseEntity(final HttpResponse response) {
|
||||
return response != null && (response.getStatusLine().getStatusCode() == HttpStatus.SC_NO_CONTENT || response.getEntity() == null);
|
||||
}
|
||||
|
||||
private void checkResponseTimeOut(final Exception e) {
|
||||
/**
|
||||
* Throw a specific exception for timeout or a more generic server error.
|
||||
* This method does not return to the caller and instead always throws an exception.
|
||||
* @param e IOException (including ClientProtocolException) as thrown by httpClient.execute()
|
||||
* @throws ServerApiException is always thrown
|
||||
*/
|
||||
private void throwTimeoutOrServerException(final IOException e) {
|
||||
if (e instanceof ConnectTimeoutException || e instanceof SocketTimeoutException) {
|
||||
throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, "Operation timed out, please try again.");
|
||||
} else if (e instanceof SSLException) {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "SSL Error Connecting to Cloudian Admin Service", e);
|
||||
} else {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "internal error", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the body content stream only if the body has bytes.
|
||||
*
|
||||
* Unfortunately, some of the responses such as listGroups() or listUsers() return
|
||||
* an empty body instead of returning and empty list. The only way to detect this is
|
||||
* to try read from the body. This method handles this and will return null if the
|
||||
* body was empty or a valid stream with the body content otherwise.
|
||||
*
|
||||
* @param response the response to check for the body contents.
|
||||
* @return a valid InputStream or null if the body was empty.
|
||||
*
|
||||
* @throws IOException some error reading from the body such as timeout etc.
|
||||
*/
|
||||
protected InputStream getNonEmptyContentStream(HttpResponse response) throws IOException {
|
||||
PushbackInputStream iStream = new PushbackInputStream(response.getEntity().getContent());
|
||||
int firstByte=iStream.read();
|
||||
if (firstByte == -1) {
|
||||
return null;
|
||||
} else {
|
||||
iStream.unread(firstByte);
|
||||
return iStream;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -159,22 +205,110 @@ public class CloudianClient {
|
|||
return response;
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform a HTTP PUT operation using the path and optional JSON body item.
|
||||
* @param path the http path to use
|
||||
* @param item optional object to send in the body payload. Set to null if no body.
|
||||
* @return the HttpResponse object
|
||||
* @throws IOException if the request cannot be executed completely.
|
||||
* @throws ServerApiException if the request meets 401 unauthorized.
|
||||
*/
|
||||
private HttpResponse put(final String path, final Object item) throws IOException {
|
||||
final ObjectMapper mapper = new ObjectMapper();
|
||||
final String json = mapper.writeValueAsString(item);
|
||||
final StringEntity entity = new StringEntity(json);
|
||||
final HttpPut request = new HttpPut(adminApiUrl + path);
|
||||
request.setHeader("content-type", "application/json");
|
||||
request.setEntity(entity);
|
||||
if (item != null) {
|
||||
final ObjectMapper mapper = new ObjectMapper();
|
||||
final String json = mapper.writeValueAsString(item);
|
||||
final StringEntity entity = new StringEntity(json);
|
||||
request.setHeader("content-type", "application/json");
|
||||
request.setEntity(entity);
|
||||
}
|
||||
final HttpResponse response = httpClient.execute(request, httpContext);
|
||||
checkAuthFailure(response);
|
||||
return response;
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////
|
||||
//////////////// Public APIs: Misc /////////////////////
|
||||
////////////////////////////////////////////////////////
|
||||
|
||||
/**
|
||||
* Get the HyperStore Server Version number.
|
||||
*
|
||||
* @return version number
|
||||
* @throws ServerApiException on non-200 response or timeout
|
||||
*/
|
||||
public String getServerVersion() {
|
||||
logger.debug("Getting server version");
|
||||
try {
|
||||
final HttpResponse response = get("/system/version");
|
||||
checkResponseOK(response);
|
||||
HttpEntity entity = response.getEntity();
|
||||
return EntityUtils.toString(entity, "UTF-8");
|
||||
} catch (final IOException e) {
|
||||
logger.error("Failed to get HyperStore system version:", e);
|
||||
throwTimeoutOrServerException(e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get bucket usage information for a group, a user or a particular bucket.
|
||||
*
|
||||
* Note: Bucket Usage Statistics in HyperStore are disabled by default. They
|
||||
* can be enabled by the HyperStore Administrator by setting of the configuration
|
||||
* 's3.qos.bucketLevel=true'.
|
||||
*
|
||||
* @param groupId the groupId is required (and must exist)
|
||||
* @param userId the userId is optional (null) and if not set all group users are returned.
|
||||
* @param bucket the bucket is optional (null). If set, the userId must also be set.
|
||||
* @return a list of bucket usages (possibly empty).
|
||||
* @throws ServerApiException on non-200 response such as unknown groupId etc or response issue.
|
||||
*/
|
||||
public List<CloudianUserBucketUsage> getUserBucketUsages(final String groupId, final String userId, final String bucket) {
|
||||
if (StringUtils.isBlank(groupId) || (StringUtils.isBlank(userId) && !StringUtils.isBlank(bucket))) {
|
||||
String msg = String.format("Bad parameters groupId=%s userId=%s bucket=%s", groupId, userId, bucket);
|
||||
logger.error(msg);
|
||||
throw new ServerApiException(ApiErrorCode.PARAM_ERROR, msg);
|
||||
}
|
||||
|
||||
logger.debug("Getting bucket usages for groupId={} userId={} bucket={}", groupId, userId, bucket);
|
||||
StringBuilder cmd = new StringBuilder("/system/bucketusage?groupId=");
|
||||
cmd.append(groupId);
|
||||
if (! StringUtils.isBlank(userId)) {
|
||||
cmd.append("&userId=");
|
||||
cmd.append(userId);
|
||||
}
|
||||
if (! StringUtils.isBlank(bucket)) {
|
||||
cmd.append("&bucket=");
|
||||
cmd.append(bucket);
|
||||
}
|
||||
|
||||
try {
|
||||
final HttpResponse response = get(cmd.toString());
|
||||
checkResponseOK(response);
|
||||
if (noResponseEntity(response)) {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "API error");
|
||||
}
|
||||
// If the groupId exists, this request always returns a proper (possibly empty) list
|
||||
final ObjectMapper mapper = new ObjectMapper();
|
||||
return Arrays.asList(mapper.readValue(response.getEntity().getContent(), CloudianUserBucketUsage[].class));
|
||||
} catch (final IOException e) {
|
||||
logger.error("Failed to get bucket usage stats due to:", e);
|
||||
throwTimeoutOrServerException(e);
|
||||
return new ArrayList<>(); // never reached
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////
|
||||
//////////////// Public APIs: User /////////////////////
|
||||
////////////////////////////////////////////////////////
|
||||
|
||||
/**
|
||||
* Create a new HyperStore user.
|
||||
* @param user the User object to create.
|
||||
* @return true if the user was successfully created, false if it exists or there was other non-200 (except 401) response.
|
||||
* @throws ServerApiException if there was any other issue such as 401 unauthorized or network error.
|
||||
*/
|
||||
public boolean addUser(final CloudianUser user) {
|
||||
if (user == null) {
|
||||
return false;
|
||||
|
|
@ -185,11 +319,18 @@ public class CloudianClient {
|
|||
return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
|
||||
} catch (final IOException e) {
|
||||
logger.error("Failed to add Cloudian user due to:", e);
|
||||
checkResponseTimeOut(e);
|
||||
throwTimeoutOrServerException(e);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a HyperStore user.
|
||||
* @param userId the userId
|
||||
* @param groupId the groupId the user belongs to
|
||||
* @return CloudianUser if found, null if not found.
|
||||
* @throws ServerApiException if the is any problem.
|
||||
*/
|
||||
public CloudianUser listUser(final String userId, final String groupId) {
|
||||
if (StringUtils.isAnyEmpty(userId, groupId)) {
|
||||
return null;
|
||||
|
|
@ -198,18 +339,24 @@ public class CloudianClient {
|
|||
try {
|
||||
final HttpResponse response = get(String.format("/user?userId=%s&groupId=%s", userId, groupId));
|
||||
checkResponseOK(response);
|
||||
if (checkEmptyResponse(response)) {
|
||||
return null;
|
||||
if (noResponseEntity(response)) {
|
||||
return null; // User not found
|
||||
}
|
||||
final ObjectMapper mapper = new ObjectMapper();
|
||||
return mapper.readValue(response.getEntity().getContent(), CloudianUser.class);
|
||||
} catch (final IOException e) {
|
||||
logger.error("Failed to list Cloudian user due to:", e);
|
||||
checkResponseTimeOut(e);
|
||||
throwTimeoutOrServerException(e);
|
||||
return null; // never reached
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a list of all active HyperStore users in a group.
|
||||
* @param groupId the target group to list
|
||||
* @return a possibly empty list of CloudianUser objects.
|
||||
* @throws ServerApiException if there is any problem or non-200 response.
|
||||
*/
|
||||
public List<CloudianUser> listUsers(final String groupId) {
|
||||
if (StringUtils.isEmpty(groupId)) {
|
||||
return new ArrayList<>();
|
||||
|
|
@ -218,16 +365,20 @@ public class CloudianClient {
|
|||
try {
|
||||
final HttpResponse response = get(String.format("/user/list?groupId=%s&userType=all&userStatus=active", groupId));
|
||||
checkResponseOK(response);
|
||||
if (checkEmptyResponse(response)) {
|
||||
return new ArrayList<>();
|
||||
if (noResponseEntity(response)) {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "API error");
|
||||
}
|
||||
InputStream iStream = getNonEmptyContentStream(response);
|
||||
if (iStream == null) {
|
||||
return new ArrayList<>(); // empty body => empty list
|
||||
}
|
||||
final ObjectMapper mapper = new ObjectMapper();
|
||||
return Arrays.asList(mapper.readValue(response.getEntity().getContent(), CloudianUser[].class));
|
||||
return Arrays.asList(mapper.readValue(iStream, CloudianUser[].class));
|
||||
} catch (final IOException e) {
|
||||
logger.error("Failed to list Cloudian users due to:", e);
|
||||
checkResponseTimeOut(e);
|
||||
throwTimeoutOrServerException(e);
|
||||
return new ArrayList<>(); // never reached
|
||||
}
|
||||
return new ArrayList<>();
|
||||
}
|
||||
|
||||
public boolean updateUser(final CloudianUser user) {
|
||||
|
|
@ -240,7 +391,7 @@ public class CloudianClient {
|
|||
return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
|
||||
} catch (final IOException e) {
|
||||
logger.error("Failed to update Cloudian user due to:", e);
|
||||
checkResponseTimeOut(e);
|
||||
throwTimeoutOrServerException(e);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
|
@ -255,11 +406,81 @@ public class CloudianClient {
|
|||
return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
|
||||
} catch (final IOException e) {
|
||||
logger.error("Failed to remove Cloudian user due to:", e);
|
||||
checkResponseTimeOut(e);
|
||||
throwTimeoutOrServerException(e);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new HyperStore Root credential.
|
||||
* @param userId the userId
|
||||
* @param groupId the groupId
|
||||
* @return the new Credential (should never be null)
|
||||
* @throws ServerApiException if the request fails or bad parameters given
|
||||
*/
|
||||
public CloudianCredential createCredential(final String userId, final String groupId) {
|
||||
if (StringUtils.isAnyBlank(userId, groupId)) {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "API error. Missing user or group");
|
||||
}
|
||||
logger.debug("Creating new credentials for user id={} group id={} ", userId, groupId);
|
||||
try {
|
||||
String cmd = String.format("/user/credentials?userId=%s&groupId=%s", userId, groupId);
|
||||
final HttpResponse response = put(cmd, null);
|
||||
if (response.getStatusLine().getStatusCode() == HttpStatus.SC_FORBIDDEN) {
|
||||
String msg = String.format("Maximum credentials reached for user id=%s group id=%s. Consult your HyperStore Administrator", userId, groupId);
|
||||
logger.error(msg);
|
||||
throw new ServerApiException(ApiErrorCode.ACCOUNT_RESOURCE_LIMIT_ERROR, msg);
|
||||
}
|
||||
checkResponseOK(response);
|
||||
final ObjectMapper mapper = new ObjectMapper();
|
||||
return mapper.readValue(response.getEntity().getContent(), CloudianCredential.class);
|
||||
} catch (final IOException e) {
|
||||
logger.error("Failed to create credential due to:", e);
|
||||
throwTimeoutOrServerException(e);
|
||||
return null; // never reached
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a list of Root credentials for the given user.
|
||||
* @param userId Cloudian userId
|
||||
* @param groupId Cloudian groupId
|
||||
* @return a potentially empty list of Root CloudianCredentials
|
||||
* @throws ServerApiException on non-2xx response or timeout
|
||||
*/
|
||||
public List<CloudianCredential> listCredentials(final String userId, final String groupId) {
|
||||
return listCredentials(userId, groupId, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a list of credentials for the given user.
|
||||
* @param userId Cloudian userId
|
||||
* @param groupId Cloudian groupId
|
||||
* @param rootOnly true only returns root credentials, false returns IAM credentials also.
|
||||
* @return a potentially empty list of CloudianCredentials
|
||||
* @throws ServerApiException on non-2xx response or timeout
|
||||
*/
|
||||
public List<CloudianCredential> listCredentials(final String userId, final String groupId, final boolean rootOnly) {
|
||||
if (StringUtils.isAnyBlank(userId, groupId)) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
logger.debug("Listing credentials for Cloudian user id={} group id={}", userId, groupId);
|
||||
try {
|
||||
String cmd = String.format("/user/credentials/list?userId=%s&groupId=%s&isRootAccountOnly=%b", userId, groupId, rootOnly);
|
||||
final HttpResponse response = get(cmd);
|
||||
checkResponseOK(response);
|
||||
if (noResponseEntity(response)) {
|
||||
return new ArrayList<>(); // No credentials to be listed case -> 204
|
||||
}
|
||||
final ObjectMapper mapper = new ObjectMapper();
|
||||
return Arrays.asList(mapper.readValue(response.getEntity().getContent(), CloudianCredential[].class));
|
||||
} catch (final IOException e) {
|
||||
logger.error("Failed to list credentials due to:", e);
|
||||
throwTimeoutOrServerException(e);
|
||||
return new ArrayList<>(); // never reached
|
||||
}
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////
|
||||
//////////////// Public APIs: Group /////////////////////
|
||||
/////////////////////////////////////////////////////////
|
||||
|
|
@ -274,11 +495,17 @@ public class CloudianClient {
|
|||
return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
|
||||
} catch (final IOException e) {
|
||||
logger.error("Failed to add Cloudian group due to:", e);
|
||||
checkResponseTimeOut(e);
|
||||
throwTimeoutOrServerException(e);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the HyperStore group
|
||||
* @param groupId the group to return
|
||||
* @return the group if it exists or null if it does not exist.
|
||||
* @throws ServerApiException on error
|
||||
*/
|
||||
public CloudianGroup listGroup(final String groupId) {
|
||||
if (StringUtils.isEmpty(groupId)) {
|
||||
return null;
|
||||
|
|
@ -287,16 +514,16 @@ public class CloudianClient {
|
|||
try {
|
||||
final HttpResponse response = get(String.format("/group?groupId=%s", groupId));
|
||||
checkResponseOK(response);
|
||||
if (checkEmptyResponse(response)) {
|
||||
return null;
|
||||
if (noResponseEntity(response)) {
|
||||
return null; // Group Not Found
|
||||
}
|
||||
final ObjectMapper mapper = new ObjectMapper();
|
||||
return mapper.readValue(response.getEntity().getContent(), CloudianGroup.class);
|
||||
} catch (final IOException e) {
|
||||
logger.error("Failed to list Cloudian group due to:", e);
|
||||
checkResponseTimeOut(e);
|
||||
throwTimeoutOrServerException(e);
|
||||
return null; // never reached
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
public List<CloudianGroup> listGroups() {
|
||||
|
|
@ -304,16 +531,20 @@ public class CloudianClient {
|
|||
try {
|
||||
final HttpResponse response = get("/group/list");
|
||||
checkResponseOK(response);
|
||||
if (checkEmptyResponse(response)) {
|
||||
return new ArrayList<>();
|
||||
if (noResponseEntity(response)) {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "API error");
|
||||
}
|
||||
InputStream iStream = getNonEmptyContentStream(response);
|
||||
if (iStream == null) {
|
||||
return new ArrayList<>(); // Empty body => empty list
|
||||
}
|
||||
final ObjectMapper mapper = new ObjectMapper();
|
||||
return Arrays.asList(mapper.readValue(response.getEntity().getContent(), CloudianGroup[].class));
|
||||
return Arrays.asList(mapper.readValue(iStream, CloudianGroup[].class));
|
||||
} catch (final IOException e) {
|
||||
logger.error("Failed to list Cloudian groups due to:", e);
|
||||
checkResponseTimeOut(e);
|
||||
throwTimeoutOrServerException(e);
|
||||
return new ArrayList<>(); // never reached
|
||||
}
|
||||
return new ArrayList<>();
|
||||
}
|
||||
|
||||
public boolean updateGroup(final CloudianGroup group) {
|
||||
|
|
@ -326,7 +557,7 @@ public class CloudianClient {
|
|||
return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
|
||||
} catch (final IOException e) {
|
||||
logger.error("Failed to update group due to:", e);
|
||||
checkResponseTimeOut(e);
|
||||
throwTimeoutOrServerException(e);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
|
@ -341,7 +572,7 @@ public class CloudianClient {
|
|||
return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
|
||||
} catch (final IOException e) {
|
||||
logger.error("Failed to remove group due to:", e);
|
||||
checkResponseTimeOut(e);
|
||||
throwTimeoutOrServerException(e);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,88 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.cloudstack.cloudian.client;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||
import java.util.Date;
|
||||
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
public class CloudianCredential {
|
||||
|
||||
String accessKey;
|
||||
Boolean active;
|
||||
Date createDate;
|
||||
Date expireDate;
|
||||
String secretKey;
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return String.format("Cloudian Credential [ak=%s, sk=***, createDate=%s, expireDate=%s, active=%s]", accessKey, createDate, expireDate, active);
|
||||
}
|
||||
|
||||
public String getAccessKey() {
|
||||
return accessKey;
|
||||
}
|
||||
|
||||
public void setAccessKey(String accessKey) {
|
||||
this.accessKey = accessKey;
|
||||
}
|
||||
|
||||
public String getSecretKey() {
|
||||
return secretKey;
|
||||
}
|
||||
|
||||
public void setSecretKey(String secretKey) {
|
||||
this.secretKey = secretKey;
|
||||
}
|
||||
|
||||
public Date getCreateDate() {
|
||||
return createDate;
|
||||
}
|
||||
|
||||
public void setCreateDate(Date createDate) {
|
||||
this.createDate = createDate;
|
||||
}
|
||||
|
||||
public Date getExpireDate() {
|
||||
return expireDate;
|
||||
}
|
||||
|
||||
public void setExpireDate(Date expireDate) {
|
||||
this.expireDate = expireDate;
|
||||
}
|
||||
|
||||
public Boolean getActive() {
|
||||
return active;
|
||||
}
|
||||
|
||||
public void setActive(Boolean active) {
|
||||
this.active = active;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true if this credential is newer than the other credential.
|
||||
* @param other the credential to compare against
|
||||
* @return true only if it is known to be newer, false if anything is null.
|
||||
*/
|
||||
public boolean isNewerThan(CloudianCredential other) {
|
||||
if (this.createDate == null || other == null || other.createDate == null) {
|
||||
return false;
|
||||
}
|
||||
return this.createDate.after(other.createDate);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,106 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.cloudstack.cloudian.client;
|
||||
|
||||
import java.util.List;
|
||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
public class CloudianUserBucketUsage {
|
||||
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
public static class CloudianBucketUsage {
|
||||
private String bucketName;
|
||||
private Long byteCount;
|
||||
private Long objectCount;
|
||||
private String policyName;
|
||||
|
||||
/**
|
||||
* Get the name of the bucket the usage stats belong to
|
||||
* @return the bucket name
|
||||
*/
|
||||
public String getBucketName() {
|
||||
return bucketName;
|
||||
}
|
||||
public void setBucketName(String bucketName) {
|
||||
this.bucketName = bucketName;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the number of bytes used by this bucket.
|
||||
*
|
||||
* Note: This size includes bucket and object metadata.
|
||||
*
|
||||
* @return bytes used by the bucket
|
||||
*/
|
||||
public Long getByteCount() {
|
||||
return byteCount;
|
||||
}
|
||||
public void setByteCount(Long byteCount) {
|
||||
this.byteCount = byteCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the number of objects stored in the bucket.
|
||||
*
|
||||
* @return object count in the bucket
|
||||
*/
|
||||
public Long getObjectCount() {
|
||||
return objectCount;
|
||||
}
|
||||
public void setObjectCount(Long objectCount) {
|
||||
this.objectCount = objectCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the storage policy this bucket belongs to.
|
||||
* @return the name of the HyperStore storage policy.
|
||||
*/
|
||||
public String getPolicyName() {
|
||||
return policyName;
|
||||
}
|
||||
public void setPolicyName(String policyName) {
|
||||
this.policyName = policyName;
|
||||
}
|
||||
}
|
||||
|
||||
private String userId;
|
||||
private List<CloudianBucketUsage> buckets;
|
||||
|
||||
/**
|
||||
* Get the HyperStore userId this usage info belongs to
|
||||
* @return the HyperStore userId
|
||||
*/
|
||||
public String getUserId() {
|
||||
return userId;
|
||||
}
|
||||
public void setUserId(String userId) {
|
||||
this.userId = userId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the list of bucket usage objects belonging to this HyperStore userId.
|
||||
* @return list of bucket usage objects.
|
||||
*/
|
||||
public List<CloudianBucketUsage> getBuckets() {
|
||||
return buckets;
|
||||
}
|
||||
public void setBuckets(List<CloudianBucketUsage> buckets) {
|
||||
this.buckets = buckets;
|
||||
}
|
||||
}
|
||||
|
|
@ -1,416 +0,0 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.cloudstack.cloudian;
|
||||
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.containing;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.delete;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.deleteRequestedFor;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.equalTo;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.get;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.getRequestedFor;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.post;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.put;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.putRequestedFor;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.urlPathMatching;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.verify;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.cloudstack.api.ServerApiException;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianClient;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianGroup;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianUser;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Rule;
|
||||
import org.junit.Test;
|
||||
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.github.tomakehurst.wiremock.client.BasicCredentials;
|
||||
import com.github.tomakehurst.wiremock.junit.WireMockRule;
|
||||
|
||||
public class CloudianClientTest {
|
||||
private final int port = 14333;
|
||||
private final int timeout = 2;
|
||||
private final String adminUsername = "admin";
|
||||
private final String adminPassword = "public";
|
||||
private CloudianClient client;
|
||||
|
||||
@Rule
|
||||
public WireMockRule wireMockRule = new WireMockRule(port);
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
client = new CloudianClient("localhost", port, "http", adminUsername, adminPassword, false, timeout);
|
||||
}
|
||||
|
||||
private CloudianUser getTestUser() {
|
||||
final CloudianUser user = new CloudianUser();
|
||||
user.setActive(true);
|
||||
user.setUserId("someUserId");
|
||||
user.setGroupId("someGroupId");
|
||||
user.setUserType(CloudianUser.USER);
|
||||
user.setFullName("John Doe");
|
||||
return user;
|
||||
}
|
||||
|
||||
private CloudianGroup getTestGroup() {
|
||||
final CloudianGroup group = new CloudianGroup();
|
||||
group.setActive(true);
|
||||
group.setGroupId("someGroupId");
|
||||
group.setGroupName("someGroupName");
|
||||
return group;
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////
|
||||
//////////////// General API tests /////////////////////
|
||||
////////////////////////////////////////////////////////
|
||||
|
||||
@Test(expected = CloudRuntimeException.class)
|
||||
public void testRequestTimeout() {
|
||||
wireMockRule.stubFor(get(urlEqualTo("/group/list"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withStatus(200)
|
||||
.withFixedDelay(2 * timeout * 1000)
|
||||
.withBody("")));
|
||||
client.listGroups();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBasicAuth() {
|
||||
wireMockRule.stubFor(get(urlEqualTo("/group/list"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody("[]")));
|
||||
client.listGroups();
|
||||
verify(getRequestedFor(urlEqualTo("/group/list"))
|
||||
.withBasicAuth(new BasicCredentials(adminUsername, adminPassword)));
|
||||
}
|
||||
|
||||
@Test(expected = ServerApiException.class)
|
||||
public void testBasicAuthFailure() {
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(401)
|
||||
.withBody("")));
|
||||
client.listUser("someUserId", "somegGroupId");
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
//////////////// User API tests /////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
@Test
|
||||
public void addUserAccount() {
|
||||
wireMockRule.stubFor(put(urlEqualTo("/user"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody("")));
|
||||
|
||||
final CloudianUser user = getTestUser();
|
||||
boolean result = client.addUser(user);
|
||||
Assert.assertTrue(result);
|
||||
verify(putRequestedFor(urlEqualTo("/user"))
|
||||
.withRequestBody(containing("userId\":\"" + user.getUserId()))
|
||||
.withHeader("content-type", equalTo("application/json")));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void addUserAccountFail() {
|
||||
wireMockRule.stubFor(put(urlEqualTo("/user"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(400)
|
||||
.withBody("")));
|
||||
|
||||
final CloudianUser user = getTestUser();
|
||||
boolean result = client.addUser(user);
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listUserAccount() {
|
||||
final String userId = "someUser";
|
||||
final String groupId = "someGroup";
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user?.*"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withBody("{\"userId\":\"someUser\",\"userType\":\"User\",\"fullName\":\"John Doe (jdoe)\",\"emailAddr\":\"j@doe.com\",\"address1\":null,\"address2\":null,\"city\":null,\"state\":null,\"zip\":null,\"country\":null,\"phone\":null,\"groupId\":\"someGroup\",\"website\":null,\"active\":\"true\",\"canonicalUserId\":\"b3940886468689d375ebf8747b151c37\",\"ldapEnabled\":false}")));
|
||||
|
||||
final CloudianUser user = client.listUser(userId, groupId);
|
||||
Assert.assertEquals(user.getActive(), true);
|
||||
Assert.assertEquals(user.getUserId(), userId);
|
||||
Assert.assertEquals(user.getGroupId(), groupId);
|
||||
Assert.assertEquals(user.getUserType(), "User");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listUserAccountFail() {
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user?.*"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withBody("")));
|
||||
|
||||
final CloudianUser user = client.listUser("abc", "xyz");
|
||||
Assert.assertNull(user);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listUserAccounts() {
|
||||
final String groupId = "someGroup";
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user/list?.*"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withBody("[{\"userId\":\"someUser\",\"userType\":\"User\",\"fullName\":\"John Doe (jdoe)\",\"emailAddr\":\"j@doe.com\",\"address1\":null,\"address2\":null,\"city\":null,\"state\":null,\"zip\":null,\"country\":null,\"phone\":null,\"groupId\":\"someGroup\",\"website\":null,\"active\":\"true\",\"canonicalUserId\":\"b3940886468689d375ebf8747b151c37\",\"ldapEnabled\":false}]")));
|
||||
|
||||
final List<CloudianUser> users = client.listUsers(groupId);
|
||||
Assert.assertEquals(users.size(), 1);
|
||||
Assert.assertEquals(users.get(0).getActive(), true);
|
||||
Assert.assertEquals(users.get(0).getGroupId(), groupId);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEmptyListUsersResponse() {
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user/list"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withStatus(204)
|
||||
.withBody("")));
|
||||
Assert.assertTrue(client.listUsers("someGroup").size() == 0);
|
||||
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withStatus(204)
|
||||
.withBody("")));
|
||||
Assert.assertNull(client.listUser("someUserId", "someGroupId"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listUserAccountsFail() {
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user/list?.*"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withBody("")));
|
||||
|
||||
final List<CloudianUser> users = client.listUsers("xyz");
|
||||
Assert.assertEquals(users.size(), 0);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateUserAccount() {
|
||||
wireMockRule.stubFor(post(urlEqualTo("/user"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody("")));
|
||||
|
||||
final CloudianUser user = getTestUser();
|
||||
boolean result = client.updateUser(user);
|
||||
Assert.assertTrue(result);
|
||||
verify(postRequestedFor(urlEqualTo("/user"))
|
||||
.withRequestBody(containing("userId\":\"" + user.getUserId()))
|
||||
.withHeader("content-type", equalTo("application/json")));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateUserAccountFail() {
|
||||
wireMockRule.stubFor(post(urlEqualTo("/user"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(400)
|
||||
.withBody("")));
|
||||
|
||||
boolean result = client.updateUser(getTestUser());
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void removeUserAccount() {
|
||||
wireMockRule.stubFor(delete(urlPathMatching("/user.*"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody("")));
|
||||
final CloudianUser user = getTestUser();
|
||||
boolean result = client.removeUser(user.getUserId(), user.getGroupId());
|
||||
Assert.assertTrue(result);
|
||||
verify(deleteRequestedFor(urlPathMatching("/user.*"))
|
||||
.withQueryParam("userId", equalTo(user.getUserId())));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void removeUserAccountFail() {
|
||||
wireMockRule.stubFor(delete(urlPathMatching("/user.*"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(400)
|
||||
.withBody("")));
|
||||
final CloudianUser user = getTestUser();
|
||||
boolean result = client.removeUser(user.getUserId(), user.getGroupId());
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
//////////////// Group API tests /////////////////////
|
||||
//////////////////////////////////////////////////////
|
||||
|
||||
@Test
|
||||
public void addGroup() {
|
||||
wireMockRule.stubFor(put(urlEqualTo("/group"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody("")));
|
||||
|
||||
final CloudianGroup group = getTestGroup();
|
||||
boolean result = client.addGroup(group);
|
||||
Assert.assertTrue(result);
|
||||
verify(putRequestedFor(urlEqualTo("/group"))
|
||||
.withRequestBody(containing("groupId\":\"someGroupId"))
|
||||
.withHeader("content-type", equalTo("application/json")));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void addGroupFail() throws Exception {
|
||||
wireMockRule.stubFor(put(urlEqualTo("/group"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(400)
|
||||
.withBody("")));
|
||||
|
||||
final CloudianGroup group = getTestGroup();
|
||||
boolean result = client.addGroup(group);
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listGroup() {
|
||||
final String groupId = "someGroup";
|
||||
wireMockRule.stubFor(get(urlPathMatching("/group.*"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withBody("{\"groupId\":\"someGroup\",\"groupName\":\"/someDomain\",\"ldapGroup\":null,\"active\":\"true\",\"ldapEnabled\":false,\"ldapServerURL\":null,\"ldapUserDNTemplate\":null,\"ldapSearch\":null,\"ldapSearchUserBase\":null,\"ldapMatchAttribute\":null}")));
|
||||
|
||||
final CloudianGroup group = client.listGroup(groupId);
|
||||
Assert.assertEquals(group.getActive(), true);
|
||||
Assert.assertEquals(group.getGroupId(), groupId);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listGroupFail() {
|
||||
wireMockRule.stubFor(get(urlPathMatching("/group.*"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withBody("")));
|
||||
|
||||
final CloudianGroup group = client.listGroup("xyz");
|
||||
Assert.assertNull(group);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listGroups() {
|
||||
final String groupId = "someGroup";
|
||||
wireMockRule.stubFor(get(urlEqualTo("/group/list"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withBody("[{\"groupId\":\"someGroup\",\"groupName\":\"/someDomain\",\"ldapGroup\":null,\"active\":\"true\",\"ldapEnabled\":false,\"ldapServerURL\":null,\"ldapUserDNTemplate\":null,\"ldapSearch\":null,\"ldapSearchUserBase\":null,\"ldapMatchAttribute\":null}]")));
|
||||
|
||||
final List<CloudianGroup> groups = client.listGroups();
|
||||
Assert.assertEquals(groups.size(), 1);
|
||||
Assert.assertEquals(groups.get(0).getActive(), true);
|
||||
Assert.assertEquals(groups.get(0).getGroupId(), groupId);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listGroupsFail() {
|
||||
wireMockRule.stubFor(get(urlEqualTo("/group/list"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withBody("")));
|
||||
|
||||
final List<CloudianGroup> groups = client.listGroups();
|
||||
Assert.assertEquals(groups.size(), 0);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEmptyListGroupResponse() {
|
||||
wireMockRule.stubFor(get(urlEqualTo("/group/list"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withStatus(204)
|
||||
.withBody("")));
|
||||
|
||||
Assert.assertTrue(client.listGroups().size() == 0);
|
||||
|
||||
|
||||
wireMockRule.stubFor(get(urlPathMatching("/group"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withStatus(204)
|
||||
.withBody("")));
|
||||
Assert.assertNull(client.listGroup("someGroup"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateGroup() {
|
||||
wireMockRule.stubFor(post(urlEqualTo("/group"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody("")));
|
||||
|
||||
final CloudianGroup group = getTestGroup();
|
||||
boolean result = client.updateGroup(group);
|
||||
Assert.assertTrue(result);
|
||||
verify(postRequestedFor(urlEqualTo("/group"))
|
||||
.withRequestBody(containing("groupId\":\"" + group.getGroupId()))
|
||||
.withHeader("content-type", equalTo("application/json")));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateGroupFail() {
|
||||
wireMockRule.stubFor(post(urlEqualTo("/group"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(400)
|
||||
.withBody("")));
|
||||
|
||||
boolean result = client.updateGroup(getTestGroup());
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void removeGroup() {
|
||||
wireMockRule.stubFor(delete(urlPathMatching("/group.*"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody("")));
|
||||
final CloudianGroup group = getTestGroup();
|
||||
boolean result = client.removeGroup(group.getGroupId());
|
||||
Assert.assertTrue(result);
|
||||
verify(deleteRequestedFor(urlPathMatching("/group.*"))
|
||||
.withQueryParam("groupId", equalTo(group.getGroupId())));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void removeGroupFail() {
|
||||
wireMockRule.stubFor(delete(urlPathMatching("/group.*"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(400)
|
||||
.withBody("")));
|
||||
final CloudianGroup group = getTestGroup();
|
||||
boolean result = client.removeGroup(group.getGroupId());
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
}
|
||||
|
|
@ -37,7 +37,7 @@ public class CloudianUtilsTest {
|
|||
|
||||
// test expectations
|
||||
final String expPath = "/Cloudian/ssosecurelogin.htm";
|
||||
HashMap<String, String> expected = new HashMap();
|
||||
HashMap<String, String> expected = new HashMap<String, String>();
|
||||
expected.put("user", user);
|
||||
expected.put("group", group);
|
||||
expected.put("timestamp", null); // null value will not be checked by this test
|
||||
|
|
|
|||
|
|
@ -0,0 +1,790 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.cloudstack.cloudian.client;
|
||||
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.containing;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.delete;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.deleteRequestedFor;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.equalTo;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.get;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.getRequestedFor;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.post;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.put;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.putRequestedFor;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.urlPathMatching;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.verify;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.HttpResponse;
|
||||
|
||||
import org.apache.cloudstack.api.ApiErrorCode;
|
||||
import org.apache.cloudstack.api.ServerApiException;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianUserBucketUsage.CloudianBucketUsage;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Rule;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.github.tomakehurst.wiremock.client.BasicCredentials;
|
||||
import com.github.tomakehurst.wiremock.junit.WireMockRule;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class CloudianClientTest {
|
||||
private final int port = 14333;
|
||||
private final int timeout = 2;
|
||||
private final String adminUsername = "admin";
|
||||
private final String adminPassword = "public";
|
||||
private CloudianClient client;
|
||||
|
||||
@Rule
|
||||
public WireMockRule wireMockRule = new WireMockRule(port);
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
client = new CloudianClient("localhost", port, "http", adminUsername, adminPassword, false, timeout);
|
||||
}
|
||||
|
||||
private CloudianUser getTestUser() {
|
||||
final CloudianUser user = new CloudianUser();
|
||||
user.setActive(true);
|
||||
user.setUserId("someUserId");
|
||||
user.setGroupId("someGroupId");
|
||||
user.setUserType(CloudianUser.USER);
|
||||
user.setFullName("John Doe");
|
||||
return user;
|
||||
}
|
||||
|
||||
private CloudianGroup getTestGroup() {
|
||||
final CloudianGroup group = new CloudianGroup();
|
||||
group.setActive(true);
|
||||
group.setGroupId("someGroupId");
|
||||
group.setGroupName("someGroupName");
|
||||
return group;
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////
|
||||
//////////////// General API tests /////////////////////
|
||||
////////////////////////////////////////////////////////
|
||||
|
||||
@Test(expected = CloudRuntimeException.class)
|
||||
public void testRequestTimeout() {
|
||||
wireMockRule.stubFor(get(urlEqualTo("/group/list"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withStatus(200)
|
||||
.withFixedDelay(2 * timeout * 1000)
|
||||
.withBody("")));
|
||||
client.listGroups();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBasicAuth() {
|
||||
wireMockRule.stubFor(get(urlEqualTo("/group/list"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody("[]")));
|
||||
client.listGroups();
|
||||
verify(getRequestedFor(urlEqualTo("/group/list"))
|
||||
.withBasicAuth(new BasicCredentials(adminUsername, adminPassword)));
|
||||
}
|
||||
|
||||
@Test(expected = ServerApiException.class)
|
||||
public void testBasicAuthFailure() {
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(401)
|
||||
.withBody("")));
|
||||
client.listUser("someUserId", "somegGroupId");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getNonEmptyContentStreamEmpty() {
|
||||
InputStream emptyStream = new ByteArrayInputStream(new byte[]{});
|
||||
HttpEntity entity = mock(HttpEntity.class);
|
||||
HttpResponse response = mock(HttpResponse.class);
|
||||
when(response.getEntity()).thenReturn(entity);
|
||||
try {
|
||||
when(entity.getContent()).thenReturn(emptyStream);
|
||||
Assert.assertNull(client.getNonEmptyContentStream(response));
|
||||
} catch (IOException e) {
|
||||
Assert.fail("Should not be any exception here");
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getNonEmptyContentStreamWithContent() {
|
||||
InputStream nonEmptyStream = new ByteArrayInputStream(new byte[]{9, 8});
|
||||
HttpEntity entity = mock(HttpEntity.class);
|
||||
HttpResponse response = mock(HttpResponse.class);
|
||||
when(response.getEntity()).thenReturn(entity);
|
||||
try {
|
||||
when(entity.getContent()).thenReturn(nonEmptyStream);
|
||||
InputStream is = client.getNonEmptyContentStream(response);
|
||||
Assert.assertNotNull(is);
|
||||
Assert.assertEquals(9, is.read());
|
||||
Assert.assertEquals(8, is.read());
|
||||
Assert.assertEquals(-1, is.read());
|
||||
} catch (IOException e) {
|
||||
Assert.fail("Should not be any exception here");
|
||||
}
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
//////////////// System API tests ///////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
@Test
|
||||
public void getServerVersion() {
|
||||
final String expect = "8.1 Compiled: 2023-11-11 16:30";
|
||||
wireMockRule.stubFor(get(urlEqualTo("/system/version"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody(expect)));
|
||||
|
||||
String version = client.getServerVersion();
|
||||
Assert.assertEquals(expect, version);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getUserBucketUsagesBadUsageBlankGroup() {
|
||||
ServerApiException thrown = Assert.assertThrows(ServerApiException.class, () -> client.getUserBucketUsages(null, null, null));
|
||||
Assert.assertNotNull(thrown);
|
||||
Assert.assertEquals(ApiErrorCode.PARAM_ERROR, thrown.getErrorCode());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getUserBucketUsagesBadUsageBlankUserWithBucket() {
|
||||
ServerApiException thrown = Assert.assertThrows(ServerApiException.class, () -> client.getUserBucketUsages("group", "", "bucket"));
|
||||
Assert.assertNotNull(thrown);
|
||||
Assert.assertEquals(ApiErrorCode.PARAM_ERROR, thrown.getErrorCode());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getUserBucketUsagesEmptyGroup() {
|
||||
wireMockRule.stubFor(get(urlEqualTo("/system/bucketusage?groupId=mygroup"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withStatus(200)
|
||||
.withBody("[]")));
|
||||
List<CloudianUserBucketUsage> bucketUsages = client.getUserBucketUsages("mygroup", null, null);
|
||||
Assert.assertEquals(0, bucketUsages.size());
|
||||
}
|
||||
|
||||
@Test(expected = CloudRuntimeException.class)
|
||||
public void getUserBucketUsagesNoSuchGroup() {
|
||||
// no group, no user, no bucket etc are all 400
|
||||
wireMockRule.stubFor(get(urlEqualTo("/system/bucketusage?groupId=mygroup"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(400)
|
||||
.withBody("")));
|
||||
client.getUserBucketUsages("mygroup", null, null);
|
||||
Assert.fail("The request should throw an exception");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getUserBucketUsagesUserNoBuckets() {
|
||||
wireMockRule.stubFor(get(urlEqualTo("/system/bucketusage?groupId=mygroup&userId=u1"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withStatus(200)
|
||||
.withBody("[{\"userId\": \"u1\", \"buckets\": []}]")));
|
||||
List<CloudianUserBucketUsage> bucketUsages = client.getUserBucketUsages("mygroup", "u1", null);
|
||||
Assert.assertEquals(1, bucketUsages.size());
|
||||
CloudianUserBucketUsage u1 = bucketUsages.get(0);
|
||||
Assert.assertEquals("u1", u1.getUserId());
|
||||
Assert.assertEquals(0, u1.getBuckets().size());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getUserBucketUsagesForBucket() {
|
||||
wireMockRule.stubFor(get(urlEqualTo("/system/bucketusage?groupId=mygroup&userId=u1&bucket=b1"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withStatus(200)
|
||||
.withBody("[{\"userId\": \"u1\", \"buckets\": [{\"bucketName\":\"b1\",\"objectCount\":1,\"byteCount\":5,\"policyName\":\"p1\"}]}]")));
|
||||
List<CloudianUserBucketUsage> bucketUsages = client.getUserBucketUsages("mygroup", "u1", "b1");
|
||||
Assert.assertEquals(1, bucketUsages.size());
|
||||
CloudianUserBucketUsage u1 = bucketUsages.get(0);
|
||||
Assert.assertEquals("u1", u1.getUserId());
|
||||
Assert.assertEquals(1, u1.getBuckets().size());
|
||||
CloudianBucketUsage cbu = u1.getBuckets().get(0);
|
||||
Assert.assertEquals("b1", cbu.getBucketName());
|
||||
Assert.assertEquals(5L, cbu.getByteCount().longValue());
|
||||
Assert.assertEquals(1L, cbu.getObjectCount().longValue());
|
||||
Assert.assertEquals("p1", cbu.getPolicyName());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getUserBucketUsagesOneUserTwoBuckets() {
|
||||
CloudianUserBucketUsage expect_u1 = new CloudianUserBucketUsage();
|
||||
expect_u1.setUserId("u1");
|
||||
CloudianBucketUsage b1 = new CloudianBucketUsage();
|
||||
b1.setBucketName("b1");
|
||||
b1.setByteCount(123L);
|
||||
b1.setObjectCount(456L);
|
||||
b1.setPolicyName("pname");
|
||||
CloudianBucketUsage b2 = new CloudianBucketUsage();
|
||||
b2.setBucketName("b2");
|
||||
b2.setByteCount(789L);
|
||||
b2.setObjectCount(0L);
|
||||
b2.setPolicyName("pname2");
|
||||
List<CloudianBucketUsage> buckets = new ArrayList<CloudianBucketUsage>();
|
||||
buckets.add(b1);
|
||||
buckets.add(b2);
|
||||
expect_u1.setBuckets(buckets);
|
||||
int expect_size = buckets.size();
|
||||
|
||||
int bucket_count = 0;
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("[{\"userId\": \"u1\", \"buckets\": [");
|
||||
for (CloudianBucketUsage b : buckets) {
|
||||
sb.append("{\"bucketName\": \"");
|
||||
sb.append(b.getBucketName());
|
||||
sb.append("\", \"byteCount\": ");
|
||||
sb.append(b.getByteCount());
|
||||
sb.append(", \"objectCount\": ");
|
||||
sb.append(b.getObjectCount());
|
||||
sb.append(", \"policyName\": \"");
|
||||
sb.append(b.getPolicyName());
|
||||
sb.append("\"}");
|
||||
if (++bucket_count < expect_size) {
|
||||
sb.append(",");
|
||||
}
|
||||
}
|
||||
sb.append("]}]");
|
||||
wireMockRule.stubFor(get(urlEqualTo("/system/bucketusage?groupId=mygroup&userId=u1"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withStatus(200)
|
||||
.withBody(sb.toString())));
|
||||
List<CloudianUserBucketUsage> bucketUsages = client.getUserBucketUsages("mygroup", "u1", null);
|
||||
Assert.assertEquals(1, bucketUsages.size());
|
||||
CloudianUserBucketUsage u1 = bucketUsages.get(0);
|
||||
Assert.assertEquals("u1", u1.getUserId());
|
||||
Assert.assertEquals(expect_size, u1.getBuckets().size());
|
||||
for (int i = 0; i < expect_size; i++) {
|
||||
CloudianBucketUsage actual = u1.getBuckets().get(i);
|
||||
CloudianBucketUsage expected = buckets.get(i);
|
||||
Assert.assertEquals(expected.getBucketName(), actual.getBucketName());
|
||||
Assert.assertEquals(expected.getByteCount(), actual.getByteCount());
|
||||
Assert.assertEquals(expected.getObjectCount(), actual.getObjectCount());
|
||||
Assert.assertEquals(expected.getPolicyName(), actual.getPolicyName());
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getUserBucketUsagesTwoUsers() {
|
||||
CloudianUserBucketUsage expect_u1 = new CloudianUserBucketUsage();
|
||||
expect_u1.setUserId("u1");
|
||||
CloudianBucketUsage b1 = new CloudianBucketUsage();
|
||||
b1.setBucketName("b1");
|
||||
b1.setByteCount(123L);
|
||||
b1.setObjectCount(456L);
|
||||
b1.setPolicyName("pname");
|
||||
CloudianBucketUsage b2 = new CloudianBucketUsage();
|
||||
b2.setBucketName("b2");
|
||||
b2.setByteCount(789L);
|
||||
b2.setObjectCount(0L);
|
||||
b2.setPolicyName("pname2");
|
||||
List<CloudianBucketUsage> buckets = new ArrayList<CloudianBucketUsage>();
|
||||
buckets.add(b1);
|
||||
buckets.add(b2);
|
||||
expect_u1.setBuckets(buckets);
|
||||
int expect_size = buckets.size();
|
||||
|
||||
int bucket_count = 0;
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("[{\"userId\": \"u1\", \"buckets\": [");
|
||||
for (CloudianBucketUsage b : buckets) {
|
||||
sb.append("{\"bucketName\": \"");
|
||||
sb.append(b.getBucketName());
|
||||
sb.append("\", \"byteCount\": ");
|
||||
sb.append(b.getByteCount());
|
||||
sb.append(", \"objectCount\": ");
|
||||
sb.append(b.getObjectCount());
|
||||
sb.append(", \"policyName\": \"");
|
||||
sb.append(b.getPolicyName());
|
||||
sb.append("\"}");
|
||||
if (++bucket_count < expect_size) {
|
||||
sb.append(",");
|
||||
}
|
||||
}
|
||||
sb.append("]}, {\"userId\": \"u2\", \"buckets\": []}]");
|
||||
wireMockRule.stubFor(get(urlEqualTo("/system/bucketusage?groupId=mygroup"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withStatus(200)
|
||||
.withBody(sb.toString())));
|
||||
List<CloudianUserBucketUsage> bucketUsages = client.getUserBucketUsages("mygroup", null, null);
|
||||
Assert.assertEquals(2, bucketUsages.size());
|
||||
CloudianUserBucketUsage u1 = bucketUsages.get(0);
|
||||
Assert.assertEquals("u1", u1.getUserId());
|
||||
Assert.assertEquals(expect_size, u1.getBuckets().size());
|
||||
for (int i = 0; i < expect_size; i++) {
|
||||
CloudianBucketUsage actual = u1.getBuckets().get(i);
|
||||
CloudianBucketUsage expected = buckets.get(i);
|
||||
Assert.assertEquals(expected.getBucketName(), actual.getBucketName());
|
||||
Assert.assertEquals(expected.getByteCount(), actual.getByteCount());
|
||||
Assert.assertEquals(expected.getObjectCount(), actual.getObjectCount());
|
||||
Assert.assertEquals(expected.getPolicyName(), actual.getPolicyName());
|
||||
}
|
||||
// 2nd user has 0 buckets
|
||||
CloudianUserBucketUsage u2 = bucketUsages.get(1);
|
||||
Assert.assertEquals("u2", u2.getUserId());
|
||||
Assert.assertEquals(0, u2.getBuckets().size());
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
//////////////// User API tests /////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
@Test
|
||||
public void addUserAccount() {
|
||||
wireMockRule.stubFor(put(urlEqualTo("/user"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody("")));
|
||||
|
||||
final CloudianUser user = getTestUser();
|
||||
boolean result = client.addUser(user);
|
||||
Assert.assertTrue(result);
|
||||
verify(putRequestedFor(urlEqualTo("/user"))
|
||||
.withRequestBody(containing("userId\":\"" + user.getUserId()))
|
||||
.withHeader("content-type", equalTo("application/json")));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void addUserAccountFail() {
|
||||
wireMockRule.stubFor(put(urlEqualTo("/user"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(400)
|
||||
.withBody("")));
|
||||
|
||||
final CloudianUser user = getTestUser();
|
||||
boolean result = client.addUser(user);
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listUserAccount() {
|
||||
final String userId = "someUser";
|
||||
final String groupId = "someGroup";
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user?.*"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withBody("{\"userId\":\"someUser\",\"userType\":\"User\",\"fullName\":\"John Doe (jdoe)\",\"emailAddr\":\"j@doe.com\",\"address1\":null,\"address2\":null,\"city\":null,\"state\":null,\"zip\":null,\"country\":null,\"phone\":null,\"groupId\":\"someGroup\",\"website\":null,\"active\":\"true\",\"canonicalUserId\":\"b3940886468689d375ebf8747b151c37\",\"ldapEnabled\":false}")));
|
||||
|
||||
final CloudianUser user = client.listUser(userId, groupId);
|
||||
Assert.assertEquals(user.getActive(), true);
|
||||
Assert.assertEquals(user.getUserId(), userId);
|
||||
Assert.assertEquals(user.getGroupId(), groupId);
|
||||
Assert.assertEquals(user.getUserType(), "User");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listUserAccountNotFound() {
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user?.*"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withStatus(204) // 204 not found
|
||||
.withBody("")));
|
||||
|
||||
final CloudianUser user = client.listUser("abc", "xyz");
|
||||
Assert.assertNull(user);
|
||||
}
|
||||
|
||||
@Test(expected = ServerApiException.class)
|
||||
public void listUserAccountFail() {
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user?.*"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withBody("")));
|
||||
|
||||
client.listUser("abc", "xyz");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listUserAccounts() {
|
||||
final String groupId = "someGroup";
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user/list?.*"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withBody("[{\"userId\":\"someUser\",\"userType\":\"User\",\"fullName\":\"John Doe (jdoe)\",\"emailAddr\":\"j@doe.com\",\"address1\":null,\"address2\":null,\"city\":null,\"state\":null,\"zip\":null,\"country\":null,\"phone\":null,\"groupId\":\"someGroup\",\"website\":null,\"active\":\"true\",\"canonicalUserId\":\"b3940886468689d375ebf8747b151c37\",\"ldapEnabled\":false}]")));
|
||||
|
||||
final List<CloudianUser> users = client.listUsers(groupId);
|
||||
Assert.assertEquals(users.size(), 1);
|
||||
Assert.assertEquals(users.get(0).getActive(), true);
|
||||
Assert.assertEquals(users.get(0).getGroupId(), groupId);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listUserAccountsEmptyList() {
|
||||
// empty body with 200 is returned if either:
|
||||
// 1. the group is unknown (ie. there is no not found case)
|
||||
// 2. the group contains no users
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user/list"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withBody("")));
|
||||
Assert.assertEquals(0, client.listUsers("someGroup").size());
|
||||
}
|
||||
|
||||
@Test(expected = ServerApiException.class)
|
||||
public void listUserAccountsFail() {
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user/list?.*"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withStatus(204) // bad protocol response
|
||||
.withBody("")));
|
||||
|
||||
client.listUsers("xyz");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateUserAccount() {
|
||||
wireMockRule.stubFor(post(urlEqualTo("/user"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody("")));
|
||||
|
||||
final CloudianUser user = getTestUser();
|
||||
boolean result = client.updateUser(user);
|
||||
Assert.assertTrue(result);
|
||||
verify(postRequestedFor(urlEqualTo("/user"))
|
||||
.withRequestBody(containing("userId\":\"" + user.getUserId()))
|
||||
.withHeader("content-type", equalTo("application/json")));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateUserAccountFail() {
|
||||
wireMockRule.stubFor(post(urlEqualTo("/user"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(400)
|
||||
.withBody("")));
|
||||
|
||||
boolean result = client.updateUser(getTestUser());
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void removeUserAccount() {
|
||||
wireMockRule.stubFor(delete(urlPathMatching("/user.*"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody("")));
|
||||
final CloudianUser user = getTestUser();
|
||||
boolean result = client.removeUser(user.getUserId(), user.getGroupId());
|
||||
Assert.assertTrue(result);
|
||||
verify(deleteRequestedFor(urlPathMatching("/user.*"))
|
||||
.withQueryParam("userId", equalTo(user.getUserId())));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void removeUserAccountFail() {
|
||||
wireMockRule.stubFor(delete(urlPathMatching("/user.*"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(400)
|
||||
.withBody("")));
|
||||
final CloudianUser user = getTestUser();
|
||||
boolean result = client.removeUser(user.getUserId(), user.getGroupId());
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void createCredential() {
|
||||
final String expected_ak = "28d945de2a2623fc9483";
|
||||
final String expected_sk = "j2OrPGHF69hp3YsZHRHOCWdAQDabppsBtD7kttr9";
|
||||
final long expected_createDate = 1502285593100L;
|
||||
|
||||
final String json = String.format("{\"accessKey\": \"%s\", \"active\": true, \"createDate\": 1502285593100, \"expireDate\": null, \"secretKey\": \"%s\"}", expected_ak, expected_sk);
|
||||
wireMockRule.stubFor(put(urlPathMatching("/user/credentials.*"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody(json)));
|
||||
|
||||
CloudianCredential credential = client.createCredential("u1", "g1");
|
||||
Assert.assertEquals(expected_ak, credential.getAccessKey());
|
||||
Assert.assertEquals(expected_sk, credential.getSecretKey());
|
||||
Assert.assertEquals(true, credential.getActive());
|
||||
Assert.assertEquals(expected_createDate, credential.getCreateDate().getTime());
|
||||
Assert.assertNull(credential.getExpireDate());
|
||||
}
|
||||
|
||||
@Test(expected = ServerApiException.class)
|
||||
public void createCredentialNoSuchUser() {
|
||||
wireMockRule.stubFor(put(urlPathMatching("/user/credentials.*"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(400)
|
||||
.withBody("")));
|
||||
client.createCredential("u1", "g1");
|
||||
}
|
||||
|
||||
@Test(expected = ServerApiException.class)
|
||||
public void createCredentialMaxCredentials() {
|
||||
wireMockRule.stubFor(put(urlPathMatching("/user/credentials.*"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(403)
|
||||
.withBody("")));
|
||||
client.createCredential("u1", "g1");
|
||||
}
|
||||
|
||||
@Test(expected = ServerApiException.class)
|
||||
public void createCredentialBadMissingResponse() {
|
||||
wireMockRule.stubFor(put(urlPathMatching("/user/credentials.*"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody(""))); // 200 should return a credential
|
||||
client.createCredential("u1", "g1");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listCredentials() {
|
||||
final String expected_ak = "28d945de2a2623fc9483";
|
||||
final String expected_sk = "j2OrPGHF69hp3YsZHRHOCWdAQDabppsBtD7kttr9";
|
||||
|
||||
final String json = String.format("[{\"accessKey\": \"%s\", \"active\": true, \"createDate\": 1502285593100, \"expireDate\": null, \"secretKey\": \"%s\"}]", expected_ak, expected_sk);
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user/credentials/list.*"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody(json)));
|
||||
|
||||
List<CloudianCredential> credentials = client.listCredentials("u1", "g1");
|
||||
Assert.assertEquals(1, credentials.size());
|
||||
Assert.assertEquals(expected_ak, credentials.get(0).getAccessKey());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listCredentialsMany() {
|
||||
final String expected_ak = "28d945de2a2623fc9483";
|
||||
final String expected_sk = "j2OrPGHF69hp3YsZHRHOCWdAQDabppsBtD7kttr9";
|
||||
final int expected_size = 3;
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("[");
|
||||
for (int i = 0; i < expected_size; i++) {
|
||||
sb.append(String.format("{\"accessKey\": \"%s-%d\", \"active\": true, \"createDate\": 1502285593100, \"expireDate\": null, \"secretKey\": \"%s-%d\"}", expected_ak, i, expected_sk, i));
|
||||
if (i + 1 < expected_size) {
|
||||
sb.append(",");
|
||||
}
|
||||
}
|
||||
sb.append("]");
|
||||
String json = sb.toString();
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user/credentials/list.*"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200)
|
||||
.withBody(json)));
|
||||
|
||||
List<CloudianCredential> credentials = client.listCredentials("u1", "g1");
|
||||
Assert.assertEquals(expected_size, credentials.size());
|
||||
Assert.assertEquals(expected_ak + "-2", credentials.get(2).getAccessKey());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listCredentialsEmptyList() {
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user/credentials/list.*"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(204) // 204 is empty list for credentials
|
||||
.withBody("")));
|
||||
|
||||
List<CloudianCredential> credentials = client.listCredentials("u1", "g1");
|
||||
Assert.assertEquals(0, credentials.size());
|
||||
}
|
||||
|
||||
@Test(expected = ServerApiException.class)
|
||||
public void listCredentialsNoSuchUser() {
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user/credentials/list.*"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(400) // No such user case
|
||||
.withBody("")));
|
||||
|
||||
client.listCredentials("u1", "g1");
|
||||
}
|
||||
|
||||
@Test(expected = ServerApiException.class)
|
||||
public void listCredentialsBad200EmptyBody() {
|
||||
wireMockRule.stubFor(get(urlPathMatching("/user/credentials/list.*"))
|
||||
.willReturn(aResponse()
|
||||
.withStatus(200) // Bad protocol. should be 204 if empty
|
||||
.withBody("")));
|
||||
|
||||
client.listCredentials("u1", "g1");
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
//////////////// Group API tests /////////////////////
|
||||
//////////////////////////////////////////////////////
|
||||
|
||||
// Verifies addGroup() PUTs the group as a JSON body to /group and reports
// success on HTTP 200. Also checks the serialized body and content-type header.
@Test
public void addGroup() {
    wireMockRule.stubFor(put(urlEqualTo("/group"))
            .willReturn(aResponse()
                    .withStatus(200)
                    .withBody("")));

    final CloudianGroup group = getTestGroup();
    boolean result = client.addGroup(group);
    Assert.assertTrue(result);
    verify(putRequestedFor(urlEqualTo("/group"))
            .withRequestBody(containing("groupId\":\"someGroupId"))
            .withHeader("content-type", equalTo("application/json")));
}
|
||||
|
||||
// Verifies addGroup() returns false (rather than throwing) when the Admin API
// rejects the request with HTTP 400.
@Test
public void addGroupFail() throws Exception {
    wireMockRule.stubFor(put(urlEqualTo("/group"))
            .willReturn(aResponse()
                    .withStatus(400)
                    .withBody("")));

    final CloudianGroup group = getTestGroup();
    boolean result = client.addGroup(group);
    Assert.assertFalse(result);
}
|
||||
|
||||
@Test
|
||||
public void listGroup() {
|
||||
final String groupId = "someGroup";
|
||||
wireMockRule.stubFor(get(urlPathMatching("/group.*"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withBody("{\"groupId\":\"someGroup\",\"groupName\":\"/someDomain\",\"ldapGroup\":null,\"active\":\"true\",\"ldapEnabled\":false,\"ldapServerURL\":null,\"ldapUserDNTemplate\":null,\"ldapSearch\":null,\"ldapSearchUserBase\":null,\"ldapMatchAttribute\":null}")));
|
||||
|
||||
final CloudianGroup group = client.listGroup(groupId);
|
||||
Assert.assertEquals(group.getActive(), true);
|
||||
Assert.assertEquals(group.getGroupId(), groupId);
|
||||
}
|
||||
|
||||
// Verifies that a 204 response from /group (group not found) results in a
// null return value rather than an exception.
@Test
public void listGroupNotFound() {
    wireMockRule.stubFor(get(urlPathMatching("/group.*"))
            .willReturn(aResponse()
                    .withHeader("content-type", "application/json")
                    .withStatus(204) // group not found
                    .withBody("")));
    Assert.assertNull(client.listGroup("someGroup"));
}
|
||||
|
||||
// A 200 response with an empty body is a protocol violation for /group
// (not-found must be a 204), so the client is expected to throw.
@Test(expected = ServerApiException.class)
public void listGroupFail() {
    // Returning 200 with an empty body is not expected behaviour
    wireMockRule.stubFor(get(urlPathMatching("/group.*"))
            .willReturn(aResponse()
                    .withHeader("content-type", "application/json")
                    .withBody("")));

    client.listGroup("xyz");
}
|
||||
|
||||
@Test
|
||||
public void listGroups() {
|
||||
final String groupId = "someGroup";
|
||||
wireMockRule.stubFor(get(urlEqualTo("/group/list"))
|
||||
.willReturn(aResponse()
|
||||
.withHeader("content-type", "application/json")
|
||||
.withBody("[{\"groupId\":\"someGroup\",\"groupName\":\"/someDomain\",\"ldapGroup\":null,\"active\":\"true\",\"ldapEnabled\":false,\"ldapServerURL\":null,\"ldapUserDNTemplate\":null,\"ldapSearch\":null,\"ldapSearchUserBase\":null,\"ldapMatchAttribute\":null}]")));
|
||||
|
||||
final List<CloudianGroup> groups = client.listGroups();
|
||||
Assert.assertEquals(groups.size(), 1);
|
||||
Assert.assertEquals(groups.get(0).getActive(), true);
|
||||
Assert.assertEquals(groups.get(0).getGroupId(), groupId);
|
||||
}
|
||||
|
||||
// Verifies that an empty body from /group/list yields an empty list rather
// than null or an exception.
@Test
public void listGroupsEmptyList() {
    wireMockRule.stubFor(get(urlEqualTo("/group/list"))
            .willReturn(aResponse()
                    .withHeader("content-type", "application/json")
                    .withBody("")));

    final List<CloudianGroup> groups = client.listGroups();
    Assert.assertEquals(0, groups.size());
}
|
||||
|
||||
// /group/list must never return 204 (empty lists are a 200 with "[]"), so a
// 204 here is a protocol violation and the client is expected to throw.
@Test(expected = ServerApiException.class)
public void listGroupsBad204Response() {
    wireMockRule.stubFor(get(urlEqualTo("/group/list"))
            .willReturn(aResponse()
                    .withHeader("content-type", "application/json")
                    .withStatus(204) // bad response. should never be 204
                    .withBody("")));
    client.listGroups();
}
|
||||
|
||||
// Verifies updateGroup() POSTs the group as a JSON body to /group and reports
// success on HTTP 200. Also checks the serialized body and content-type header.
@Test
public void updateGroup() {
    wireMockRule.stubFor(post(urlEqualTo("/group"))
            .willReturn(aResponse()
                    .withStatus(200)
                    .withBody("")));

    final CloudianGroup group = getTestGroup();
    boolean result = client.updateGroup(group);
    Assert.assertTrue(result);
    verify(postRequestedFor(urlEqualTo("/group"))
            .withRequestBody(containing("groupId\":\"" + group.getGroupId()))
            .withHeader("content-type", equalTo("application/json")));
}
|
||||
|
||||
// Verifies updateGroup() returns false (rather than throwing) when the Admin
// API rejects the request with HTTP 400.
@Test
public void updateGroupFail() {
    wireMockRule.stubFor(post(urlEqualTo("/group"))
            .willReturn(aResponse()
                    .withStatus(400)
                    .withBody("")));

    boolean result = client.updateGroup(getTestGroup());
    Assert.assertFalse(result);
}
|
||||
|
||||
// Verifies removeGroup() issues a DELETE to /group with the groupId passed as
// a query parameter, and reports success on HTTP 200.
@Test
public void removeGroup() {
    wireMockRule.stubFor(delete(urlPathMatching("/group.*"))
            .willReturn(aResponse()
                    .withStatus(200)
                    .withBody("")));
    final CloudianGroup group = getTestGroup();
    boolean result = client.removeGroup(group.getGroupId());
    Assert.assertTrue(result);
    verify(deleteRequestedFor(urlPathMatching("/group.*"))
            .withQueryParam("groupId", equalTo(group.getGroupId())));
}
|
||||
|
||||
// Verifies removeGroup() returns false (rather than throwing) when the Admin
// API rejects the request with HTTP 400.
@Test
public void removeGroupFail() {
    wireMockRule.stubFor(delete(urlPathMatching("/group.*"))
            .willReturn(aResponse()
                    .withStatus(400)
                    .withBody("")));
    final CloudianGroup group = getTestGroup();
    boolean result = client.removeGroup(group.getGroupId());
    Assert.assertFalse(result);
}
|
||||
}
|
||||
|
|
@ -1796,6 +1796,10 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
|
|||
}
|
||||
|
||||
List<KubernetesClusterVmMapVO> vmMapList = kubernetesClusterVmMapDao.listByClusterId(kubernetesClusterId);
|
||||
List<VMInstanceVO> vms = vmMapList.stream().map(vmMap -> vmInstanceDao.findById(vmMap.getVmId())).collect(Collectors.toList());
|
||||
if (checkIfVmsAssociatedWithBackupOffering(vms)) {
|
||||
throw new CloudRuntimeException("Unable to delete Kubernetes cluster, as node(s) are associated to a backup offering");
|
||||
}
|
||||
for (KubernetesClusterVmMapVO vmMap : vmMapList) {
|
||||
try {
|
||||
userVmService.destroyVm(vmMap.getVmId(), expunge);
|
||||
|
|
@ -1818,6 +1822,15 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
|
|||
}
|
||||
}
|
||||
|
||||
public static boolean checkIfVmsAssociatedWithBackupOffering(List<VMInstanceVO> vms) {
|
||||
for(VMInstanceVO vm : vms) {
|
||||
if (Objects.nonNull(vm.getBackupOfferingId())) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ListResponse<KubernetesClusterResponse> listKubernetesClusters(ListKubernetesClustersCmd cmd) {
|
||||
if (!KubernetesServiceEnabled.value()) {
|
||||
|
|
|
|||
|
|
@ -263,6 +263,10 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod
|
|||
init();
|
||||
validateClusterSate();
|
||||
this.clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
|
||||
List<VMInstanceVO> vms = this.clusterVMs.stream().map(vmMap -> vmInstanceDao.findById(vmMap.getVmId())).collect(Collectors.toList());
|
||||
if (KubernetesClusterManagerImpl.checkIfVmsAssociatedWithBackupOffering(vms)) {
|
||||
throw new CloudRuntimeException("Unable to delete Kubernetes cluster, as node(s) are associated to a backup offering");
|
||||
}
|
||||
boolean cleanupNetwork = true;
|
||||
final KubernetesClusterDetailsVO clusterDetails = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), "networkCleanup");
|
||||
if (clusterDetails != null) {
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@ package org.apache.cloudstack.metrics;
|
|||
import java.math.BigDecimal;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
|
|
@ -302,7 +303,7 @@ public class PrometheusExporterImpl extends ManagerBase implements PrometheusExp
|
|||
.flatMap( h -> _hostTagsDao.getHostTags(h).stream())
|
||||
.distinct()
|
||||
.collect(Collectors.toList());
|
||||
List<String> allHostTags = new ArrayList<>();
|
||||
HashSet<String> allHostTags = new HashSet<>();
|
||||
allHostTagVOS.forEach(hostTagVO -> allHostTags.add(hostTagVO.getTag()));
|
||||
|
||||
for (final State state : State.values()) {
|
||||
|
|
|
|||
|
|
@ -139,6 +139,7 @@
|
|||
<module>storage/volume/primera</module>
|
||||
<module>storage/object/minio</module>
|
||||
<module>storage/object/ceph</module>
|
||||
<module>storage/object/cloudian</module>
|
||||
<module>storage/object/simulator</module>
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,175 @@
|
|||
# Cloudian HyperStore Object Storage Plugin
|
||||
|
||||
## Plugin Purpose
|
||||
|
||||
This plugin implements the Object Storage DataStore for Cloudian HyperStore.
|
||||
|
||||
## About Cloudian HyperStore
|
||||
|
||||
Cloudian HyperStore is a fully AWS-S3 compatible Object Storage solution. The following services are used by this plugin.
|
||||
|
||||
| Service | HTTP Port | HTTPS Port | Description |
|
||||
|:-------:|----------:|-----------:|:-----------------------|
|
||||
| Admin | | 19443 | User Management etc. |
|
||||
| S3 | 80 | 443 | AWS-S3 compatible API |
|
||||
| IAM | 16080 | 16443 | AWS-IAM compatible API |
|
||||
|
||||
## Configuration
|
||||
|
||||
### HyperStore Configuration
|
||||
|
||||
1. Enable Bucket Usage Statistics
|
||||
|
||||
Bucket Level QoS settings must be set to true. On HyperStore 8+, this can be done as follows. Earlier versions require puppet configuration which is not documented here.
|
||||
|
||||
```shell
|
||||
hsh$ hsctl config set s3.qos.bucketLevel=true
|
||||
hsh$ hsctl config apply s3 cmc
|
||||
hsh$ hsctl service restart s3 cmc --nodes=ALL
|
||||
```
|
||||
|
||||
2. The Admin API Username and Password
|
||||
|
||||
The connector requires an ADMIN API username and password to connect to the Admin service and create and manage HyperStore resources such as HyperStore Users and Groups. Please review your HyperStore Admin Guide and the settings under the `admin.auth` namespace.
|
||||
|
||||
3. Enable Object Lock via License
|
||||
|
||||
HyperStore fully supports S3 Object Lock. However, Object Lock is currently only available with a special Object Lock License from Cloudian. If the connected HyperStore system does not have an Object Lock license, it will only allow creating regular buckets. Contact Cloudian Support to request an Object Lock license if required.
|
||||
|
||||
### CloudStack Configuration
|
||||
|
||||
A new `Cloudian HyperStore` Object Store can be added by the CloudStack `admin` user via the UI -> Infrastructure -> Object Storage -> Add Object Storage button.
|
||||
|
||||
Once added, this passes various configuration parameters to the LifeCycle class as a map with the following keys and values.
|
||||
|
||||
```text
|
||||
DataStoreInfo MAP
|
||||
++++++++++++++++++++++++++++++++++++++
|
||||
| Key | Value |
|
||||
|-------------|----------------------|
|
||||
|name | <user's choice> |
|
||||
|providerName | Cloudian HyperStore |
|
||||
|url | <ADMIN endpoint URL> |
|
||||
|details | <MAP> ===========|=====+
|
||||
++++++++++++++++++++++++++++++++++++++ v
|
||||
v
|
||||
+======================================+
|
||||
V
|
||||
Details MAP
|
||||
++++++++++++++++++++++++++++++++++
|
||||
| Key | Value |
|
||||
|-------------|------------------|
|
||||
| validateSSL | true/false |
|
||||
| accesskey | Admin Username |
|
||||
| secretkey | Admin Password |
|
||||
| s3Url | S3 endpoint URL |
|
||||
| iamUrl | IAM endpoint URL |
|
||||
++++++++++++++++++++++++++++++++++
|
||||
```
|
||||
|
||||
The following "details" map entries are all required.
|
||||
|
||||
- validateSSL : The ADMIN API is internal and may not have a proper SSL Certificate.
|
||||
- accesskey : Reuse of a shared configuration parameter to pass the Admin Username.
|
||||
- secretkey : Reuse of a shared configuration parameter to pass the Admin password.
|
||||
- s3Url : The HyperStore S3 endpoint URL. HTTPS is preferred when the service has a proper SSL Certificate which should be true in production.
|
||||
- iamUrl : The HyperStore IAM endpoint URL. Again HTTPS is preferred.
|
||||
|
||||
The LifeCycle initialize() method should validate connectivity to the different services.
|
||||
|
||||
## CloudStack Account Mappings
|
||||
|
||||
| CloudStack | HyperStore | Name Assigned |
|
||||
|:-----------|:-----------------|:---------------------|
|
||||
| Domain | HyperStore Group | Domain UUID |
|
||||
| Account | HyperStore User | Account UUID |
|
||||
| Project | HyperStore User | Project Account UUID |
|
||||
|
||||
When a CloudStack Account user creates a bucket under their account for the first time a new HyperStore User is allocated under the HyperStore Group that is mapped to the CloudStack Domain. A new HyperStore Group is also allocated if one does not already exist.
|
||||
|
||||
## HyperStore User Resources
|
||||
|
||||
The following additional resources are also created for each HyperStore User.
|
||||
|
||||
| Resource | Description |
|
||||
|--------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Root Credential Pair | These credentials have full access to the HyperStore User account. They are used to manage the IAM user resources listed below as well as to perform any top level bucket actions such as creating buckets, updating policies, enabling versioning etc. |
|
||||
| IAM User "CloudStack"    | The "CloudStack" IAM user is created with an inline policy as per below. The IAM user is used by the CloudStack Bucket Browser UI to manage bucket contents.                                                                                        |
|
||||
| IAM User Policy | This inline IAM user policy grants the "CloudStack" IAM user permission to any S3 action except `s3:createBucket` and `s3:deleteBucket`. This is mostly to ensure that all Buckets remain under CloudStack control as well as to restrict control over IAM actions. |
|
||||
| IAM User Credential Pair | The "CloudStack" IAM user credentials are also managed by the plugin and are made available to the user under the "Bucket Details" page. They are additionally used by the CloudStack Bucket Browser UI. They are restricted by the aforementioned user policy. |
|
||||
|
||||
## Bucket Management
|
||||
|
||||
The following are noteworthy.
|
||||
|
||||
### Bucket Quota is Unsupported
|
||||
|
||||
Cloudian HyperStore does not currently support restricting the size of a bucket to a particular quota limit. The plugin accepts a quota value of 0 to indicate no quota setting. When creating a bucket in the CloudStack UI, the user is required to set a quota of 0. Any other value will fail.
|
||||
|
||||
### Bucket Usage
|
||||
|
||||
HyperStore does not collect bucket usage statistics by default. They must be enabled by the HyperStore Administrator. On systems where this has not been enabled, bucket usage is reported as 0 bytes.
|
||||
|
||||
See the configuration section above for more details.
|
||||
|
||||
### Supported Bucket Policies
|
||||
|
||||
Two "policies" are configurable using the CloudStack interface.
|
||||
|
||||
- Private : Objects are only accessible to the bucket owner. This is the equivalent of no bucket policy (and is implemented that way).
|
||||
- Public : Objects are readable to everyone. Listing of all bucket objects is not granted so the object name must be known in order to access it.
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Sid": "PublicReadForObjects",
|
||||
"Effect": "Allow",
|
||||
"Principal": "*",
|
||||
"Action": "s3:GetObject",
|
||||
"Resource": "arn:aws:s3:::BUCKET/*"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Additional Bucket CORS Settings
|
||||
|
||||
Buckets created by the CloudStack plugin are additionally created with a Cross-Origin Resource Sharing (CORS) configuration. A permissive CORS setting on buckets is required by the CloudStack Bucket Browser UI functionality as it is written in JavaScript and runs in the end user's browser.
|
||||
|
||||
```xml
|
||||
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
|
||||
<CORSRule>
|
||||
<ID>AllowAny</ID>
|
||||
<AllowedOrigin>*</AllowedOrigin>
|
||||
<AllowedMethod>GET</AllowedMethod>
|
||||
<AllowedMethod>HEAD</AllowedMethod>
|
||||
<AllowedMethod>PUT</AllowedMethod>
|
||||
<AllowedMethod>POST</AllowedMethod>
|
||||
<AllowedMethod>DELETE</AllowedMethod>
|
||||
<AllowedHeader>*</AllowedHeader>
|
||||
</CORSRule>
|
||||
</CORSConfiguration>
|
||||
```
|
||||
|
||||
### Visibility of other Buckets under the same HyperStore User
|
||||
|
||||
While the "CloudStack" IAM user cannot create other buckets under the HyperStore User account, there are other reasons that buckets can exist under the HyperStore user but not be known to CloudStack. These include network connectivity issues between creating a bucket and updating the database. Note that this can usually be rectified by retrying the create bucket operation.
|
||||
|
||||
While a bucket is not visible to CloudStack, a 3rd party application using the same IAM credentials will be able to see and operate on the bucket.
|
||||
|
||||
## Interoperability with Existing HyperStore Plugin
|
||||
|
||||
This plugin is mostly interoperable with the existing HyperStore Infrastructure plugin. However, it is recommended to use one or the other but __not both__ plugins.
|
||||
|
||||
The purpose of the older HyperStore infrastructure plugin is to grant full access to the HyperStore User that is mapped to the CloudStack Account. As such it grants the logged in CloudStack Account Single-Sign-On (SSO) into the Cloudian Management Console (CMC) as the Root User of the HyperStore User. This would allow the CloudStack Account to create and delete HyperStore User resources (credentials/IAM users/federated logins/buckets/etc) outside CloudStack control.
|
||||
|
||||
In comparison, this plugin attempts to restrict HyperStore User level, IAM and Bucket level actions by providing CloudStack Account access via IAM credentials.
|
||||
|
||||
## Known Issues
|
||||
|
||||
1. Currently, there is no way to edit the Object Storage Configuration for any of the parameters configured in the "details" map. It seems that other Object Storage providers have the same issue.
|
||||
2. The Bucket Browser UI feature may not work correctly on HyperStore versions older than 8.2 due to some bugs in the CORS implementation. However, everything else will still function correctly.
|
||||
3. Object metadata is not correctly displayed in the CloudStack Bucket Browser. This is due to the JavaScript client using a MinIO-only (non-S3-compatible) extension call that collects the metadata as part of the bucket listing. To fix this for non-MinIO S3 Object Stores, Object Metadata should be collected using the S3 standard headObject operation.
|
||||
4. CloudStack does not yet have a deleteUser API for Object Stores so when a CloudStack Account is deleted, the mapped HyperStore User is not currently cleaned up.
|
||||
|
|
@ -0,0 +1,70 @@
|
|||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<artifactId>cloud-plugin-storage-object-cloudian</artifactId>
|
||||
<name>Apache CloudStack Plugin - Cloudian HyperStore object storage provider</name>
|
||||
<parent>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloudstack-plugins</artifactId>
|
||||
<version>4.21.0.0-SNAPSHOT</version>
|
||||
<relativePath>../../../pom.xml</relativePath>
|
||||
</parent>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-engine-storage</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-engine-storage-object</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-engine-schema</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-plugin-integrations-cloudian-connector</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.amazonaws</groupId>
|
||||
<artifactId>aws-java-sdk-core</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.amazonaws</groupId>
|
||||
<artifactId>aws-java-sdk-iam</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.amazonaws</groupId>
|
||||
<artifactId>aws-java-sdk-s3</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.tomakehurst</groupId>
|
||||
<artifactId>wiremock-standalone</artifactId>
|
||||
<version>${cs.wiremock.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</project>
|
||||
|
|
@ -0,0 +1,890 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
package org.apache.cloudstack.storage.datastore.driver;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import org.apache.cloudstack.api.ServerApiException;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianClient;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianCredential;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianGroup;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianUser;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianUserBucketUsage;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianUserBucketUsage.CloudianBucketUsage;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO;
|
||||
import org.apache.cloudstack.storage.datastore.util.CloudianHyperStoreUtil;
|
||||
import org.apache.cloudstack.storage.object.BaseObjectStoreDriverImpl;
|
||||
import org.apache.cloudstack.storage.object.Bucket;
|
||||
import org.apache.cloudstack.storage.object.BucketObject;
|
||||
|
||||
import com.amazonaws.AmazonClientException;
|
||||
import com.amazonaws.services.identitymanagement.AmazonIdentityManagement;
|
||||
import com.amazonaws.services.identitymanagement.model.AccessKey;
|
||||
import com.amazonaws.services.identitymanagement.model.AccessKeyMetadata;
|
||||
import com.amazonaws.services.identitymanagement.model.CreateAccessKeyRequest;
|
||||
import com.amazonaws.services.identitymanagement.model.CreateUserRequest;
|
||||
import com.amazonaws.services.identitymanagement.model.DeleteAccessKeyRequest;
|
||||
import com.amazonaws.services.identitymanagement.model.EntityAlreadyExistsException;
|
||||
import com.amazonaws.services.identitymanagement.model.ListAccessKeysRequest;
|
||||
import com.amazonaws.services.identitymanagement.model.ListAccessKeysResult;
|
||||
import com.amazonaws.services.identitymanagement.model.NoSuchEntityException;
|
||||
import com.amazonaws.services.identitymanagement.model.PutUserPolicyRequest;
|
||||
import com.amazonaws.services.s3.AmazonS3;
|
||||
import com.amazonaws.services.s3.model.AccessControlList;
|
||||
import com.amazonaws.services.s3.model.BucketCrossOriginConfiguration;
|
||||
import com.amazonaws.services.s3.model.BucketPolicy;
|
||||
import com.amazonaws.services.s3.model.BucketVersioningConfiguration;
|
||||
import com.amazonaws.services.s3.model.CORSRule;
|
||||
import com.amazonaws.services.s3.model.CreateBucketRequest;
|
||||
import com.amazonaws.services.s3.model.SSEAlgorithm;
|
||||
import com.amazonaws.services.s3.model.ServerSideEncryptionByDefault;
|
||||
import com.amazonaws.services.s3.model.ServerSideEncryptionConfiguration;
|
||||
import com.amazonaws.services.s3.model.ServerSideEncryptionRule;
|
||||
import com.amazonaws.services.s3.model.SetBucketCrossOriginConfigurationRequest;
|
||||
import com.amazonaws.services.s3.model.SetBucketEncryptionRequest;
|
||||
import com.amazonaws.services.s3.model.SetBucketVersioningConfigurationRequest;
|
||||
import com.cloud.agent.api.to.BucketTO;
|
||||
import com.cloud.agent.api.to.DataStoreTO;
|
||||
import com.cloud.storage.BucketVO;
|
||||
import com.cloud.storage.dao.BucketDao;
|
||||
import com.cloud.domain.Domain;
|
||||
import com.cloud.domain.dao.DomainDao;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.AccountDetailsDao;
|
||||
import com.cloud.user.dao.AccountDao;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
public class CloudianHyperStoreObjectStoreDriverImpl extends BaseObjectStoreDriverImpl {
|
||||
@Inject
|
||||
AccountDao _accountDao;
|
||||
|
||||
@Inject
|
||||
AccountDetailsDao _accountDetailsDao;
|
||||
|
||||
@Inject
|
||||
DomainDao _domainDao;
|
||||
|
||||
@Inject
|
||||
ObjectStoreDao _storeDao;
|
||||
|
||||
@Inject
|
||||
BucketDao _bucketDao;
|
||||
|
||||
@Inject
|
||||
ObjectStoreDetailsDao _storeDetailsDao;
|
||||
|
||||
/**
 * {@inheritDoc}
 *
 * Always returns {@code null}: this driver does not provide a DataStoreTO
 * representation for the HyperStore object store.
 */
@Override
public DataStoreTO getStoreTO(DataStore store) {
    return null;
}
|
||||
|
||||
/**
 * Get the HyperStore user id for the current account.
 *
 * The CloudStack account UUID is used directly as the HyperStore user id,
 * giving a stable 1:1 mapping between accounts and HyperStore users.
 *
 * @param account the current account
 * @return the userId based on the CloudStack account uuid.
 */
protected String getHyperStoreUserId(Account account) {
    return account.getUuid();
}
|
||||
|
||||
/**
 * Get the HyperStore tenant/group id for the current domain.
 *
 * The CloudStack domain UUID is used directly as the HyperStore group id,
 * giving a stable 1:1 mapping between domains and HyperStore groups.
 *
 * @param domain the current domain
 * @return the groupId based on the CloudStack domain uuid
 */
protected String getHyperStoreGroupId(Domain domain) {
    return domain.getUuid();
}
|
||||
|
||||
/**
|
||||
* Create the HyperStore user resources matching this account if it doesn't exist.
|
||||
*
|
||||
* The following resources are created for the account:
|
||||
* - HyperStore Group to match the CloudStack Domain UUID
|
||||
* - HyperStore User to match the CloudStack Account UUID
|
||||
* - HyperStore Root User Credentials to manage Account Buckets etc (kept private to this plugin)
|
||||
* - HyperStore IAM User with IAM policy granting all S3 actions except create/delete buckets.
|
||||
* - HyperStore IAM User Credentials (visible to end user as part of Bucket Details)
|
||||
*
|
||||
* @param accountId the CloudStack account
|
||||
* @param storeId the object store.
|
||||
*
|
||||
* @return true if user exists or was created, false if there was some issue creating it.
|
||||
* @throws CloudRuntimeException on errors checking if the user exists or if the HyperStore user or group is disabled.
|
||||
*/
|
||||
@Override
|
||||
public boolean createUser(long accountId, long storeId) {
|
||||
Account account = _accountDao.findById(accountId);
|
||||
Domain domain = _domainDao.findById(account.getDomainId());
|
||||
String hsUserId = getHyperStoreUserId(account);
|
||||
String hsGroupId = getHyperStoreGroupId(domain);
|
||||
|
||||
CloudianClient client = getCloudianClientByStoreId(storeId);
|
||||
logger.debug("Checking if user id={} group id={} exists.", hsGroupId, hsUserId);
|
||||
CloudianUser user = client.listUser(hsUserId, hsGroupId);
|
||||
if (user == null) {
|
||||
// Create the group if it doesn't already exist
|
||||
createHSGroup(client, hsGroupId, domain);
|
||||
// Create the user under the group.
|
||||
user = createHSUser(client, hsUserId, hsGroupId, account);
|
||||
if (user == null) {
|
||||
return false; // already logged.
|
||||
}
|
||||
} else if (! user.getActive()) {
|
||||
// Normally this would be true unless an administrator has explicitly disabled the user account.
|
||||
String msg = String.format("The User id=%s group id=%s is Disabled. Consult your HyperStore Administrator.", hsUserId, hsGroupId);
|
||||
logger.error(msg);
|
||||
throw new CloudRuntimeException(msg);
|
||||
} else {
|
||||
// User exists and is active. We know that the group therefore exists but
|
||||
// we should ensure that it is active or it will lead to unknown access key errors
|
||||
// which might confuse the administrator. Checking is clearer.
|
||||
CloudianGroup group = client.listGroup(hsGroupId);
|
||||
if (group != null && ! group.getActive()) {
|
||||
String msg = String.format("The group id=%s is Disabled. Consult your HyperStore Administrator.", hsGroupId);
|
||||
logger.error(msg);
|
||||
throw new CloudRuntimeException(msg);
|
||||
}
|
||||
}
|
||||
|
||||
// We either created a new account or found an existing one.
|
||||
CloudianCredential credential = createHSCredential(client, hsUserId, hsGroupId);
|
||||
|
||||
// Next, ensure we the IAM User Credentials exist. These are available
|
||||
// to the user as part of the bucket details instead of the Root credentials.
|
||||
Map<String, String> details = _accountDetailsDao.findDetails(accountId);
|
||||
AccessKey iamCredential = createIAMCredentials(storeId, details, credential);
|
||||
|
||||
// persist the root and iam credentials in the database and update all bucket details.
|
||||
persistCredentials(storeId, accountId, details, credential, iamCredential);
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
 * Create IAM credentials if required.
 *
 * When the HyperStore user is first created, this method will create an IAM User with an appropriate
 * permission policy and a set of credentials which will be returned.
 * After the first run, the IAM resources should already be in place in which case we just ensure
 * the credentials we are using are still available and if not, it tries to recreate the IAM resources.
 *
 * @param storeId the store
 * @param details a map of existing account details that we know about including any saved IAM credentials.
 * @param credential the account Root User credentials (to manage the IAM resources).
 * @return an AccessKey object for newly created IAM credentials or null if existing credentials were ok
 *    and nothing was created.
 */
protected AccessKey createIAMCredentials(long storeId, Map<String, String> details, CloudianCredential credential) {
    AmazonIdentityManagement iamClient = getIAMClientByStoreId(storeId, credential);
    final String iamUser = CloudianHyperStoreUtil.IAM_USER_USERNAME;

    // If an accessKeyId is known to us, check IAM still has it.
    String iamAccessKeyId = details.get(CloudianHyperStoreUtil.KEY_IAM_ACCESS_KEY);
    if (iamAccessKeyId != null) {
        try {
            logger.debug("Looking for IAM credential {} for IAM User {}", iamAccessKeyId, iamUser);
            // NOTE(review): only the first page of keys is inspected (IsTruncated is not
            // checked). Presumably the managed user holds very few keys — confirm.
            ListAccessKeysResult listAccessKeyResult = iamClient.listAccessKeys(new ListAccessKeysRequest().withUserName(iamUser));
            for (AccessKeyMetadata accessKeyMetadata : listAccessKeyResult.getAccessKeyMetadata()) {
                if (iamAccessKeyId.equals(accessKeyMetadata.getAccessKeyId())) {
                    return null; // The IAM AccessKeyId still exists (as expected). return null.
                }
                // Usually, there will only be 1 credential that we manage, but an error persisting
                // credentials might leave an un-managed credential which we can just delete. It is better
                // to delete as otherwise, we may hit a max credential limit for this IAM user.
                deleteIAMCredential(iamClient, iamUser, accessKeyMetadata.getAccessKeyId());
            }
        } catch (NoSuchEntityException e) {
            // No IAM User. Ignore and fix this below.
        }
    }

    // If we get here, a usable credential does not yet exist so create it.
    // Before creating it, we also need to ensure the IAM User that will own it exists.
    boolean createdUser = false;
    try {
        iamClient.createUser(new CreateUserRequest(iamUser));
        logger.info("Created IAM user {} for account", iamUser);
        createdUser = true;
    } catch (EntityAlreadyExistsException e) {
        // User already exists. Ignore and continue.
    }

    // Always Add or Update the IAM policy
    iamClient.putUserPolicy(new PutUserPolicyRequest(iamUser, CloudianHyperStoreUtil.IAM_USER_POLICY_NAME, CloudianHyperStoreUtil.IAM_USER_POLICY));

    if (! createdUser && iamAccessKeyId == null) {
        // User already exists but we never saved any access key before. We should try clean up
        // any keys the pre-existing user holds: none of them can be ours (nothing was saved),
        // and deleting them avoids hitting the per-user access key limit below.
        logger.debug("Looking for any un-managed IAM credentials for IAM User {}", iamUser);
        ListAccessKeysResult listRes = iamClient.listAccessKeys(new ListAccessKeysRequest().withUserName(iamUser));
        for (AccessKeyMetadata accessKeyMetadata : listRes.getAccessKeyMetadata()) {
            deleteIAMCredential(iamClient, iamUser, accessKeyMetadata.getAccessKeyId());
        }
    }

    // Create and return the new IAM credentials for this user.
    AccessKey iamAccessKey = iamClient.createAccessKey(new CreateAccessKeyRequest(iamUser)).getAccessKey();
    logger.info("Created IAM Credential {} for IAM User {}", iamAccessKey.getAccessKeyId(), iamUser);
    return iamAccessKey;
}
|
||||
|
||||
/**
|
||||
* Delete an IAM Credential.
|
||||
*
|
||||
* @param iamClient a valid iam connection
|
||||
* @param iamUser the IAM user that owns the credential to delete.
|
||||
* @param accessKeyId The IAM credential to delete
|
||||
*/
|
||||
protected void deleteIAMCredential(AmazonIdentityManagement iamClient, String iamUser, String accessKeyId) {
|
||||
DeleteAccessKeyRequest deleteAccessKeyRequest = new DeleteAccessKeyRequest();
|
||||
deleteAccessKeyRequest.setUserName(iamUser);
|
||||
deleteAccessKeyRequest.setAccessKeyId(accessKeyId);
|
||||
logger.info("Deleting un-managed IAM AccessKeyId {} for IAM User {}", accessKeyId, iamUser);
|
||||
iamClient.deleteAccessKey(deleteAccessKeyRequest);
|
||||
}
|
||||
|
||||
/**
|
||||
* Persist the Root and IAM user credentials with the Account as required.
|
||||
* @param storeId the store
|
||||
* @param accountId the CloudStack account the credential belongs to
|
||||
* @param details the Account details map containing any pre-existing credential entries
|
||||
* @param credential the HyperStore credential assigned to this account.
|
||||
* @param iamCredential the new IAM credential or null if nothing new to persist.
|
||||
*/
|
||||
private void persistCredentials(long storeId, long accountId, Map<String, String> details, CloudianCredential credential, AccessKey iamCredential) {
|
||||
boolean persist = false;
|
||||
|
||||
String rootAccessKey = details.get(CloudianHyperStoreUtil.KEY_ROOT_ACCESS_KEY);
|
||||
if (! credential.getAccessKey().equals(rootAccessKey)) {
|
||||
// Persist the new (possibly rotated) credential pair
|
||||
details.put(CloudianHyperStoreUtil.KEY_ROOT_ACCESS_KEY, credential.getAccessKey());
|
||||
details.put(CloudianHyperStoreUtil.KEY_ROOT_SECRET_KEY, credential.getSecretKey());
|
||||
persist = true;
|
||||
}
|
||||
|
||||
if (iamCredential != null) {
|
||||
// Persist the new IAM credentials
|
||||
details.put(CloudianHyperStoreUtil.KEY_IAM_ACCESS_KEY, iamCredential.getAccessKeyId());
|
||||
details.put(CloudianHyperStoreUtil.KEY_IAM_SECRET_KEY, iamCredential.getSecretAccessKey());
|
||||
updateAccountBucketCredentials(storeId, accountId, iamCredential);
|
||||
persist = true;
|
||||
}
|
||||
|
||||
if (persist) {
|
||||
logger.debug("Persisting new credential information for accountId={}", accountId);
|
||||
_accountDetailsDao.persist(accountId, details);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Update bucket details associated with this store/account to use the new IAM credentials.
|
||||
*
|
||||
* @param storeId the store
|
||||
* @param accountId the user account
|
||||
* @param iamCredential the IAM credentials to associate with any existing buckets.
|
||||
*/
|
||||
private void updateAccountBucketCredentials(long storeId, long accountId, AccessKey iamCredential) {
|
||||
List<BucketVO> bucketList = _bucketDao.listByObjectStoreIdAndAccountId(storeId, accountId);
|
||||
for (BucketVO bucketVO : bucketList) {
|
||||
logger.info("Updating accountId={} bucket {} with new IAM credentials", accountId, bucketVO.getName());
|
||||
bucketVO.setAccessKey(iamCredential.getAccessKeyId());
|
||||
bucketVO.setSecretKey(iamCredential.getSecretAccessKey());
|
||||
_bucketDao.update(bucketVO.getId(), bucketVO);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a HyperStore credential for the user if one does not already exist.
|
||||
* @param client ADMIN API connection
|
||||
* @param hsUserId HyperStore userId
|
||||
* @param hsGroupId HyperStore groupId
|
||||
*
|
||||
* @return a Root Credential (never null)
|
||||
* @throws ServerApiException if any error is encountered
|
||||
*/
|
||||
protected CloudianCredential createHSCredential(CloudianClient client, String hsUserId, String hsGroupId) {
|
||||
// find the oldest active Root credential in the account.
|
||||
List<CloudianCredential> credentials = client.listCredentials(hsUserId, hsGroupId);
|
||||
CloudianCredential credential = null;
|
||||
for (CloudianCredential candidate : credentials) {
|
||||
if (! candidate.getActive()) {
|
||||
continue;
|
||||
}
|
||||
if (credential == null || credential.isNewerThan(candidate)) {
|
||||
credential = candidate;
|
||||
}
|
||||
}
|
||||
|
||||
if (credential == null) {
|
||||
// nothing found, create one
|
||||
logger.debug("No active credentials found for groupId={} userId={}. Creating one.", hsGroupId, hsUserId);
|
||||
credential = client.createCredential(hsUserId, hsGroupId);
|
||||
logger.info("Created Root credentials for groupId={} userId={}.", hsGroupId, hsUserId);
|
||||
}
|
||||
|
||||
// Either found or successfully created a credential.
|
||||
return credential;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create the HyperStore Group if it does not already exist.
|
||||
* @param client a CloudianClient connection
|
||||
* @param hsGroupId the name of the HyperStore group to create.
|
||||
* @param domain the domain that is being mapped to the HyperStore group.
|
||||
* @throws CloudRuntimeException if the group cannot be created or the group exists but is disabled.
|
||||
*/
|
||||
private void createHSGroup(CloudianClient client, String hsGroupId, Domain domain) {
|
||||
// The group will usually exist so lets look for it before trying to add it.
|
||||
logger.debug("Checking if group {} exists.", hsGroupId);
|
||||
CloudianGroup group = client.listGroup(hsGroupId);
|
||||
if (group == null) {
|
||||
group = new CloudianGroup();
|
||||
group.setGroupId(hsGroupId);
|
||||
group.setActive(Boolean.TRUE);
|
||||
group.setGroupName(domain.getPath());
|
||||
client.addGroup(group);
|
||||
logger.info("Created group {} for domain {} successfully.", hsGroupId, domain.getPath());
|
||||
return;
|
||||
}
|
||||
|
||||
// Group exists. Confirm that it is usable.
|
||||
if (! group.getActive()) {
|
||||
String msg = String.format("The group %s is Disabled. Consult your HyperStore Administrator.", hsGroupId);
|
||||
logger.error(msg);
|
||||
throw new CloudRuntimeException(msg);
|
||||
}
|
||||
|
||||
// Group exists and is enabled. Nothing to log.
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new HyperStore user
|
||||
*
|
||||
* @param client admin api client
|
||||
* @param hsUserId the user to create
|
||||
* @param hsGroupId the group to add him to
|
||||
* @param account the account the user represents
|
||||
* @return user object if successfully created, null otherwise
|
||||
* @throws ServerAPIException if on other other.
|
||||
*/
|
||||
private CloudianUser createHSUser(CloudianClient client, String hsUserId, String hsGroupId, Account account) {
|
||||
CloudianUser user = new CloudianUser();
|
||||
user.setActive(Boolean.TRUE);
|
||||
user.setGroupId(hsGroupId);
|
||||
user.setUserId(hsUserId);
|
||||
user.setUserType(CloudianUser.USER);
|
||||
user.setFullName(account.getAccountName());
|
||||
|
||||
if (! client.addUser(user)) {
|
||||
// The failure shouldn't be that the user already exists at this point so its something else.
|
||||
logger.error("Failed to add user id={} groupId={}", hsUserId, hsGroupId);
|
||||
return null;
|
||||
} else {
|
||||
logger.info("Created new user id={} groupId={}", hsUserId, hsGroupId);
|
||||
return user;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a bucket in HyperStore under the Account listed in the bucket argument.
|
||||
*
|
||||
* @param bucket the bucket to create.
|
||||
* @param objectLock set to true to enable ObjectLock (requires an ObjectLock license), false for a normal bucket.
|
||||
*
|
||||
* @throws CloudRuntimeException if ObjectLock was requested but the feature is disabled due to license or any
|
||||
* other failure.
|
||||
*/
|
||||
@Override
|
||||
public Bucket createBucket(Bucket bucket, boolean objectLock) {
|
||||
String bucketName = bucket.getName();
|
||||
long storeId = bucket.getObjectStoreId();
|
||||
long accountId = bucket.getAccountId();
|
||||
|
||||
// get an s3client using Account Root User Credentials
|
||||
Map<String, String> storeDetails = _storeDetailsDao.getDetails(storeId);
|
||||
String s3url = storeDetails.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_S3_URL);
|
||||
Map<String, String> accountDetails = _accountDetailsDao.findDetails(accountId);
|
||||
String accessKey = accountDetails.get(CloudianHyperStoreUtil.KEY_ROOT_ACCESS_KEY);
|
||||
String secretKey = accountDetails.get(CloudianHyperStoreUtil.KEY_ROOT_SECRET_KEY);
|
||||
String iamAccessKey = accountDetails.get(CloudianHyperStoreUtil.KEY_IAM_ACCESS_KEY);
|
||||
String iamSecretKey = accountDetails.get(CloudianHyperStoreUtil.KEY_IAM_SECRET_KEY);
|
||||
AmazonS3 s3client = getS3Client(s3url, accessKey, secretKey);
|
||||
|
||||
// Step 1: Create the bucket
|
||||
try {
|
||||
// Create the bucket with ObjectLock if requested
|
||||
logger.info("Creating bucket {}", bucketName);
|
||||
CreateBucketRequest cbRequest = new CreateBucketRequest(bucketName);
|
||||
cbRequest.setObjectLockEnabledForBucket(objectLock);
|
||||
s3client.createBucket(cbRequest);
|
||||
} catch (AmazonClientException e) {
|
||||
logger.error("Create bucket failed", e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
|
||||
// Step 2: Any Exception here, we try to delete the bucket.
|
||||
// If deletion fails, it is not the end of the world as the
|
||||
// user can try again to create the bucket which if he is
|
||||
// already the owner, it will succeed.
|
||||
try {
|
||||
// Enable a permissive CORS configuration
|
||||
configureBucketCORS(s3client, bucketName);
|
||||
|
||||
// Update the Bucket Information (for Bucket details page etc)
|
||||
BucketVO bucketVO = _bucketDao.findById(bucket.getId());
|
||||
bucketVO.setAccessKey(iamAccessKey);
|
||||
bucketVO.setSecretKey(iamSecretKey);
|
||||
bucketVO.setBucketURL(s3url + "/" + bucketName);
|
||||
_bucketDao.update(bucket.getId(), bucketVO);
|
||||
return bucketVO;
|
||||
} catch (Exception e) {
|
||||
// Error with DB or CORS. Delete the bucket from S3
|
||||
logger.error("There was a failure after bucket creation. Trying to clean up", e);
|
||||
try {
|
||||
s3client.deleteBucket(bucketName);
|
||||
logger.info("cleanup succeeded.");
|
||||
} catch (AmazonClientException e1) {
|
||||
logger.error("Cleanup for create bucket also failed with", e);
|
||||
}
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Configure a permissive CrossOrigin setting on the given bucket.
|
||||
*
|
||||
* Cloudian does not enable CORS by default. The CORS configuration
|
||||
* is required by CloudStack so that the Javascript S3 bucket
|
||||
* browser can function properly.
|
||||
*
|
||||
* This method does not catch any exceptions which should be caught
|
||||
* by the calling method.
|
||||
*
|
||||
* @param s3client bucket owner s3client
|
||||
* @param bucketName the bucket name.
|
||||
*
|
||||
* @throws AmazonClientException and derivatives
|
||||
*/
|
||||
private void configureBucketCORS(AmazonS3 s3client, String bucketName) {
|
||||
logger.debug("Configuring CORS for bucket {}", bucketName);
|
||||
|
||||
List<CORSRule> corsRules = new ArrayList<CORSRule>();
|
||||
CORSRule allowAnyRule = new CORSRule().withId("AllowAny");
|
||||
allowAnyRule.setAllowedOrigins("*");
|
||||
allowAnyRule.setAllowedHeaders("*");
|
||||
allowAnyRule.setAllowedMethods(
|
||||
CORSRule.AllowedMethods.HEAD,
|
||||
CORSRule.AllowedMethods.GET,
|
||||
CORSRule.AllowedMethods.PUT,
|
||||
CORSRule.AllowedMethods.POST,
|
||||
CORSRule.AllowedMethods.DELETE);
|
||||
corsRules.add(allowAnyRule);
|
||||
BucketCrossOriginConfiguration corsConfig = new BucketCrossOriginConfiguration();
|
||||
corsConfig.setRules(corsRules);
|
||||
SetBucketCrossOriginConfigurationRequest corsRequest = new SetBucketCrossOriginConfigurationRequest(bucketName, corsConfig);
|
||||
s3client.setBucketCrossOriginConfiguration(corsRequest);
|
||||
logger.info("Successfully configured CORS for bucket {}", bucketName);
|
||||
}
|
||||
|
||||
/**
|
||||
* This API seems to be called by the StorageManagementImpl to validate that the
|
||||
* main Object Store URL (in our case the Admin API endpoint) is correct. As
|
||||
* such, let's return all buckets owned by accounts managed by this object
|
||||
* store using the same API as the bucket usage as that uses the ADMIN API.
|
||||
*
|
||||
* @return a list of Bucket objects where only the bucketName is set.
|
||||
*/
|
||||
@Override
|
||||
public List<Bucket> listBuckets(long storeId) {
|
||||
Map<String, Long> bucketUsage = getAllBucketsUsage(storeId);
|
||||
List<Bucket> bucketList = new ArrayList<Bucket>();
|
||||
for (String bucketName : bucketUsage.keySet()) {
|
||||
Bucket bucket = new BucketObject();
|
||||
bucket.setName(bucketName);
|
||||
bucketList.add(bucket);
|
||||
}
|
||||
return bucketList;
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete an empty bucket.
|
||||
* This operation fails if the bucket is not empty.
|
||||
* @param bucket the bucket to delete
|
||||
* @param storeId the store the bucket belongs to.
|
||||
* @returns true on success or throws an exception.
|
||||
* @throws CloudRuntimeException if the bucket deletion fails
|
||||
*/
|
||||
@Override
|
||||
public boolean deleteBucket(BucketTO bucket, long storeId) {
|
||||
AmazonS3 s3client = getS3ClientByBucketAndStore(bucket, storeId);
|
||||
logger.debug("Deleting bucket {}", bucket.getName());
|
||||
try {
|
||||
s3client.deleteBucket(bucket.getName());
|
||||
logger.info("Successfully deleted bucket {}", bucket.getName());
|
||||
return true;
|
||||
} catch (AmazonClientException e) {
|
||||
logger.error("Failed to delete bucket " + bucket.getName(), e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public AccessControlList getBucketAcl(BucketTO bucket, long storeId) {
|
||||
AmazonS3 s3client = getS3ClientByBucketAndStore(bucket, storeId);
|
||||
logger.debug("Getting the bucket ACL for {}", bucket.getName());
|
||||
try {
|
||||
AccessControlList acl = s3client.getBucketAcl(bucket.getName());
|
||||
logger.info("Successfully got the bucket ACL for {}", bucket.getName());
|
||||
return acl;
|
||||
} catch (AmazonClientException e) {
|
||||
logger.error("Failed to get the bucket ACL for " + bucket.getName(), e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setBucketAcl(BucketTO bucket, AccessControlList acl, long storeId) {
|
||||
AmazonS3 s3client = getS3ClientByBucketAndStore(bucket, storeId);
|
||||
logger.debug("Setting the bucket ACL for {}", bucket.getName());
|
||||
try {
|
||||
s3client.setBucketAcl(bucket.getName(), acl);
|
||||
logger.info("Successfully set the bucket ACL for {}", bucket.getName());
|
||||
return;
|
||||
} catch (AmazonClientException e) {
|
||||
logger.error("Failed to set the bucket ACL for " + bucket.getName(), e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Set the bucket policy to either "public" or "private".
 * If set to private, we delete any existing policy.
 * For public, we allow objects to be read but not listed.
 *
 * @param bucket the bucket whose policy is being changed
 * @param policy "private" (case-insensitive) to remove any policy; anything else applies the public-read policy
 * @param storeId the store the bucket belongs to
 * @throws CloudRuntimeException if applying the policy fails
 */
@Override
public void setBucketPolicy(BucketTO bucket, String policy, long storeId) {
    if ("private".equalsIgnoreCase(policy)) {
        // Private means "no policy at all": remove whatever is currently set.
        deleteBucketPolicy(bucket, storeId);
        return;
    }

    // Build an AWS policy document granting anonymous s3:GetObject on the
    // bucket's objects only (no ListBucket, so the contents cannot be enumerated).
    StringBuilder sb = new StringBuilder();
    sb.append("{\n");
    sb.append(" \"Version\": \"2012-10-17\",\n");
    sb.append(" \"Statement\": [\n");
    sb.append(" {\n");
    sb.append(" \"Sid\": \"PublicReadForObjects\",\n");
    sb.append(" \"Effect\": \"Allow\",\n");
    sb.append(" \"Principal\": \"*\",\n");
    sb.append(" \"Action\": \"s3:GetObject\",\n");
    sb.append(" \"Resource\": \"arn:aws:s3:::%s/*\"\n");
    sb.append(" }\n");
    sb.append(" ]\n");
    sb.append("}\n");

    // Substitute the bucket name into the Resource ARN.
    // NOTE(review): safe because S3 bucket names cannot contain '%' — confirm naming is validated upstream.
    String jsonPolicy = String.format(sb.toString(), bucket.getName());

    AmazonS3 s3client = getS3ClientByBucketAndStore(bucket, storeId);
    logger.debug("Setting the bucket policy to {} for {}", policy, bucket.getName());
    try {
        s3client.setBucketPolicy(bucket.getName(), jsonPolicy);
        logger.info("Successfully set the bucket policy to {} for {}", policy, bucket.getName());
        return;
    } catch (AmazonClientException e) {
        logger.error("Failed to set the bucket policy for " + bucket.getName(), e);
        throw new CloudRuntimeException(e);
    }
}
|
||||
|
||||
@Override
|
||||
public BucketPolicy getBucketPolicy(BucketTO bucket, long storeId) {
|
||||
AmazonS3 s3client = getS3ClientByBucketAndStore(bucket, storeId);
|
||||
logger.debug("Getting the bucket policy for {}", bucket.getName());
|
||||
try {
|
||||
BucketPolicy bp = s3client.getBucketPolicy(bucket.getName());
|
||||
logger.info("Successfully got the bucket policy for {}", bucket.getName());
|
||||
return bp;
|
||||
} catch (AmazonClientException e) {
|
||||
logger.error("Failed to get the bucket policy for " + bucket.getName(), e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void deleteBucketPolicy(BucketTO bucket, long storeId) {
|
||||
AmazonS3 s3client = getS3ClientByBucketAndStore(bucket, storeId);
|
||||
logger.debug("Deleting bucket policy for {}", bucket.getName());
|
||||
try {
|
||||
s3client.deleteBucketPolicy(bucket.getName());
|
||||
logger.info("Successfully deleted bucket policy for {}", bucket.getName());
|
||||
return;
|
||||
} catch (AmazonClientException e) {
|
||||
logger.error("Failed to delete bucket policy for " + bucket.getName(), e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean setBucketEncryption(BucketTO bucket, long storeId) {
|
||||
AmazonS3 s3client = getS3ClientByBucketAndStore(bucket, storeId);
|
||||
logger.debug("Enabling bucket encryption configuration for {}", bucket.getName());
|
||||
try {
|
||||
SetBucketEncryptionRequest eRequest = new SetBucketEncryptionRequest();
|
||||
eRequest.setBucketName(bucket.getName());
|
||||
|
||||
ServerSideEncryptionByDefault sseByDefault = new ServerSideEncryptionByDefault();
|
||||
sseByDefault.setSSEAlgorithm(SSEAlgorithm.AES256.toString());
|
||||
|
||||
ServerSideEncryptionRule sseRule = new ServerSideEncryptionRule();
|
||||
sseRule.setApplyServerSideEncryptionByDefault(sseByDefault);
|
||||
|
||||
List<ServerSideEncryptionRule> sseRules = new ArrayList<ServerSideEncryptionRule>();
|
||||
sseRules.add(sseRule);
|
||||
|
||||
ServerSideEncryptionConfiguration sseConf = new ServerSideEncryptionConfiguration();
|
||||
sseConf.setRules(sseRules);
|
||||
|
||||
eRequest.setServerSideEncryptionConfiguration(sseConf);
|
||||
s3client.setBucketEncryption(eRequest);
|
||||
|
||||
logger.info("Successfully enabled bucket encryption configuration for {}", bucket.getName());
|
||||
return true;
|
||||
} catch (AmazonClientException e) {
|
||||
logger.error("Failed to enable bucket encryption configuration for " + bucket.getName(), e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean deleteBucketEncryption(BucketTO bucket, long storeId) {
|
||||
AmazonS3 s3client = getS3ClientByBucketAndStore(bucket, storeId);
|
||||
logger.debug("Deleting bucket encryption configuration for {}", bucket.getName());
|
||||
try {
|
||||
s3client.deleteBucketEncryption(bucket.getName());
|
||||
logger.info("Successfully deleted bucket encryption configuration for {}", bucket.getName());
|
||||
return true;
|
||||
} catch (AmazonClientException e) {
|
||||
logger.error("Failed to delete bucket encryption configuration for " + bucket.getName(), e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean setBucketVersioning(BucketTO bucket, long storeId) {
|
||||
AmazonS3 s3client = getS3ClientByBucketAndStore(bucket, storeId);
|
||||
logger.debug("Enabling versioning for bucket {}", bucket.getName());
|
||||
try {
|
||||
BucketVersioningConfiguration vConf = new BucketVersioningConfiguration(BucketVersioningConfiguration.ENABLED);
|
||||
SetBucketVersioningConfigurationRequest vRequest = new SetBucketVersioningConfigurationRequest(bucket.getName(), vConf);
|
||||
s3client.setBucketVersioningConfiguration(vRequest);
|
||||
logger.info("Successfully enabled versioning for bucket {}", bucket.getName());
|
||||
return true;
|
||||
} catch (AmazonClientException e) {
|
||||
logger.error("Failed to enable versioning for bucket " + bucket.getName(), e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean deleteBucketVersioning(BucketTO bucket, long storeId) {
|
||||
AmazonS3 s3client = getS3ClientByBucketAndStore(bucket, storeId);
|
||||
logger.debug("Suspending versioning for bucket {}", bucket.getName());
|
||||
try {
|
||||
BucketVersioningConfiguration vConf = new BucketVersioningConfiguration(BucketVersioningConfiguration.SUSPENDED);
|
||||
SetBucketVersioningConfigurationRequest vRequest = new SetBucketVersioningConfigurationRequest(bucket.getName(), vConf);
|
||||
s3client.setBucketVersioningConfiguration(vRequest);
|
||||
logger.info("Successfully suspended versioning for bucket {}", bucket.getName());
|
||||
return true;
|
||||
} catch (AmazonClientException e) {
|
||||
logger.error("Failed to suspend versioning for bucket " + bucket.getName(), e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the bucket quota to a size limit specified in GiB.
|
||||
*
|
||||
* Cloudian HyperStore does not currently support bucket quota limits.
|
||||
* CloudStack itself requires a quota to be set. HyperStore may add
|
||||
* Bucket Quota support in a future version. Currently, we only support
|
||||
* setting the quota to zero to indicate no quota.
|
||||
*
|
||||
* @param bucket the bucket
|
||||
* @param storeId the store
|
||||
* @param size the GiB (1024^3) size to set the quota to. Only 0 is supported.
|
||||
* @throws CloudRuntimeException is thrown for any other value other than 0.
|
||||
*/
|
||||
@Override
|
||||
public void setBucketQuota(BucketTO bucket, long storeId, long size) {
|
||||
if (size == 0) {
|
||||
logger.debug("Bucket \"{}\" quota set to 0 (no quota).", bucket.getName());
|
||||
return;
|
||||
}
|
||||
// Any other setting, throw an exception.
|
||||
logger.warn("Unable to set quota for bucket \"{}\" to {}GiB. Only 0 is supported.", bucket.getName(), size);
|
||||
throw new CloudRuntimeException("This bucket does not support a quota. Use 0 to specify no quota.");
|
||||
}
|
||||
|
||||
/**
 * Return a map of bucket names managed by this store and their sizes (in bytes).
 *
 * Note: Bucket Usage Statistics in HyperStore are disabled by default. They
 * can be enabled by the HyperStore Administrator by setting of the configuration
 * 's3.qos.bucketLevel=true'. If this is not enabled, the values returned will
 * either be 0 or out of date.
 *
 * @param storeId the object store whose buckets are reported
 * @return map of bucket names to usage bytes.
 */
@Override
public Map<String, Long> getAllBucketsUsage(long storeId) {
    Map<String, Long> bucketUsage = new HashMap<String, Long>();
    List<BucketVO> bucketList = _bucketDao.listByObjectStoreId(storeId);
    if (bucketList.isEmpty()) {
        return bucketUsage;
    }

    // Create an unique list of domains from the bucket list
    // and add all the bucket names to the bucketUsage map with value -1 as a marker
    // to know which buckets CloudStack cares about. The -1 will be replaced later.
    List<Long> domainIds = new ArrayList<Long>();
    for (BucketVO bucket : bucketList) {
        long bucketDomainId = bucket.getDomainId();
        if (! domainIds.contains(bucketDomainId)) {
            domainIds.add(bucketDomainId);
        }
        bucketUsage.put(bucket.getName(), -1L);
    }

    // Ask for bucket usages per domain (ie. per HyperStore Group)
    CloudianClient client = getCloudianClientByStoreId(storeId);
    for (long domainId : domainIds) {
        Domain domain = _domainDao.findById(domainId);
        final String hsGroupId = getHyperStoreGroupId(domain);
        List<CloudianUserBucketUsage> groupBucketUsages = client.getUserBucketUsages(hsGroupId, null, null);
        for (CloudianUserBucketUsage userBucketUsages : groupBucketUsages) {
            for (CloudianBucketUsage cbu : userBucketUsages.getBuckets()) {
                // Map.replace() only touches keys seeded above, so usage reported
                // for buckets CloudStack does not track is silently ignored.
                if (cbu.getByteCount() >= 0L) {
                    // Update the -1 entry to actual byteCount.
                    bucketUsage.replace(cbu.getBucketName(), cbu.getByteCount());
                } else {
                    // Replace with 0 instead of actual value. Race condition can cause this and it
                    // should be fixed automatically by a repair job.
                    bucketUsage.replace(cbu.getBucketName(), 0L);
                    logger.info("Ignoring negative bucket usage for \"{}\": {}", cbu.getBucketName(), cbu.getByteCount());
                }
            }
        }
    }

    // Remove any remaining -1 entries. These would probably be buckets that were
    // deleted outside of CloudStack control. A missing entry might be better than
    // returning the bucket name with -1 or 0.
    bucketUsage.entrySet().removeIf(entry -> entry.getValue() == -1);

    return bucketUsage;
}
|
||||
|
||||
/**
|
||||
* Get a connection to the Cloudian HyperStore ADMIN API Service.
|
||||
* @param storeId the object store containing connection info for HyperStore
|
||||
* @return a connection object (never null)
|
||||
* @throws CloudRuntimeException if the connection fails
|
||||
*/
|
||||
protected CloudianClient getCloudianClientByStoreId(long storeId) {
|
||||
ObjectStoreVO store = _storeDao.findById(storeId);
|
||||
String url = store.getUrl();
|
||||
Map<String, String> storeDetails = _storeDetailsDao.getDetails(storeId);
|
||||
String adminUsername = storeDetails.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_USER_NAME);
|
||||
String adminPassword = storeDetails.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_PASSWORD);
|
||||
String strValidateSSL = storeDetails.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_VALIDATE_SSL);
|
||||
boolean validateSSL = Boolean.parseBoolean(strValidateSSL);
|
||||
|
||||
return CloudianHyperStoreUtil.getCloudianClient(url, adminUsername, adminPassword, validateSSL);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an S3 connection for the store and account identified by the bucket.
|
||||
* NOTE: https connections must use a trusted certificate.
|
||||
*
|
||||
* @param store the object store of the S3 service to connect to
|
||||
* @param bucket bucket information identifying the account which identifies the credentials to use.
|
||||
* @return an S3 connection (never null)
|
||||
* @throws CloudRuntimeException on failure.
|
||||
*/
|
||||
protected AmazonS3 getS3ClientByBucketAndStore(BucketTO bucket, long storeId) {
|
||||
// Find the S3 Root user credentials of the Account Owner rather than using the
|
||||
// credentials stored with the bucket which may be IAM User Credentials.
|
||||
for (BucketVO bvo : _bucketDao.listByObjectStoreId(storeId)) {
|
||||
if (bvo.getName().equals(bucket.getName())) {
|
||||
long accountId = bvo.getAccountId();
|
||||
Map<String, String> storeDetails = _storeDetailsDao.getDetails(storeId);
|
||||
String s3url = storeDetails.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_S3_URL);
|
||||
String accessKey = _accountDetailsDao.findDetail(accountId, CloudianHyperStoreUtil.KEY_ROOT_ACCESS_KEY).getValue();
|
||||
String secretKey = _accountDetailsDao.findDetail(accountId, CloudianHyperStoreUtil.KEY_ROOT_SECRET_KEY).getValue();
|
||||
logger.debug("Creating S3 connection to {} for {} ", s3url, accessKey);
|
||||
return CloudianHyperStoreUtil.getS3Client(s3url, accessKey, secretKey);
|
||||
}
|
||||
}
|
||||
throw new CloudRuntimeException(String.format("Bucket Name not found: %s", bucket.getName()));
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an S3 connection for the given endpoint and credentials.
|
||||
* NOTE: https connections must use a trusted certificate.
|
||||
* NOTE: The only reason this wrapper method is here is for unit test mocking.
|
||||
*
|
||||
* @param s3url the url of the S3 service
|
||||
* @param accessKey the credentials to use for the S3 connection.
|
||||
* @param secretKey the matching secret key.
|
||||
* @return an S3 connection (never null)
|
||||
* @throws CloudRuntimeException on failure.
|
||||
*/
|
||||
protected AmazonS3 getS3Client(String s3url, String accessKey, String secretKey) {
|
||||
return CloudianHyperStoreUtil.getS3Client(s3url, accessKey, secretKey);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an IAM connection for the given store using the given credentials.
|
||||
* NOTE: if the store uses https, it must use a trusted certificate.
|
||||
* NOTE: HyperStore IAM service is usually found on ports 16080/16443.
|
||||
*
|
||||
* @param storeId the object store
|
||||
* @param credential the credential pair to use for the iam connection.
|
||||
* @return an IAM connection (never null)
|
||||
* @throws CloudRuntimeException on failure.
|
||||
*/
|
||||
protected AmazonIdentityManagement getIAMClientByStoreId(long storeId, CloudianCredential credential) {
|
||||
Map<String, String> storeDetails = _storeDetailsDao.getDetails(storeId);
|
||||
String iamUrl = storeDetails.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_IAM_URL);
|
||||
logger.debug("Creating a new IAM connection to {} for {}", iamUrl, credential.getAccessKey());
|
||||
|
||||
return CloudianHyperStoreUtil.getIAMClient(iamUrl, credential.getAccessKey(), credential.getSecretKey());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,151 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
package org.apache.cloudstack.storage.datastore.lifecycle;
|
||||
|
||||
import com.cloud.agent.api.StoragePoolInfo;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
import org.apache.cloudstack.cloudian.client.CloudianClient;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO;
|
||||
import org.apache.cloudstack.storage.datastore.util.CloudianHyperStoreUtil;
|
||||
import org.apache.cloudstack.storage.object.datastore.ObjectStoreHelper;
|
||||
import org.apache.cloudstack.storage.object.datastore.ObjectStoreProviderManager;
|
||||
import org.apache.cloudstack.storage.object.store.lifecycle.ObjectStoreLifeCycle;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
public class CloudianHyperStoreObjectStoreLifeCycleImpl implements ObjectStoreLifeCycle {
|
||||
|
||||
protected Logger logger = LogManager.getLogger(CloudianHyperStoreObjectStoreLifeCycleImpl.class);
|
||||
|
||||
@Inject
|
||||
ObjectStoreHelper objectStoreHelper;
|
||||
@Inject
|
||||
ObjectStoreProviderManager objectStoreMgr;
|
||||
|
||||
public CloudianHyperStoreObjectStoreLifeCycleImpl() {
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataStore initialize(Map<String, Object> dsInfos) {
|
||||
|
||||
String name = (String)dsInfos.get(CloudianHyperStoreUtil.STORE_KEY_NAME);
|
||||
String url = (String)dsInfos.get(CloudianHyperStoreUtil.STORE_KEY_URL);
|
||||
String providerName = (String)dsInfos.get(CloudianHyperStoreUtil.STORE_KEY_PROVIDER_NAME);
|
||||
|
||||
// Check the providerName is what we expect
|
||||
if (! StringUtils.equalsIgnoreCase(providerName, CloudianHyperStoreUtil.OBJECT_STORE_PROVIDER_NAME)) {
|
||||
String msg = String.format("Unexpected providerName \"%s\". Expected \"%s\"", providerName, CloudianHyperStoreUtil.OBJECT_STORE_PROVIDER_NAME);
|
||||
logger.error(msg);
|
||||
throw new CloudRuntimeException(msg);
|
||||
}
|
||||
|
||||
Map<String, Object> objectStoreParameters = new HashMap<String, Object>();
|
||||
objectStoreParameters.put(CloudianHyperStoreUtil.STORE_KEY_NAME, name);
|
||||
objectStoreParameters.put(CloudianHyperStoreUtil.STORE_KEY_URL, url);
|
||||
objectStoreParameters.put(CloudianHyperStoreUtil.STORE_KEY_PROVIDER_NAME, providerName);
|
||||
|
||||
// Pull out the details map
|
||||
@SuppressWarnings("unchecked")
|
||||
Map<String, String> details = (Map<String, String>) dsInfos.get(CloudianHyperStoreUtil.STORE_KEY_DETAILS);
|
||||
if (details == null) {
|
||||
String msg = String.format("Unexpected null receiving Object Store initialization \"%s\"", CloudianHyperStoreUtil.STORE_KEY_DETAILS);
|
||||
logger.error(msg);
|
||||
throw new CloudRuntimeException(msg);
|
||||
}
|
||||
|
||||
// Note: The Admin Username/Password are available respectively as accesskey/secretkey
|
||||
String adminUsername = details.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_USER_NAME);
|
||||
String adminPassword = details.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_PASSWORD);
|
||||
String validateSSL = details.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_VALIDATE_SSL);
|
||||
boolean adminValidateSSL = Boolean.parseBoolean(validateSSL);
|
||||
String s3Url = details.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_S3_URL);
|
||||
String iamUrl = details.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_IAM_URL);
|
||||
|
||||
if (StringUtils.isAnyBlank(adminUsername, adminPassword, validateSSL, s3Url, iamUrl)) {
|
||||
final String asteriskPassword = (adminPassword == null) ? null : "*".repeat(adminPassword.length());
|
||||
logger.error("Required parameters are missing; username={} password={} validateSSL={} s3Url={} iamUrl={}",
|
||||
adminUsername, asteriskPassword, validateSSL, s3Url, iamUrl);
|
||||
throw new CloudRuntimeException("Required Cloudian HyperStore configuration parameters are missing/empty.");
|
||||
}
|
||||
|
||||
// Validate the ADMIN API Service Information
|
||||
logger.info("Confirming connection to the HyperStore Admin Service at: {}", url);
|
||||
CloudianClient client = CloudianHyperStoreUtil.getCloudianClient(url, adminUsername, adminPassword, adminValidateSSL);
|
||||
String version = client.getServerVersion();
|
||||
|
||||
// Validate S3 and IAM Service URLs.
|
||||
CloudianHyperStoreUtil.validateS3Url(s3Url);
|
||||
CloudianHyperStoreUtil.validateIAMUrl(iamUrl);
|
||||
|
||||
logger.info("Successfully connected to HyperStore: {}", version);
|
||||
|
||||
ObjectStoreVO objectStore = objectStoreHelper.createObjectStore(objectStoreParameters, details);
|
||||
return objectStoreMgr.getObjectStore(objectStore.getId());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean attachCluster(DataStore store, ClusterScope scope) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean maintain(DataStore store) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancelMaintain(DataStore store) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean deleteDataStore(DataStore store) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle#migrateToObjectStore(org.apache.cloudstack.engine.subsystem.api.storage.DataStore)
|
||||
*/
|
||||
@Override
|
||||
public boolean migrateToObjectStore(DataStore store) {
|
||||
return false;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,87 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
package org.apache.cloudstack.storage.datastore.provider;
|
||||
|
||||
import com.cloud.utils.component.ComponentContext;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectStoreProvider;
|
||||
import org.apache.cloudstack.storage.datastore.driver.CloudianHyperStoreObjectStoreDriverImpl;
|
||||
import org.apache.cloudstack.storage.datastore.lifecycle.CloudianHyperStoreObjectStoreLifeCycleImpl;
|
||||
import org.apache.cloudstack.storage.datastore.util.CloudianHyperStoreUtil;
|
||||
import org.apache.cloudstack.storage.object.ObjectStoreDriver;
|
||||
import org.apache.cloudstack.storage.object.datastore.ObjectStoreHelper;
|
||||
import org.apache.cloudstack.storage.object.datastore.ObjectStoreProviderManager;
|
||||
import org.apache.cloudstack.storage.object.store.lifecycle.ObjectStoreLifeCycle;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
@Component
|
||||
public class CloudianHyperStoreObjectStoreProviderImpl implements ObjectStoreProvider {
|
||||
|
||||
@Inject
|
||||
ObjectStoreProviderManager storeMgr;
|
||||
@Inject
|
||||
ObjectStoreHelper helper;
|
||||
|
||||
private final String providerName = CloudianHyperStoreUtil.OBJECT_STORE_PROVIDER_NAME;
|
||||
protected ObjectStoreLifeCycle lifeCycle;
|
||||
protected ObjectStoreDriver driver;
|
||||
|
||||
@Override
|
||||
public DataStoreLifeCycle getDataStoreLifeCycle() {
|
||||
return lifeCycle;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return this.providerName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean configure(Map<String, Object> params) {
|
||||
lifeCycle = ComponentContext.inject(CloudianHyperStoreObjectStoreLifeCycleImpl.class);
|
||||
driver = ComponentContext.inject(CloudianHyperStoreObjectStoreDriverImpl.class);
|
||||
storeMgr.registerDriver(this.getName(), driver);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataStoreDriver getDataStoreDriver() {
|
||||
return this.driver;
|
||||
}
|
||||
|
||||
@Override
|
||||
public HypervisorHostListener getHostListener() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set<DataStoreProviderType> getTypes() {
|
||||
Set<DataStoreProviderType> types = new HashSet<DataStoreProviderType>();
|
||||
types.add(DataStoreProviderType.OBJECT);
|
||||
return types;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,211 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
package org.apache.cloudstack.storage.datastore.util;
|
||||
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.security.KeyManagementException;
|
||||
import java.security.KeyStoreException;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
|
||||
import org.apache.cloudstack.cloudian.client.CloudianClient;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import com.amazonaws.AmazonServiceException;
|
||||
import com.amazonaws.auth.AWSStaticCredentialsProvider;
|
||||
import com.amazonaws.auth.BasicAWSCredentials;
|
||||
import com.amazonaws.client.builder.AwsClientBuilder;
|
||||
import com.amazonaws.services.identitymanagement.AmazonIdentityManagement;
|
||||
import com.amazonaws.services.identitymanagement.AmazonIdentityManagementClientBuilder;
|
||||
import com.amazonaws.services.s3.AmazonS3;
|
||||
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
public class CloudianHyperStoreUtil {
|
||||
|
||||
/** The name of our Object Store Provider */
|
||||
public static final String OBJECT_STORE_PROVIDER_NAME = "Cloudian HyperStore";
|
||||
|
||||
public static final String STORE_KEY_PROVIDER_NAME = "providerName";
|
||||
public static final String STORE_KEY_URL = "url";
|
||||
public static final String STORE_KEY_NAME = "name";
|
||||
public static final String STORE_KEY_DETAILS = "details";
|
||||
|
||||
// Store Details Map key names - managed outside of plugin
|
||||
public static final String STORE_DETAILS_KEY_USER_NAME = "accesskey"; // admin user name
|
||||
public static final String STORE_DETAILS_KEY_PASSWORD = "secretkey"; // admin password
|
||||
public static final String STORE_DETAILS_KEY_VALIDATE_SSL = "validateSSL";
|
||||
public static final String STORE_DETAILS_KEY_S3_URL = "s3Url";
|
||||
public static final String STORE_DETAILS_KEY_IAM_URL = "iamUrl";
|
||||
|
||||
// Account Detail Map key names
|
||||
public static final String KEY_ROOT_ACCESS_KEY = "hs_AccessKey";
|
||||
public static final String KEY_ROOT_SECRET_KEY = "hs_SecretKey";
|
||||
public static final String KEY_IAM_ACCESS_KEY = "hs_IAMAccessKey";
|
||||
public static final String KEY_IAM_SECRET_KEY = "hs_IAMSecretKey";
|
||||
|
||||
public static final int DEFAULT_ADMIN_PORT = 19443;
|
||||
public static final int DEFAULT_ADMIN_TIMEOUT_SECONDS = 10;
|
||||
|
||||
public static final String IAM_USER_USERNAME = "CloudStack";
|
||||
public static final String IAM_USER_POLICY_NAME = "CloudStackPolicy";
|
||||
public static final String IAM_USER_POLICY = "{\n" +
|
||||
" \"Version\": \"2012-10-17\",\n" +
|
||||
" \"Statement\": [\n" +
|
||||
" {\n" +
|
||||
" \"Sid\": \"AllowFullS3Access\",\n" +
|
||||
" \"Effect\": \"Allow\",\n" +
|
||||
" \"Action\": [\n" +
|
||||
" \"s3:*\"\n" +
|
||||
" ],\n" +
|
||||
" \"Resource\": \"*\"\n" +
|
||||
" },\n" +
|
||||
" {\n" +
|
||||
" \"Sid\": \"ExceptBucketCreationOrDeletion\",\n" +
|
||||
" \"Effect\": \"Deny\",\n" +
|
||||
" \"Action\": [\n" +
|
||||
" \"s3:createBucket\",\n" +
|
||||
" \"s3:deleteBucket\"\n" +
|
||||
" ],\n" +
|
||||
" \"Resource\": \"*\"\n" +
|
||||
" }\n" +
|
||||
" ]\n" +
|
||||
"}\n";
|
||||
|
||||
/**
|
||||
* This method is solely for test purposes so that we can mock the timeout.
|
||||
*
|
||||
* @returns the timeout in seconds
|
||||
*/
|
||||
protected static int getAdminTimeoutSeconds() {
|
||||
return DEFAULT_ADMIN_TIMEOUT_SECONDS;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a connection to the Cloudian HyperStore ADMIN API Service.
|
||||
* @param url the url of the ADMIN API service
|
||||
* @param user the admin username to connect as
|
||||
* @param pass the matching admin password
|
||||
* @param validateSSL validate the SSL Certificate (when using https://)
|
||||
* @return a connection object (never null)
|
||||
* @throws CloudRuntimeException if the connection fails for any reason
|
||||
*/
|
||||
public static CloudianClient getCloudianClient(String url, String user, String pass, boolean validateSSL) {
|
||||
try {
|
||||
URL parsedURL = new URL(url);
|
||||
String scheme = parsedURL.getProtocol();
|
||||
String host = parsedURL.getHost();
|
||||
int port = parsedURL.getPort();
|
||||
if (port == -1) {
|
||||
port = DEFAULT_ADMIN_PORT;
|
||||
}
|
||||
return new CloudianClient(host, port, scheme, user, pass, validateSSL, getAdminTimeoutSeconds());
|
||||
} catch (MalformedURLException | KeyStoreException | NoSuchAlgorithmException | KeyManagementException e) {
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an S3 connection for the given endpoint and credentials.
|
||||
* NOTE: https connections must use a trusted certificate.
|
||||
*
|
||||
* @param url the url of the S3 service
|
||||
* @param accessKey the credentials to use for the S3 connection.
|
||||
* @param secretKey the matching secret key.
|
||||
* @return an S3 connection (never null)
|
||||
* @throws CloudRuntimeException on failure.
|
||||
*/
|
||||
public static AmazonS3 getS3Client(String url, String accessKey, String secretKey) {
|
||||
AmazonS3 client = AmazonS3ClientBuilder.standard()
|
||||
.enablePathStyleAccess()
|
||||
.withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)))
|
||||
.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(url, "auto"))
|
||||
.build();
|
||||
if (client == null) {
|
||||
throw new CloudRuntimeException("Error while creating Cloudian S3 client");
|
||||
}
|
||||
return client;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an IAM connection for the given endpoint and credentials.
|
||||
* NOTE: https connections must use a trusted certificate.
|
||||
* NOTE: HyperStore IAM service is usually found on ports 16080/16443.
|
||||
*
|
||||
* @param url the url which should include the HyperStore IAM port if not 80/443.
|
||||
* @param accessKey the credentials to use for the iam connection.
|
||||
* @param secretKey the matching secret key.
|
||||
* @return an IAM connection (never null)
|
||||
* @throws CloudRuntimeException on failure.
|
||||
*/
|
||||
public static AmazonIdentityManagement getIAMClient(String url, String accessKey, String secretKey) {
|
||||
AmazonIdentityManagement iamClient = AmazonIdentityManagementClientBuilder.standard()
|
||||
.withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)))
|
||||
.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(url, "auto"))
|
||||
.build();
|
||||
if (iamClient == null) {
|
||||
throw new CloudRuntimeException("Error while creating Cloudian IAM client");
|
||||
}
|
||||
return iamClient;
|
||||
}
|
||||
|
||||
/**
|
||||
* Test the S3Url to confirm it behaves like an S3 Service.
|
||||
*
|
||||
* The method uses bad credentials and looks for the particular error from S3
|
||||
* that says InvalidAccessKeyId was used. The method quietly returns if
|
||||
* we connect and get the expected error back.
|
||||
*
|
||||
* @param s3Url the url to check
|
||||
*
|
||||
* @throws RuntimeException if there is any issue.
|
||||
*/
|
||||
public static void validateS3Url(String s3Url) {
|
||||
try {
|
||||
AmazonS3 s3Client = CloudianHyperStoreUtil.getS3Client(s3Url, "unknown", "unknown");
|
||||
s3Client.listBuckets();
|
||||
} catch (AmazonServiceException e) {
|
||||
// Check if the ErrorCode says that the access key (we used "unknown" was invalid
|
||||
if (StringUtils.compareIgnoreCase(e.getErrorCode(), "InvalidAccessKeyId") != 0) {
|
||||
throw new CloudRuntimeException("Unexpected response from S3 Endpoint.", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Test the IAMUrl to confirm it behaves like an IAM Service.
|
||||
*
|
||||
* The method uses bad credentials and looks for the particular error from IAM
|
||||
* that says InvalidAccessKeyId or InvalidClientTokenId was used. The method quietly
|
||||
* returns if we connect and get the expected error back.
|
||||
*
|
||||
* @param iamUrl the url to check
|
||||
*
|
||||
* @throws RuntimeException if there is any issue.
|
||||
*/
|
||||
public static void validateIAMUrl(String iamUrl) {
|
||||
try {
|
||||
AmazonIdentityManagement iamClient = CloudianHyperStoreUtil.getIAMClient(iamUrl, "unknown", "unknown");
|
||||
iamClient.listAccessKeys();
|
||||
} catch (AmazonServiceException e) {
|
||||
if (! StringUtils.equalsAnyIgnoreCase(e.getErrorCode(), "InvalidAccessKeyId", "InvalidClientTokenId")) {
|
||||
throw new CloudRuntimeException("Unexpected response from IAM Endpoint.", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
name=storage-object-cloudian
|
||||
parent=storage
|
||||
|
|
@ -0,0 +1,31 @@
|
|||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
<beans xmlns="http://www.springframework.org/schema/beans"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xmlns:context="http://www.springframework.org/schema/context"
|
||||
xmlns:aop="http://www.springframework.org/schema/aop"
|
||||
xsi:schemaLocation="http://www.springframework.org/schema/beans
|
||||
http://www.springframework.org/schema/beans/spring-beans.xsd
|
||||
http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd
|
||||
http://www.springframework.org/schema/context
|
||||
http://www.springframework.org/schema/context/spring-context.xsd"
|
||||
>
|
||||
<bean id="cloudianHyperStoreObjectStoreProviderImpl"
|
||||
class="org.apache.cloudstack.storage.datastore.provider.CloudianHyperStoreObjectStoreProviderImpl" />
|
||||
</beans>
|
||||
|
|
@ -0,0 +1,686 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
package org.apache.cloudstack.storage.datastore.driver;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertThrows;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.anyLong;
|
||||
import static org.mockito.ArgumentMatchers.anyMap;
|
||||
import static org.mockito.ArgumentMatchers.anyString;
|
||||
import static org.mockito.Mockito.doReturn;
|
||||
import static org.mockito.Mockito.never;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.cloudstack.cloudian.client.CloudianClient;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianCredential;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianGroup;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianUser;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianUserBucketUsage;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianUserBucketUsage.CloudianBucketUsage;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO;
|
||||
import org.apache.cloudstack.storage.datastore.util.CloudianHyperStoreUtil;
|
||||
import org.apache.cloudstack.storage.object.Bucket;
|
||||
|
||||
import com.amazonaws.services.identitymanagement.AmazonIdentityManagement;
|
||||
import com.amazonaws.services.identitymanagement.model.AccessKey;
|
||||
import com.amazonaws.services.identitymanagement.model.AccessKeyMetadata;
|
||||
import com.amazonaws.services.identitymanagement.model.CreateAccessKeyRequest;
|
||||
import com.amazonaws.services.identitymanagement.model.CreateAccessKeyResult;
|
||||
import com.amazonaws.services.identitymanagement.model.CreateUserRequest;
|
||||
import com.amazonaws.services.identitymanagement.model.ListAccessKeysRequest;
|
||||
import com.amazonaws.services.identitymanagement.model.ListAccessKeysResult;
|
||||
import com.amazonaws.services.identitymanagement.model.PutUserPolicyRequest;
|
||||
import com.amazonaws.services.s3.AmazonS3;
|
||||
import com.amazonaws.services.s3.model.CreateBucketRequest;
|
||||
import com.amazonaws.services.s3.model.ServerSideEncryptionRule;
|
||||
import com.amazonaws.services.s3.model.SetBucketCrossOriginConfigurationRequest;
|
||||
import com.amazonaws.services.s3.model.SetBucketEncryptionRequest;
|
||||
import com.amazonaws.services.s3.model.SetBucketVersioningConfigurationRequest;
|
||||
import com.cloud.agent.api.to.BucketTO;
|
||||
import com.cloud.domain.DomainVO;
|
||||
import com.cloud.domain.dao.DomainDao;
|
||||
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.ArgumentCaptor;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.MockedStatic;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.MockitoAnnotations;
|
||||
import org.mockito.Spy;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
import com.cloud.storage.BucketVO;
|
||||
import com.cloud.storage.dao.BucketDao;
|
||||
import com.cloud.user.AccountDetailVO;
|
||||
import com.cloud.user.AccountDetailsDao;
|
||||
import com.cloud.user.AccountVO;
|
||||
import com.cloud.user.dao.AccountDao;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class CloudianHyperStoreObjectStoreDriverImplTest {
|
||||
|
||||
@Spy
|
||||
CloudianHyperStoreObjectStoreDriverImpl cloudianHyperStoreObjectStoreDriverImpl = new CloudianHyperStoreObjectStoreDriverImpl();
|
||||
|
||||
@Mock
|
||||
AmazonS3 s3Client;
|
||||
@Mock
|
||||
CloudianClient cloudianClient;
|
||||
@Mock
|
||||
AmazonIdentityManagement iamClient;
|
||||
@Mock
|
||||
ObjectStoreDao objectStoreDao;
|
||||
@Mock
|
||||
ObjectStoreVO objectStoreVO;
|
||||
@Mock
|
||||
ObjectStoreDetailsDao objectStoreDetailsDao;
|
||||
@Mock
|
||||
AccountDao accountDao;
|
||||
@Mock
|
||||
BucketDao bucketDao;
|
||||
@Mock
|
||||
DomainDao domainDao;
|
||||
@Mock
|
||||
AccountDetailsDao accountDetailsDao;
|
||||
|
||||
@Mock
|
||||
AccountVO account;
|
||||
@Mock
|
||||
DomainVO domain;
|
||||
|
||||
BucketVO bucketVo;
|
||||
Map<String, String> StoreDetailsMap;
|
||||
Map<String, String> AccountDetailsMap;
|
||||
|
||||
static long TEST_STORE_ID = 1010L;
|
||||
static long TEST_ACCOUNT_ID = 2010L;
|
||||
static long TEST_DOMAIN_ID = 3010L;
|
||||
static String TEST_ADMIN_URL = "https://admin-endpoint:19443";
|
||||
static String TEST_ADMIN_USER_NAME = "test_admin";
|
||||
static String TEST_ADMIN_PASSWORD = "test_password";
|
||||
static String TEST_ADMIN_VALIDATE_SSL = "true";
|
||||
static String TEST_BUCKET_NAME = "testbucketname";
|
||||
static String TEST_ROOT_AK = "root_access_key";
|
||||
static String TEST_ROOT_SK = "root_secret_key";
|
||||
static String TEST_IAM_AK = "iam_access_key";
|
||||
static String TEST_IAM_SK = "iam_secret_key";
|
||||
static String TEST_S3_URL = "http://s3-endpoint";
|
||||
static String TEST_IAM_URL = "http://iam-endpoint:16080";
|
||||
static String TEST_BUCKET_URL = TEST_S3_URL + "/" + TEST_BUCKET_NAME;
|
||||
|
||||
private AutoCloseable closeable;
|
||||
|
||||
@Before
|
||||
public void setUp() {
|
||||
closeable = MockitoAnnotations.openMocks(this);
|
||||
cloudianHyperStoreObjectStoreDriverImpl._storeDao = objectStoreDao;
|
||||
cloudianHyperStoreObjectStoreDriverImpl._storeDetailsDao = objectStoreDetailsDao;
|
||||
cloudianHyperStoreObjectStoreDriverImpl._accountDao = accountDao;
|
||||
cloudianHyperStoreObjectStoreDriverImpl._bucketDao = bucketDao;
|
||||
cloudianHyperStoreObjectStoreDriverImpl._accountDetailsDao = accountDetailsDao;
|
||||
cloudianHyperStoreObjectStoreDriverImpl._domainDao = domainDao;
|
||||
|
||||
// Setup to return the store url for cloudianClient
|
||||
when(objectStoreDao.findById(TEST_STORE_ID)).thenReturn(objectStoreVO);
|
||||
when(objectStoreVO.getUrl()).thenReturn(TEST_ADMIN_URL);
|
||||
|
||||
// The StoreDetailMap has Endpoint info and Admin Credentials
|
||||
StoreDetailsMap = new HashMap<String, String>();
|
||||
StoreDetailsMap.put(CloudianHyperStoreUtil.STORE_DETAILS_KEY_USER_NAME, TEST_ADMIN_USER_NAME);
|
||||
StoreDetailsMap.put(CloudianHyperStoreUtil.STORE_DETAILS_KEY_PASSWORD, TEST_ADMIN_PASSWORD);
|
||||
StoreDetailsMap.put(CloudianHyperStoreUtil.STORE_DETAILS_KEY_VALIDATE_SSL, TEST_ADMIN_VALIDATE_SSL);
|
||||
StoreDetailsMap.put(CloudianHyperStoreUtil.STORE_DETAILS_KEY_S3_URL, TEST_S3_URL);
|
||||
StoreDetailsMap.put(CloudianHyperStoreUtil.STORE_DETAILS_KEY_IAM_URL, TEST_IAM_URL);
|
||||
when(objectStoreDetailsDao.getDetails(TEST_STORE_ID)).thenReturn(StoreDetailsMap);
|
||||
|
||||
// The AccountDetailsMap has credentials for operating on the account.
|
||||
AccountDetailsMap = new HashMap<String, String>();
|
||||
AccountDetailsMap.put(CloudianHyperStoreUtil.KEY_ROOT_ACCESS_KEY, TEST_ROOT_AK);
|
||||
AccountDetailsMap.put(CloudianHyperStoreUtil.KEY_ROOT_SECRET_KEY, TEST_ROOT_SK);
|
||||
AccountDetailsMap.put(CloudianHyperStoreUtil.KEY_IAM_ACCESS_KEY, TEST_IAM_AK);
|
||||
AccountDetailsMap.put(CloudianHyperStoreUtil.KEY_IAM_SECRET_KEY, TEST_IAM_SK);
|
||||
when(accountDetailsDao.findDetails(TEST_ACCOUNT_ID)).thenReturn(AccountDetailsMap);
|
||||
|
||||
// Useful test bucket info
|
||||
bucketVo = new BucketVO(TEST_ACCOUNT_ID, TEST_DOMAIN_ID, TEST_STORE_ID, TEST_BUCKET_NAME, null, false, false, false, null);
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
closeable.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetStoreTO() {
|
||||
assertNull(cloudianHyperStoreObjectStoreDriverImpl.getStoreTO(null));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateBucket() throws Exception {
|
||||
doReturn(s3Client).when(cloudianHyperStoreObjectStoreDriverImpl).getS3Client(anyString(), anyString(), anyString());
|
||||
when(bucketDao.findById(anyLong())).thenReturn(bucketVo);
|
||||
|
||||
// Actual Test
|
||||
Bucket bucketRet = cloudianHyperStoreObjectStoreDriverImpl.createBucket(bucketVo, false);
|
||||
assertEquals(TEST_BUCKET_NAME, bucketRet.getName());
|
||||
|
||||
// Capture the bucket info that was saved to the DB
|
||||
ArgumentCaptor<BucketVO> argument = ArgumentCaptor.forClass(BucketVO.class);
|
||||
verify(bucketDao, times(1)).update(any(), argument.capture());
|
||||
BucketVO UpdatedBucketVO = argument.getValue();
|
||||
assertEquals(TEST_IAM_AK, UpdatedBucketVO.getAccessKey());
|
||||
assertEquals(TEST_IAM_SK, UpdatedBucketVO.getSecretKey());
|
||||
assertEquals(TEST_BUCKET_URL, UpdatedBucketVO.getBucketURL());
|
||||
|
||||
verify(s3Client, times(1)).createBucket(any(CreateBucketRequest.class));
|
||||
verify(s3Client, times(1))
|
||||
.setBucketCrossOriginConfiguration(any(SetBucketCrossOriginConfigurationRequest.class));
|
||||
verify(s3Client, never()).deleteBucket(anyString());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateHSCredential() throws Exception {
|
||||
cloudianClient = mock(CloudianClient.class);
|
||||
List<CloudianCredential> CredList = new ArrayList<CloudianCredential>();
|
||||
CloudianCredential c1 = new CloudianCredential();
|
||||
c1.setActive(false);
|
||||
c1.setCreateDate(new Date(1L)); // oldest but inactive
|
||||
CloudianCredential c2 = new CloudianCredential();
|
||||
c2.setAccessKey(TEST_ROOT_AK);
|
||||
c2.setSecretKey(TEST_ROOT_SK);
|
||||
c2.setActive(true);
|
||||
c2.setCreateDate(new Date(2L)); // 2nd oldest
|
||||
CloudianCredential c3 = new CloudianCredential();
|
||||
c3.setActive(true);
|
||||
c3.setCreateDate(new Date(2L)); // newest
|
||||
CredList.add(c1);
|
||||
CredList.add(c2);
|
||||
CredList.add(c3);
|
||||
when(cloudianClient.listCredentials(anyString(), anyString())).thenReturn(CredList);
|
||||
|
||||
// Test expects c2 which is the oldest active credential.
|
||||
CloudianCredential actual = cloudianHyperStoreObjectStoreDriverImpl.createHSCredential(cloudianClient, "user", "group");
|
||||
assertTrue(actual.getActive());
|
||||
assertEquals(TEST_ROOT_AK, actual.getAccessKey());
|
||||
assertEquals(TEST_ROOT_SK, actual.getSecretKey());
|
||||
verify(cloudianClient, never()).createCredential(anyString(), anyString());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetAllBucketsUsageNoBuckets() throws Exception {
|
||||
when(bucketDao.listByObjectStoreId(TEST_STORE_ID)).thenReturn(new ArrayList<BucketVO>());
|
||||
Map<String, Long> emptyMap = cloudianHyperStoreObjectStoreDriverImpl.getAllBucketsUsage(TEST_STORE_ID);
|
||||
assertNotNull(emptyMap);
|
||||
assertEquals(0, emptyMap.size());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetAllBucketsUsageTwoDomains() {
|
||||
// Prepare Buckets the store knows about.
|
||||
BucketVO b1 = new BucketVO(TEST_ACCOUNT_ID, 1L, TEST_STORE_ID, "b1", null, false, false, false, null);
|
||||
BucketVO b2 = new BucketVO(TEST_ACCOUNT_ID, 1L, TEST_STORE_ID, "b2", null, false, false, false, null);
|
||||
BucketVO b3 = new BucketVO(TEST_ACCOUNT_ID, 2L, TEST_STORE_ID, "b3", null, false, false, false, null);
|
||||
BucketVO b4 = new BucketVO(TEST_ACCOUNT_ID, 2L, TEST_STORE_ID, "b4", null, false, false, false, null);
|
||||
List<BucketVO> BucketList = new ArrayList<BucketVO>();
|
||||
BucketList.add(b1); // b1 owned by domain 1, exists
|
||||
BucketList.add(b2); // b2 owned by domain 1, deleted in object store (so no usage info)
|
||||
BucketList.add(b3); // b3 owned by domain 2, exists
|
||||
BucketList.add(b4); // b4 owned by domain 2, exists
|
||||
when(bucketDao.listByObjectStoreId(TEST_STORE_ID)).thenReturn(BucketList);
|
||||
|
||||
final String hsGroupId1 = "domain1";
|
||||
final String hsGroupId2 = "domain2";
|
||||
|
||||
// Setup both domains d1 and d2 with uuids that will become hsGroupId
|
||||
DomainVO d1 = mock(DomainVO.class);
|
||||
when(d1.getUuid()).thenReturn(hsGroupId1);
|
||||
DomainVO d2 = mock(DomainVO.class);
|
||||
when(d2.getUuid()).thenReturn(hsGroupId2);
|
||||
when(domainDao.findById(1L)).thenReturn(d1);
|
||||
when(domainDao.findById(2L)).thenReturn(d2);
|
||||
|
||||
// Setup Bucket Usage Data returned for b1, b3, b4, b5 by CloudianClient
|
||||
// where b2 is missing, b4 usage is negative and b5 is unknown.
|
||||
CloudianBucketUsage bu1 = new CloudianBucketUsage();
|
||||
bu1.setBucketName("b1");
|
||||
bu1.setByteCount(1L);
|
||||
CloudianBucketUsage bu3 = new CloudianBucketUsage();
|
||||
bu3.setBucketName("b3");
|
||||
bu3.setByteCount(3L);
|
||||
CloudianBucketUsage bu4 = new CloudianBucketUsage();
|
||||
bu4.setBucketName("b4");
|
||||
bu4.setByteCount(-55555L);
|
||||
CloudianBucketUsage bu5 = new CloudianBucketUsage();
|
||||
bu5.setBucketName("b5");
|
||||
bu5.setByteCount(5L);
|
||||
List<CloudianBucketUsage> d1bucketList = new ArrayList<CloudianBucketUsage>();
|
||||
d1bucketList.add(bu1);
|
||||
List<CloudianBucketUsage> d2bucketList = new ArrayList<CloudianBucketUsage>();
|
||||
d2bucketList.add(bu3);
|
||||
d2bucketList.add(bu4);
|
||||
d2bucketList.add(bu5);
|
||||
CloudianUserBucketUsage d1U1Usage = mock(CloudianUserBucketUsage.class);
|
||||
when(d1U1Usage.getBuckets()).thenReturn(d1bucketList);
|
||||
CloudianUserBucketUsage d2U1Usage = mock(CloudianUserBucketUsage.class);
|
||||
when(d2U1Usage.getBuckets()).thenReturn(d2bucketList);
|
||||
List<CloudianUserBucketUsage> d1Usage = new ArrayList<CloudianUserBucketUsage>();
|
||||
d1Usage.add(d1U1Usage);
|
||||
List<CloudianUserBucketUsage> d2Usage = new ArrayList<CloudianUserBucketUsage>();
|
||||
d2Usage.add(d2U1Usage);
|
||||
|
||||
doReturn(cloudianClient).when(cloudianHyperStoreObjectStoreDriverImpl).getCloudianClientByStoreId(TEST_STORE_ID);
|
||||
when(cloudianClient.getUserBucketUsages(hsGroupId1, null, null)).thenReturn(d1Usage);
|
||||
when(cloudianClient.getUserBucketUsages(hsGroupId2, null, null)).thenReturn(d2Usage);
|
||||
|
||||
// Test Details:
|
||||
// The CloudStack DB knows about 4 buckets: b1, b2, b3, b4
|
||||
// The actual Object Store knows about 4 buckets: b1, b3, b4, b5
|
||||
// Bucket usage in Object Store is: b1:1, b3:3, b4:-55555, b5:5
|
||||
// Expected Response: Usage for 3 buckets, b1, b3 and b4 where
|
||||
// b4 usage is returns as 0 instead of actual negative value and
|
||||
// b5 is ignored as it is not known by the store.
|
||||
Map<String, Long> usageMap = cloudianHyperStoreObjectStoreDriverImpl.getAllBucketsUsage(TEST_STORE_ID);
|
||||
assertNotNull(usageMap);
|
||||
assertEquals(3, usageMap.size());
|
||||
assertEquals(1L, usageMap.get("b1").longValue());
|
||||
assertEquals(3L, usageMap.get("b3").longValue());
|
||||
assertEquals(0L, usageMap.get("b4").longValue());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateUserNotExists() throws Exception {
|
||||
// ensure no account credentials are returned in the account details for new user.
|
||||
Mockito.reset(accountDetailsDao);
|
||||
when(accountDetailsDao.findDetails(TEST_ACCOUNT_ID)).thenReturn(new HashMap<String, String>());
|
||||
|
||||
String hsUserId = "user1";
|
||||
String hsGroupId = "group1";
|
||||
when(accountDao.findById(TEST_ACCOUNT_ID)).thenReturn(account);
|
||||
when(account.getDomainId()).thenReturn(TEST_DOMAIN_ID);
|
||||
when(account.getUuid()).thenReturn(hsUserId);
|
||||
when(domainDao.findById(TEST_DOMAIN_ID)).thenReturn(domain);
|
||||
when(domain.getUuid()).thenReturn(hsGroupId);
|
||||
|
||||
doReturn(cloudianClient).when(cloudianHyperStoreObjectStoreDriverImpl).getCloudianClientByStoreId(TEST_STORE_ID);
|
||||
|
||||
// Setup the user and group as not found.
|
||||
when(cloudianClient.listUser(hsUserId, hsGroupId)).thenReturn(null);
|
||||
when(cloudianClient.listGroup(hsGroupId)).thenReturn(null);
|
||||
when(cloudianClient.addUser(any(CloudianUser.class))).thenReturn(true);
|
||||
// lets assume no credentials added, so we add new ones.
|
||||
when(cloudianClient.listCredentials(hsUserId, hsGroupId)).thenReturn(new ArrayList<CloudianCredential>());
|
||||
CloudianCredential credential = new CloudianCredential();
|
||||
credential.setAccessKey(TEST_ROOT_AK);
|
||||
credential.setSecretKey(TEST_ROOT_SK);
|
||||
when(cloudianClient.createCredential(hsUserId, hsGroupId)).thenReturn(credential);
|
||||
|
||||
// Setup IAM for user, policy and credential creation.
|
||||
doReturn(iamClient).when(cloudianHyperStoreObjectStoreDriverImpl).getIAMClientByStoreId(TEST_STORE_ID, credential);
|
||||
AccessKey accessKey = mock(AccessKey.class);
|
||||
CreateAccessKeyResult accessKeyResult = mock(CreateAccessKeyResult.class);
|
||||
when(accessKey.getAccessKeyId()).thenReturn(TEST_IAM_AK);
|
||||
when(accessKey.getSecretAccessKey()).thenReturn(TEST_IAM_SK);
|
||||
when(accessKeyResult.getAccessKey()).thenReturn(accessKey);
|
||||
when(iamClient.createAccessKey(any(CreateAccessKeyRequest.class))).thenReturn(accessKeyResult);
|
||||
|
||||
// Next Check what will be persisted in DB after everything created.
|
||||
// Even though its not going to be true for a new user, lets have 1 bucket
|
||||
// whose credentials need to be updated.
|
||||
BucketVO bucketToUpdate = mock(BucketVO.class);
|
||||
when(bucketToUpdate.getId()).thenReturn(9L);
|
||||
List<BucketVO> bucketUpdateList = new ArrayList<BucketVO>();
|
||||
bucketUpdateList.add(bucketToUpdate);
|
||||
when(bucketDao.listByObjectStoreIdAndAccountId(TEST_STORE_ID, TEST_ACCOUNT_ID)).thenReturn(bucketUpdateList);
|
||||
|
||||
// Test: The user should be created which involves:
|
||||
// creating the group, user and root credentials
|
||||
// creating the iam user, its policy and iam credentials
|
||||
// finally persisting the root and iam credentials in account details.
|
||||
boolean created = cloudianHyperStoreObjectStoreDriverImpl.createUser(TEST_ACCOUNT_ID, TEST_STORE_ID);
|
||||
assertTrue(created);
|
||||
|
||||
// THe HyperStore group, user and credentials
|
||||
verify(cloudianClient, times(1)).addGroup(any(CloudianGroup.class));
|
||||
verify(cloudianClient, times(1)).addUser(any(CloudianUser.class));
|
||||
verify(cloudianClient, times(1)).createCredential(hsUserId, hsGroupId);
|
||||
|
||||
// not expecting IAM list access keys for a new user.
|
||||
verify(iamClient, never()).listAccessKeys(any(ListAccessKeysRequest.class));
|
||||
// We do expect IAM user creation with policy and access keys though.
|
||||
verify(iamClient, times(1)).createUser(any(CreateUserRequest.class));
|
||||
verify(iamClient, times(1)).putUserPolicy(any(PutUserPolicyRequest.class));
|
||||
verify(iamClient, times(1)).createAccessKey(any(CreateAccessKeyRequest.class));
|
||||
|
||||
// Now let's verify that the correct account details were persisted.
|
||||
ArgumentCaptor<Map<String,String>> detailsArg = ArgumentCaptor.forClass((Class<Map<String, String>>) (Class<?>) Map.class);
|
||||
verify(accountDetailsDao, times(1)).persist(anyLong(), detailsArg.capture());
|
||||
Map<String, String> updatedDetails = detailsArg.getValue();
|
||||
assertEquals(4, updatedDetails.size());
|
||||
assertEquals(TEST_IAM_AK, updatedDetails.get(CloudianHyperStoreUtil.KEY_IAM_ACCESS_KEY));
|
||||
assertEquals(TEST_IAM_SK, updatedDetails.get(CloudianHyperStoreUtil.KEY_IAM_SECRET_KEY));
|
||||
assertEquals(TEST_ROOT_AK, updatedDetails.get(CloudianHyperStoreUtil.KEY_ROOT_ACCESS_KEY));
|
||||
assertEquals(TEST_ROOT_SK, updatedDetails.get(CloudianHyperStoreUtil.KEY_ROOT_SECRET_KEY));
|
||||
|
||||
// Also verify that bucketToUpdate was updated with new credentials.
|
||||
verify(bucketToUpdate, times(1)).setAccessKey(anyString());
|
||||
verify(bucketToUpdate, times(1)).setSecretKey(anyString());
|
||||
verify(bucketDao, times(1)).update(9L, bucketToUpdate);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateUserExists() {
|
||||
String hsUserId = "user1";
|
||||
String hsGroupId = "group1";
|
||||
when(accountDao.findById(TEST_ACCOUNT_ID)).thenReturn(account);
|
||||
when(account.getDomainId()).thenReturn(TEST_DOMAIN_ID);
|
||||
when(account.getUuid()).thenReturn(hsUserId);
|
||||
when(domainDao.findById(TEST_DOMAIN_ID)).thenReturn(domain);
|
||||
when(domain.getUuid()).thenReturn(hsGroupId);
|
||||
|
||||
doReturn(cloudianClient).when(cloudianHyperStoreObjectStoreDriverImpl).getCloudianClientByStoreId(TEST_STORE_ID);
|
||||
|
||||
// Setup the user/group as existing and active
|
||||
CloudianUser user = mock(CloudianUser.class);
|
||||
CloudianGroup group = mock(CloudianGroup.class);
|
||||
when(user.getActive()).thenReturn(true);
|
||||
when(group.getActive()).thenReturn(true);
|
||||
when(cloudianClient.listUser(hsUserId, hsGroupId)).thenReturn(user);
|
||||
when(cloudianClient.listGroup(hsGroupId)).thenReturn(group);
|
||||
|
||||
// Setup the HS Credential to match known Root credential
|
||||
CloudianCredential credential = new CloudianCredential();
|
||||
credential.setAccessKey(TEST_ROOT_AK);
|
||||
credential.setSecretKey(TEST_ROOT_SK);
|
||||
credential.setActive(true);
|
||||
credential.setCreateDate(new Date(1L));
|
||||
List<CloudianCredential> credentials = new ArrayList<CloudianCredential>();
|
||||
credentials.add(credential);
|
||||
when(cloudianClient.listCredentials(hsUserId, hsGroupId)).thenReturn(credentials);
|
||||
|
||||
// Setup IAM to return 2 credentials, one that matches and one that doesn't
|
||||
doReturn(iamClient).when(cloudianHyperStoreObjectStoreDriverImpl).getIAMClientByStoreId(TEST_STORE_ID, credential);
|
||||
ListAccessKeysResult listAccessKeyResult = mock(ListAccessKeysResult.class);
|
||||
List<AccessKeyMetadata> listAccessKeyMetadata = new ArrayList<AccessKeyMetadata>();
|
||||
AccessKeyMetadata accessKeyNoMatch = mock(AccessKeyMetadata.class);
|
||||
when(accessKeyNoMatch.getAccessKeyId()).thenReturn("no_match");
|
||||
AccessKeyMetadata accessKeyMatch = mock(AccessKeyMetadata.class);
|
||||
when(accessKeyMatch.getAccessKeyId()).thenReturn(TEST_IAM_AK);
|
||||
listAccessKeyMetadata.add(accessKeyNoMatch);
|
||||
listAccessKeyMetadata.add(accessKeyMatch);
|
||||
when(listAccessKeyResult.getAccessKeyMetadata()).thenReturn(listAccessKeyMetadata);
|
||||
when(iamClient.listAccessKeys(any())).thenReturn(listAccessKeyResult);
|
||||
|
||||
// Test: The user should exist and nothing needs to be created
|
||||
// or persisted. There is one misc IAM credential to clean up.
|
||||
boolean created = cloudianHyperStoreObjectStoreDriverImpl.createUser(TEST_ACCOUNT_ID, TEST_STORE_ID);
|
||||
assertTrue(created);
|
||||
|
||||
// THe No HyperStore user, group or credentials were created.
|
||||
verify(cloudianClient, never()).addGroup(any(CloudianGroup.class));
|
||||
verify(cloudianClient, never()).addUser(any(CloudianUser.class));
|
||||
verify(cloudianClient, never()).createCredential(hsUserId, hsGroupId);
|
||||
|
||||
// List access keys finds 2 do deletes 1 that doesn't match.
|
||||
verify(iamClient, times(1)).listAccessKeys(any());
|
||||
verify(iamClient, times(1)).deleteAccessKey(any());
|
||||
|
||||
// And we don't create anything IAM related either.
|
||||
verify(iamClient, never()).createUser(any());
|
||||
verify(iamClient, never()).putUserPolicy(any());
|
||||
verify(iamClient, never()).createAccessKey(any());
|
||||
|
||||
// Nothing needs to be persisted.
|
||||
verify(accountDetailsDao, never()).persist(anyLong(), anyMap());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateUserDisabledUserExists() {
|
||||
String hsUserId = "user1";
|
||||
String hsGroupId = "group1";
|
||||
when(accountDao.findById(TEST_ACCOUNT_ID)).thenReturn(account);
|
||||
when(account.getDomainId()).thenReturn(TEST_DOMAIN_ID);
|
||||
when(account.getUuid()).thenReturn(hsUserId);
|
||||
when(domainDao.findById(TEST_DOMAIN_ID)).thenReturn(domain);
|
||||
when(domain.getUuid()).thenReturn(hsGroupId);
|
||||
|
||||
doReturn(cloudianClient).when(cloudianHyperStoreObjectStoreDriverImpl).getCloudianClientByStoreId(TEST_STORE_ID);
|
||||
|
||||
// Setup the user to be found but inactive.
|
||||
CloudianUser user = mock(CloudianUser.class);
|
||||
when(user.getActive()).thenReturn(false);
|
||||
when(cloudianClient.listUser(hsUserId, hsGroupId)).thenReturn(user);
|
||||
|
||||
// Test: user exists but is disabled. This condition requires HyperStore administrator action.
|
||||
CloudRuntimeException thrown = assertThrows(CloudRuntimeException.class, () -> cloudianHyperStoreObjectStoreDriverImpl.createUser(TEST_ACCOUNT_ID, TEST_STORE_ID));
|
||||
assertTrue(thrown.getMessage().contains("is Disabled. Consult"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateUserDisabledGroupExists() {
|
||||
String hsUserId = "user1";
|
||||
String hsGroupId = "group1";
|
||||
when(accountDao.findById(TEST_ACCOUNT_ID)).thenReturn(account);
|
||||
when(account.getDomainId()).thenReturn(TEST_DOMAIN_ID);
|
||||
when(account.getUuid()).thenReturn(hsUserId);
|
||||
when(domainDao.findById(TEST_DOMAIN_ID)).thenReturn(domain);
|
||||
when(domain.getUuid()).thenReturn(hsGroupId);
|
||||
|
||||
doReturn(cloudianClient).when(cloudianHyperStoreObjectStoreDriverImpl).getCloudianClientByStoreId(TEST_STORE_ID);
|
||||
|
||||
// Setup the user to not be found so that we check for a group
|
||||
when(cloudianClient.listUser(hsUserId, hsGroupId)).thenReturn(null);
|
||||
CloudianGroup group = mock(CloudianGroup.class);
|
||||
when(group.getActive()).thenReturn(false);
|
||||
when(cloudianClient.listGroup(hsGroupId)).thenReturn(group);
|
||||
|
||||
// Test: user does not exist, check if group exists, it does but is marked disabled.
|
||||
CloudRuntimeException thrown = assertThrows(CloudRuntimeException.class, () -> cloudianHyperStoreObjectStoreDriverImpl.createUser(TEST_ACCOUNT_ID, TEST_STORE_ID));
|
||||
assertTrue(thrown.getMessage().contains(String.format("The group %s is Disabled. Consult", hsGroupId)));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testListBuckets() {
|
||||
Map<String, Long> bucketUsageMap = new HashMap<String, Long>();
|
||||
bucketUsageMap.put("b1", 1L);
|
||||
bucketUsageMap.put("b2", 2L);
|
||||
when(cloudianHyperStoreObjectStoreDriverImpl.getAllBucketsUsage(anyLong())).thenReturn(bucketUsageMap);
|
||||
|
||||
List<Bucket> bucketList = cloudianHyperStoreObjectStoreDriverImpl.listBuckets(TEST_STORE_ID);
|
||||
assertNotNull(bucketList);
|
||||
assertEquals(2, bucketList.size());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDeleteBucket() throws Exception {
|
||||
doReturn(s3Client).when(cloudianHyperStoreObjectStoreDriverImpl).getS3ClientByBucketAndStore(any(), anyLong());
|
||||
BucketTO bucket = mock(BucketTO.class);
|
||||
when(bucket.getName()).thenReturn(TEST_BUCKET_NAME);
|
||||
boolean deleted = cloudianHyperStoreObjectStoreDriverImpl.deleteBucket(bucket, TEST_STORE_ID);
|
||||
assertTrue(deleted);
|
||||
verify(s3Client, times(1)).deleteBucket(TEST_BUCKET_NAME);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSetBucketPolicyPrivate() throws Exception {
|
||||
doReturn(s3Client).when(cloudianHyperStoreObjectStoreDriverImpl).getS3ClientByBucketAndStore(any(), anyLong());
|
||||
BucketTO bucket = mock(BucketTO.class);
|
||||
when(bucket.getName()).thenReturn(TEST_BUCKET_NAME);
|
||||
cloudianHyperStoreObjectStoreDriverImpl.setBucketPolicy(bucket, "private", TEST_STORE_ID);
|
||||
// private policy is equivalent to deleting any bucket policy
|
||||
verify(s3Client, times(1)).deleteBucketPolicy(TEST_BUCKET_NAME);
|
||||
verify(s3Client, never()).setBucketPolicy(anyString(), anyString());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSetBucketPolicyPublic() throws Exception {
|
||||
doReturn(s3Client).when(cloudianHyperStoreObjectStoreDriverImpl).getS3ClientByBucketAndStore(any(), anyLong());
|
||||
BucketTO bucket = mock(BucketTO.class);
|
||||
when(bucket.getName()).thenReturn(TEST_BUCKET_NAME);
|
||||
cloudianHyperStoreObjectStoreDriverImpl.setBucketPolicy(bucket, "public", TEST_STORE_ID);
|
||||
verify(s3Client, times(1)).setBucketPolicy(anyString(), anyString());
|
||||
verify(s3Client, never()).deleteBucketPolicy(TEST_BUCKET_NAME);
|
||||
}
|
||||
|
||||
@Test(expected = CloudRuntimeException.class)
|
||||
public void testSetBucketQuotaNonZero() {
|
||||
BucketTO bucket = mock(BucketTO.class);
|
||||
when(bucket.getName()).thenReturn(TEST_BUCKET_NAME);
|
||||
// Quota is not implemented by HyperStore. Throws a CloudRuntimeException if not 0.
|
||||
cloudianHyperStoreObjectStoreDriverImpl.setBucketQuota(bucket, TEST_STORE_ID, 5L);
|
||||
}
|
||||
|
||||
public void testSetBucketQuotaToZero() {
|
||||
BucketTO bucket = mock(BucketTO.class);
|
||||
when(bucket.getName()).thenReturn(TEST_BUCKET_NAME);
|
||||
// A zero quota indicates no quota and should be an accepted value.
|
||||
cloudianHyperStoreObjectStoreDriverImpl.setBucketQuota(bucket, TEST_STORE_ID, 0);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSetBucketEncryption() {
|
||||
doReturn(s3Client).when(cloudianHyperStoreObjectStoreDriverImpl).getS3ClientByBucketAndStore(any(), anyLong());
|
||||
BucketTO bucket = mock(BucketTO.class);
|
||||
when(bucket.getName()).thenReturn(TEST_BUCKET_NAME);
|
||||
cloudianHyperStoreObjectStoreDriverImpl.setBucketEncryption(bucket, TEST_STORE_ID);
|
||||
|
||||
// setBucketEncryption should be called once with SSE set.
|
||||
ArgumentCaptor<SetBucketEncryptionRequest> arg = ArgumentCaptor.forClass(SetBucketEncryptionRequest.class);
|
||||
verify(s3Client, times(1)).setBucketEncryption(arg.capture());
|
||||
SetBucketEncryptionRequest request = arg.getValue();
|
||||
List<ServerSideEncryptionRule> rules = request.getServerSideEncryptionConfiguration().getRules();
|
||||
assertEquals(1, rules.size());
|
||||
assertEquals("AES256", rules.get(0).getApplyServerSideEncryptionByDefault().getSSEAlgorithm());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDeleteBucketEncryption() throws Exception {
|
||||
doReturn(s3Client).when(cloudianHyperStoreObjectStoreDriverImpl).getS3ClientByBucketAndStore(any(), anyLong());
|
||||
BucketTO bucket = mock(BucketTO.class);
|
||||
when(bucket.getName()).thenReturn(TEST_BUCKET_NAME);
|
||||
boolean deleted = cloudianHyperStoreObjectStoreDriverImpl.deleteBucketEncryption(bucket, TEST_STORE_ID);
|
||||
assertTrue(deleted);
|
||||
verify(s3Client, times(1)).deleteBucketEncryption(TEST_BUCKET_NAME);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSetBucketVersioning() throws Exception {
|
||||
doReturn(s3Client).when(cloudianHyperStoreObjectStoreDriverImpl).getS3ClientByBucketAndStore(any(), anyLong());
|
||||
BucketTO bucket = mock(BucketTO.class);
|
||||
when(bucket.getName()).thenReturn(TEST_BUCKET_NAME);
|
||||
ArgumentCaptor<SetBucketVersioningConfigurationRequest> arg = ArgumentCaptor.forClass(SetBucketVersioningConfigurationRequest.class);
|
||||
|
||||
boolean set = cloudianHyperStoreObjectStoreDriverImpl.setBucketVersioning(bucket, TEST_STORE_ID);
|
||||
assertTrue(set);
|
||||
verify(s3Client, times(1)).setBucketVersioningConfiguration(arg.capture());
|
||||
SetBucketVersioningConfigurationRequest request = arg.getValue();
|
||||
assertEquals(TEST_BUCKET_NAME, request.getBucketName());
|
||||
assertEquals("Enabled", request.getVersioningConfiguration().getStatus());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDeleteBucketVersioning() throws Exception {
|
||||
doReturn(s3Client).when(cloudianHyperStoreObjectStoreDriverImpl).getS3ClientByBucketAndStore(any(), anyLong());
|
||||
BucketTO bucket = mock(BucketTO.class);
|
||||
when(bucket.getName()).thenReturn(TEST_BUCKET_NAME);
|
||||
ArgumentCaptor<SetBucketVersioningConfigurationRequest> arg = ArgumentCaptor.forClass(SetBucketVersioningConfigurationRequest.class);
|
||||
|
||||
boolean unSet = cloudianHyperStoreObjectStoreDriverImpl.deleteBucketVersioning(bucket, TEST_STORE_ID);
|
||||
assertTrue(unSet);
|
||||
verify(s3Client, times(1)).setBucketVersioningConfiguration(arg.capture());
|
||||
SetBucketVersioningConfigurationRequest request = arg.getValue();
|
||||
assertEquals(TEST_BUCKET_NAME, request.getBucketName());
|
||||
assertEquals("Suspended", request.getVersioningConfiguration().getStatus());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetCloudianClientByStoreId() {
|
||||
try (MockedStatic<CloudianHyperStoreUtil> mockStatic = Mockito.mockStatic(CloudianHyperStoreUtil.class)) {
|
||||
mockStatic.when(() -> CloudianHyperStoreUtil.getCloudianClient(TEST_ADMIN_URL, TEST_ADMIN_USER_NAME, TEST_ADMIN_PASSWORD, true)).thenReturn(cloudianClient);
|
||||
CloudianClient actualCC = cloudianHyperStoreObjectStoreDriverImpl.getCloudianClientByStoreId(TEST_STORE_ID);
|
||||
assertNotNull(actualCC);
|
||||
assertEquals(cloudianClient, actualCC);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetS3ClientByBucketAndStore() {
|
||||
// Prepare Buckets the store knows about.
|
||||
BucketVO b1 = new BucketVO(TEST_ACCOUNT_ID, 1L, TEST_STORE_ID, "b1", null, false, false, false, null);
|
||||
BucketVO b2 = new BucketVO(TEST_ACCOUNT_ID, 1L, TEST_STORE_ID, "b2", null, false, false, false, null);
|
||||
BucketVO b3 = new BucketVO(TEST_ACCOUNT_ID, 2L, TEST_STORE_ID, TEST_BUCKET_NAME, null, false, false, false, null);
|
||||
BucketVO b4 = new BucketVO(TEST_ACCOUNT_ID, 2L, TEST_STORE_ID, "b4", null, false, false, false, null);
|
||||
List<BucketVO> BucketList = new ArrayList<BucketVO>();
|
||||
BucketList.add(b1); // b1 owned by domain 1, exists
|
||||
BucketList.add(b2); // b2 owned by domain 1, exists
|
||||
BucketList.add(b3); // b3 owned by domain 2, exists - our TEST BUCKET
|
||||
BucketList.add(b4); // b4 owned by domain 2, exists
|
||||
when(bucketDao.listByObjectStoreId(TEST_STORE_ID)).thenReturn(BucketList);
|
||||
|
||||
AccountDetailVO accessKeyDetail = mock(AccountDetailVO.class);
|
||||
when(accessKeyDetail.getValue()).thenReturn(TEST_ROOT_AK);
|
||||
when(accountDetailsDao.findDetail(TEST_ACCOUNT_ID, CloudianHyperStoreUtil.KEY_ROOT_ACCESS_KEY)).thenReturn(accessKeyDetail);
|
||||
AccountDetailVO secretKeyDetail = mock(AccountDetailVO.class);
|
||||
when(secretKeyDetail.getValue()).thenReturn(TEST_ROOT_SK);
|
||||
when(accountDetailsDao.findDetail(TEST_ACCOUNT_ID, CloudianHyperStoreUtil.KEY_ROOT_SECRET_KEY)).thenReturn(secretKeyDetail);
|
||||
|
||||
BucketTO bucket = mock(BucketTO.class);
|
||||
when(bucket.getName()).thenReturn(TEST_BUCKET_NAME);
|
||||
|
||||
try (MockedStatic<CloudianHyperStoreUtil> mockStatic = Mockito.mockStatic(CloudianHyperStoreUtil.class)) {
|
||||
mockStatic.when(() -> CloudianHyperStoreUtil.getS3Client(TEST_S3_URL, TEST_ROOT_AK, TEST_ROOT_SK)).thenReturn(s3Client);
|
||||
AmazonS3 actualS3Client = cloudianHyperStoreObjectStoreDriverImpl.getS3ClientByBucketAndStore(bucket, TEST_STORE_ID);
|
||||
assertNotNull(actualS3Client);
|
||||
assertEquals(s3Client, actualS3Client);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetS3ClientByBucketAndStoreNoMatch() {
|
||||
// Prepare Buckets the store knows about.
|
||||
BucketVO b1 = new BucketVO(TEST_ACCOUNT_ID, 1L, TEST_STORE_ID, "b1", null, false, false, false, null);
|
||||
List<BucketVO> BucketList = new ArrayList<BucketVO>();
|
||||
BucketList.add(b1); // b1 owned by domain 1, exists
|
||||
when(bucketDao.listByObjectStoreId(TEST_STORE_ID)).thenReturn(BucketList);
|
||||
|
||||
// The test bucket name won't match anything prepared above
|
||||
BucketTO bucket = mock(BucketTO.class);
|
||||
when(bucket.getName()).thenReturn(TEST_BUCKET_NAME);
|
||||
|
||||
CloudRuntimeException thrown = assertThrows(CloudRuntimeException.class, () -> cloudianHyperStoreObjectStoreDriverImpl.getS3ClientByBucketAndStore(bucket, TEST_STORE_ID));
|
||||
assertTrue(thrown.getMessage().contains("not found"));
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,231 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
package org.apache.cloudstack.storage.datastore.lifecycle;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertThrows;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.mockito.ArgumentMatchers.anyBoolean;
|
||||
import static org.mockito.ArgumentMatchers.anyLong;
|
||||
import static org.mockito.ArgumentMatchers.anyMap;
|
||||
import static org.mockito.ArgumentMatchers.anyString;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
import java.net.MalformedURLException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.cloudstack.api.ApiErrorCode;
|
||||
import org.apache.cloudstack.api.ServerApiException;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianClient;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO;
|
||||
import org.apache.cloudstack.storage.datastore.util.CloudianHyperStoreUtil;
|
||||
import org.apache.cloudstack.storage.object.ObjectStoreEntity;
|
||||
import org.apache.cloudstack.storage.object.datastore.ObjectStoreHelper;
|
||||
import org.apache.cloudstack.storage.object.datastore.ObjectStoreProviderManager;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.ArgumentCaptor;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.MockedStatic;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.MockitoAnnotations;
|
||||
import org.mockito.Spy;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
import com.amazonaws.AmazonServiceException;
|
||||
import com.amazonaws.services.identitymanagement.AmazonIdentityManagement;
|
||||
import com.amazonaws.services.s3.AmazonS3;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
/**
 * Unit tests for {@code CloudianHyperStoreObjectStoreLifeCycleImpl#initialize(Map)}.
 *
 * All static helpers on {@link CloudianHyperStoreUtil} are mocked per-test via a
 * {@link MockedStatic} opened in {@link #setUp()} and closed in {@link #tearDown()};
 * each test opts real methods back in with thenCallRealMethod() where needed.
 */
public class CloudianHyperStoreObjectStoreLifeCycleImplTest {

    // Spy (not mock): initialize() logic under test runs for real; only collaborators are mocked.
    @Spy
    CloudianHyperStoreObjectStoreLifeCycleImpl cloudianHyperStoreObjectStoreLifeCycleImpl = new CloudianHyperStoreObjectStoreLifeCycleImpl();

    @Mock
    CloudianClient cloudianClient;
    @Mock
    AmazonS3 s3Client;
    @Mock
    AmazonIdentityManagement iamClient;
    @Mock
    ObjectStoreHelper objectStoreHelper;
    @Mock
    ObjectStoreProviderManager objectStoreMgr;
    @Mock
    ObjectStoreVO objectStoreVo;
    @Mock
    ObjectStoreEntity objectStoreEntity;

    // Canonical GUI input values used to build the datastore parameter maps below.
    static String TEST_STORE_NAME = "testStore";
    static String TEST_ADMIN_URL = "https://admin-service:19443";
    static String TEST_PROVIDER_NAME = "Cloudian HyperStore";
    static String TEST_ADMIN_USERNAME = "test_admin";
    static String TEST_ADMIN_PASSWORD = "test_pass";
    static String TEST_VALIDATE_SSL = "false";
    static String TEST_S3_URL = "https://s3-endpoint";
    static String TEST_IAM_URL = "https://iam-endpoint";

    // "details" sub-map and the top-level map passed to initialize(); rebuilt fresh per test.
    Map<String, String> guiDetailMap;
    Map<String, Object> guiDataStoreMap;

    MockedStatic<CloudianHyperStoreUtil> mockStatic;

    private AutoCloseable closeable;

    /**
     * Opens mocks, installs the static mock of CloudianHyperStoreUtil and
     * rebuilds the GUI parameter maps with valid defaults.
     */
    @Before
    public void setUp() {
        closeable = MockitoAnnotations.openMocks(this);

        mockStatic = Mockito.mockStatic(CloudianHyperStoreUtil.class);

        cloudianHyperStoreObjectStoreLifeCycleImpl.objectStoreHelper = objectStoreHelper;
        cloudianHyperStoreObjectStoreLifeCycleImpl.objectStoreMgr = objectStoreMgr;

        guiDetailMap = new HashMap<String, String>();
        guiDetailMap.put("accesskey", TEST_ADMIN_USERNAME);
        guiDetailMap.put("secretkey", TEST_ADMIN_PASSWORD);
        guiDetailMap.put("validateSSL", TEST_VALIDATE_SSL);
        guiDetailMap.put("s3Url", TEST_S3_URL);
        guiDetailMap.put("iamUrl", TEST_IAM_URL);
        guiDataStoreMap = new HashMap<String, Object>();
        guiDataStoreMap.put("name", TEST_STORE_NAME);
        guiDataStoreMap.put("url", TEST_ADMIN_URL);
        guiDataStoreMap.put("providerName", TEST_PROVIDER_NAME);
        guiDataStoreMap.put("details", guiDetailMap);
    }

    /** Closes the static mock (mandatory before the next test re-registers it) and the mocks. */
    @After
    public void tearDown() throws Exception {
        mockStatic.close();
        closeable.close();
    }

    /**
     * Happy path: initialize() validates admin/S3/IAM connectivity exactly once each
     * and propagates the GUI params/details unchanged to createObjectStore().
     */
    @Test
    public void testInitializeValidation() {
        mockStatic.when(() -> CloudianHyperStoreUtil.getCloudianClient(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(cloudianClient);
        mockStatic.when(() -> CloudianHyperStoreUtil.getS3Client(anyString(), anyString(), anyString())).thenReturn(s3Client);
        mockStatic.when(() -> CloudianHyperStoreUtil.getIAMClient(anyString(), anyString(), anyString())).thenReturn(iamClient);
        // Ensure real validation methods are called (as everything was mocked). These ones we need.
        mockStatic.when(() -> CloudianHyperStoreUtil.validateS3Url(anyString())).thenCallRealMethod();
        mockStatic.when(() -> CloudianHyperStoreUtil.validateIAMUrl(anyString())).thenCallRealMethod();

        // Admin, S3 and IAM will be invoked to validate the urls/connectivity
        when(cloudianClient.getServerVersion()).thenReturn("Test Version");
        // S3 and IAM validation is done with an unknown key: an InvalidAccessKeyId
        // error response counts as a *successful* endpoint validation.
        AmazonServiceException ase = new AmazonServiceException("Test Amazon Service Exception");
        ase.setErrorCode("InvalidAccessKeyId");
        when(s3Client.listBuckets()).thenThrow(ase);
        when(iamClient.listAccessKeys()).thenThrow(ase);

        when(objectStoreVo.getId()).thenReturn(99L);
        when(objectStoreHelper.createObjectStore(anyMap(), anyMap())).thenReturn(objectStoreVo);
        when(objectStoreMgr.getObjectStore(anyLong())).thenReturn(objectStoreEntity);

        // Test initialization
        DataStore ds = cloudianHyperStoreObjectStoreLifeCycleImpl.initialize(guiDataStoreMap);
        assertNotNull(ds);

        // Verify everything was called to test the connections
        verify(cloudianClient, times(1)).getServerVersion();
        verify(s3Client, times(1)).listBuckets();
        verify(iamClient, times(1)).listAccessKeys();

        // Validate the store details were propagated correctly.
        // (Unchecked double cast is the standard workaround for capturing generic Maps.)
        ArgumentCaptor<Map<String,Object>> paramsArg = ArgumentCaptor.forClass((Class<Map<String, Object>>) (Class<?>) Map.class);
        ArgumentCaptor<Map<String,String>> detailsArg = ArgumentCaptor.forClass((Class<Map<String, String>>) (Class<?>) Map.class);
        verify(objectStoreHelper, times(1)).createObjectStore(paramsArg.capture(), detailsArg.capture());
        Map<String, Object> updatedParams = paramsArg.getValue();
        assertEquals(3, updatedParams.size());
        assertEquals(TEST_STORE_NAME, updatedParams.get(CloudianHyperStoreUtil.STORE_KEY_NAME));
        assertEquals(TEST_ADMIN_URL, updatedParams.get(CloudianHyperStoreUtil.STORE_KEY_URL));
        assertEquals(TEST_PROVIDER_NAME, updatedParams.get(CloudianHyperStoreUtil.STORE_KEY_PROVIDER_NAME));
        Map<String, String> updatedDetails = detailsArg.getValue();
        assertEquals(5, updatedDetails.size());
        assertEquals(TEST_ADMIN_USERNAME, updatedDetails.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_USER_NAME));
        assertEquals(TEST_ADMIN_PASSWORD, updatedDetails.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_PASSWORD));
        assertEquals(TEST_VALIDATE_SSL, updatedDetails.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_VALIDATE_SSL));
        assertEquals(TEST_S3_URL, updatedDetails.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_S3_URL));
        assertEquals(TEST_IAM_URL, updatedDetails.get(CloudianHyperStoreUtil.STORE_DETAILS_KEY_IAM_URL));
    }

    /** An empty configuration map must be rejected (providerName check fires first). */
    @Test
    public void testInitializeEmptyMap() {
        // Pass an empty configuration map. No URL, name, details etc.
        guiDataStoreMap.clear();

        // Test initialization - should complain about providerName not matching
        CloudRuntimeException thrown = assertThrows(CloudRuntimeException.class, () -> cloudianHyperStoreObjectStoreLifeCycleImpl.initialize(guiDataStoreMap));
        assertTrue(thrown.getMessage().contains("providerName"));
    }

    /** A providerName other than "Cloudian HyperStore" must be rejected. */
    @Test
    public void testInitializeUnexpectedProviderName() {
        // Use a bad provider name
        guiDataStoreMap.replace("providerName", "bad provider name");

        // Test initialization - should complain about providerName not matching
        CloudRuntimeException thrown = assertThrows(CloudRuntimeException.class, () -> cloudianHyperStoreObjectStoreLifeCycleImpl.initialize(guiDataStoreMap));
        assertTrue(thrown.getMessage().contains("Unexpected providerName"));
    }

    /** Omitting the "details" sub-map must be rejected with a message naming it. */
    @Test
    public void testInitializeMissingDetails() {
        // Don't pass in the details map
        guiDataStoreMap.remove("details");

        // Test initialization - should complain about details
        CloudRuntimeException thrown = assertThrows(CloudRuntimeException.class, () -> cloudianHyperStoreObjectStoreLifeCycleImpl.initialize(guiDataStoreMap));
        assertTrue(thrown.getMessage().contains("details"));
    }

    /** A malformed admin URL surfaces as CloudRuntimeException caused by MalformedURLException. */
    @Test
    public void testInitializeBadURL() {
        // Admin connectivity is done first. As everything in Util is mocked, this time we use real implementation.
        mockStatic.when(() -> CloudianHyperStoreUtil.getCloudianClient(anyString(), anyString(), anyString(), anyBoolean())).thenCallRealMethod();

        // Override the URL for this test
        guiDataStoreMap.put("url", "bad_url");

        // Test initialization
        CloudRuntimeException thrown = assertThrows(CloudRuntimeException.class, () -> cloudianHyperStoreObjectStoreLifeCycleImpl.initialize(guiDataStoreMap));
        assertEquals(MalformedURLException.class, thrown.getCause().getClass());
    }

    /** Admin-API auth failure propagates as ServerApiException(UNAUTHORIZED) from initialize(). */
    @Test
    public void testInitializeBadCredentials() {
        // Admin connectivity is done first.
        mockStatic.when(() -> CloudianHyperStoreUtil.getCloudianClient(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(cloudianClient);
        ServerApiException sae = new ServerApiException(ApiErrorCode.UNAUTHORIZED, "bad credentials");
        when(cloudianClient.getServerVersion()).thenThrow(sae);

        // Test initialization
        ServerApiException thrown = assertThrows(ServerApiException.class, () -> cloudianHyperStoreObjectStoreLifeCycleImpl.initialize(guiDataStoreMap));
        assertEquals(ApiErrorCode.UNAUTHORIZED, thrown.getErrorCode());
        verify(cloudianClient, times(1)).getServerVersion();
    }
}
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
package org.apache.cloudstack.storage.datastore.provider;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider.DataStoreProviderType;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.mockito.MockitoAnnotations;
|
||||
|
||||
import java.util.Set;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
public class CloudianHyperStoreObjectStoreProviderImplTest {
|
||||
|
||||
private CloudianHyperStoreObjectStoreProviderImpl cloudianHyperStoreObjectStoreProviderImpl;
|
||||
|
||||
private AutoCloseable closeable;
|
||||
|
||||
@Before
|
||||
public void setUp() {
|
||||
closeable = MockitoAnnotations.openMocks(this);
|
||||
cloudianHyperStoreObjectStoreProviderImpl = new CloudianHyperStoreObjectStoreProviderImpl();
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
closeable.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetName() {
|
||||
String actualName = cloudianHyperStoreObjectStoreProviderImpl.getName();
|
||||
assertEquals("Cloudian HyperStore", actualName);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetTypes() {
|
||||
Set<DataStoreProviderType> types = cloudianHyperStoreObjectStoreProviderImpl.getTypes();
|
||||
assertEquals(1, types.size());
|
||||
assertEquals("OBJECT", types.toArray()[0].toString());
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,227 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
package org.apache.cloudstack.storage.datastore.util;
|
||||
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.get;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.post;
|
||||
import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertThrows;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.mockito.ArgumentMatchers.anyBoolean;
|
||||
import static org.mockito.ArgumentMatchers.anyString;
|
||||
|
||||
import org.apache.cloudstack.api.ApiErrorCode;
|
||||
import org.apache.cloudstack.api.ServerApiException;
|
||||
import org.apache.cloudstack.cloudian.client.CloudianClient;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Rule;
|
||||
import org.junit.Test;
|
||||
import org.mockito.MockedStatic;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.MockitoAnnotations;
|
||||
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.github.tomakehurst.wiremock.junit.WireMockRule;
|
||||
|
||||
/**
 * Unit tests for {@code CloudianHyperStoreUtil} client factories and URL validators.
 *
 * A WireMock server on a fixed local port plays the HyperStore admin, S3 and IAM
 * endpoints so connectivity and error-response handling can be tested for real.
 */
public class CloudianHyperStoreUtilTest {
    // Fixed local port for the WireMock endpoint stubs below.
    private final int port = 18081;

    @Rule
    public WireMockRule wireMockRule = new WireMockRule(port);

    private AutoCloseable closeable;

    @Before
    public void setUp() {
        closeable = MockitoAnnotations.openMocks(this);
    }

    @After
    public void tearDown() throws Exception {
        closeable.close();
    }

    /** An unsupported URL scheme is wrapped in a CloudRuntimeException. */
    @Test
    public void testGetCloudianClientBadUrl() {
        String url = "bad://bad-url";
        CloudRuntimeException thrown = assertThrows(CloudRuntimeException.class, () -> CloudianHyperStoreUtil.getCloudianClient(url, "", "", false));
        assertNotNull(thrown);
        assertTrue(thrown.getMessage().contains("unknown protocol"));
    }

    /** Client construction succeeds without connecting (nothing listens on the port). */
    // NOTE(review): 98765 exceeds the max TCP port (65535); the client is only
    // constructed here, never connected, so this passes - consider a valid port.
    @Test
    public void testGetCloudianClient() {
        String url = "https://localhost:98765";
        CloudianClient cc = CloudianHyperStoreUtil.getCloudianClient(url, "", "", false);
        assertNotNull(cc);
    }

    /** A URL without an explicit port is also accepted at construction time. */
    @Test
    public void testGetCloudianClientNoPort() {
        String url = "https://localhost";
        CloudianClient cc = CloudianHyperStoreUtil.getCloudianClient(url, "", "", false);
        assertNotNull(cc);
    }

    /** End-to-end: the constructed client fetches /system/version from the stub. */
    @Test
    public void testGetCloudianClientToGetServerVersion() {
        final String expect = "8.1 Compiled: 2023-11-11 16:30";
        wireMockRule.stubFor(get(urlEqualTo("/system/version"))
            .willReturn(aResponse()
                .withStatus(200)
                .withBody(expect)));

        // Get a connection and try using it
        String url = String.format("http://localhost:%d", port);
        CloudianClient cc = CloudianHyperStoreUtil.getCloudianClient(url, "u", "p", false);
        String version = cc.getServerVersion();
        assertEquals(expect, version);
    }

    /** The admin timeout (mocked down to 1s) aborts a slow (3s-delayed) response. */
    @Test
    public void testGetCloudianClientShortenedTimeout() {
        // Response delayed 3 seconds. We should never get it.
        final String expect = "8.1 Compiled: 2023-11-11 16:30";
        wireMockRule.stubFor(get(urlEqualTo("/system/version"))
            .willReturn(aResponse()
                .withStatus(200)
                .withFixedDelay(3000) // 3 second delay.
                .withBody(expect)));

        try (MockedStatic<CloudianHyperStoreUtil> mockStatic = Mockito.mockStatic(CloudianHyperStoreUtil.class)) {
            // Force a shorter 1 second timeout for testing so as not to hold up unit tests.
            mockStatic.when(() -> CloudianHyperStoreUtil.getAdminTimeoutSeconds()).thenReturn(1);
            mockStatic.when(() -> CloudianHyperStoreUtil.getCloudianClient(anyString(), anyString(), anyString(), anyBoolean())).thenCallRealMethod();

            // Get a connection and try using it but it should timeout
            String url = String.format("http://localhost:%d", port);
            CloudianClient cc = CloudianHyperStoreUtil.getCloudianClient(url, "u", "p", false);
            long before = System.currentTimeMillis();
            ServerApiException thrown = assertThrows(ServerApiException.class, () -> cc.getServerVersion());
            long after = System.currentTimeMillis();
            assertNotNull(thrown);
            assertEquals(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, thrown.getErrorCode());
            assertTrue((after - before) >= 1000); // should timeout after 1 second.
        }
    }

    /** An S3 InvalidAccessKeyId error response means the endpoint is a valid S3 service. */
    @Test
    public void testValidateS3UrlGood() {
        // Mock an AWS S3 invalid access key response.
        StringBuilder ERR_XML = new StringBuilder("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
        ERR_XML.append("<Error>\n");
        ERR_XML.append("  <Code>InvalidAccessKeyId</Code>\n");
        ERR_XML.append("  <Message>The AWS Access Key Id you provided does not exist in our records.</Message>\n");
        ERR_XML.append("  <AWSAccessKeyId>unknown</AWSAccessKeyId>\n");
        ERR_XML.append("  <HostId>12345=</HostId>\n");
        ERR_XML.append("</Error>\n");

        wireMockRule.stubFor(get(urlEqualTo("/"))
            .willReturn(aResponse()
                .withStatus(403)
                .withHeader("content-type", "application/xml")
                .withBody(ERR_XML.toString())));

        // Test: validates the AmazonS3 client returned by CloudianHyperStoreUtil.getS3Client()
        // which is called indirectly via the validateS3Url() method can connect to the
        // remote port and handles the access key error as the expected s3 response.
        String url = String.format("http://localhost:%d", port);
        CloudianHyperStoreUtil.validateS3Url(url);
    }

    /** A non-S3 error response (HTML 400) fails S3 URL validation. */
    @Test
    public void testValidateS3UrlBadRequest() {
        wireMockRule.stubFor(get(urlEqualTo("/"))
            .willReturn(aResponse()
                .withStatus(400)
                .withHeader("content-type", "text/html")
                .withBody("<html><body>400 Bad Request</body></html>")));

        String url = String.format("http://localhost:%d", port);
        CloudRuntimeException thrown = assertThrows(CloudRuntimeException.class, () -> CloudianHyperStoreUtil.validateS3Url(url));
        assertNotNull(thrown);
    }

    /** An AWS-style InvalidClientTokenId IAM error counts as a valid IAM endpoint. */
    @Test
    public void testValidateIAMUrlGoodInvalidClientTokenId() {
        // Mock an AWS IAM invalid access key response.
        StringBuilder ERR_XML = new StringBuilder();
        ERR_XML.append("<ErrorResponse xmlns=\"https://iam.amazonaws.com/doc/2010-05-08/\">\n");
        ERR_XML.append("  <Error>\n");
        ERR_XML.append("    <Type>Sender</Type>\n");
        ERR_XML.append("    <Code>InvalidClientTokenId</Code>\n");
        ERR_XML.append("    <Message>The security token included in the request is invalid.</Message>\n");
        ERR_XML.append("  </Error>\n");
        ERR_XML.append("  <RequestId>a2c47f7e-0196-4b45-af18-a9e99e4d9ed5</RequestId>\n");
        ERR_XML.append("</ErrorResponse>\n");

        wireMockRule.stubFor(post(urlEqualTo("/"))
            .willReturn(aResponse()
                .withStatus(403)
                .withHeader("content-type", "text/xml")
                .withBody(ERR_XML.toString())));

        // Test: validates the AmazonIdentityManagement client returned by CloudianHyperStoreUtil.getIAMClient()
        // which is called indirectly via the validateIAMUrl() method can connect to the
        // remote port and handles the access key error as the expected s3 response.
        String url = String.format("http://localhost:%d", port);
        CloudianHyperStoreUtil.validateIAMUrl(url);
    }

    /** A HyperStore-style InvalidAccessKeyId IAM error also counts as a valid IAM endpoint. */
    @Test
    public void testValidateIAMUrlGoodInvalidAccessKeyId() {
        // Mock HyperStore IAM invalid access key current response.
        StringBuilder ERR_XML = new StringBuilder("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
        ERR_XML.append("<ErrorResponse xmlns=\"https://iam.amazonaws.com/doc/2010-05-08/\">\n");
        ERR_XML.append("  <Error>\n");
        ERR_XML.append("    <Code>InvalidAccessKeyId</Code>\n");
        ERR_XML.append("    <Message>The Access Key Id you provided does not exist in our records.</Message>\n");
        ERR_XML.append("  </Error>\n");
        ERR_XML.append("  <RequestId>a2c47f7e-0196-4b45-af18-a9e99e4d9ed5</RequestId>\n");
        ERR_XML.append("</ErrorResponse>\n");

        wireMockRule.stubFor(post(urlEqualTo("/"))
            .willReturn(aResponse()
                .withStatus(403)
                .withHeader("content-type", "application/xml;charset=UTF-8")
                .withBody(ERR_XML.toString())));

        // Test: validates the AmazonIdentityManagement client returned by CloudianHyperStoreUtil.getIAMClient()
        // which is called indirectly via the validateIAMUrl() method can connect to the
        // remote port and handles the access key error as the expected s3 response.
        String url = String.format("http://localhost:%d", port);
        CloudianHyperStoreUtil.validateIAMUrl(url);
    }

    /** A non-IAM error response (HTML 400) fails IAM URL validation. */
    @Test
    public void testValidateIAMUrlBadRequest() {
        wireMockRule.stubFor(post(urlEqualTo("/"))
            .willReturn(aResponse()
                .withStatus(400)
                .withHeader("content-type", "text/html")
                .withBody("<html><body>400 Bad Request</body></html>")));

        String url = String.format("http://localhost:%d", port);
        CloudRuntimeException thrown = assertThrows(CloudRuntimeException.class, () -> CloudianHyperStoreUtil.validateIAMUrl(url));
        assertNotNull(thrown);
    }
}
|
||||
|
|
@ -5,6 +5,12 @@ All notable changes to Linstor CloudStack plugin will be documented in this file
|
|||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [2025-02-21]
|
||||
|
||||
### Fixed
|
||||
|
||||
- Always try to delete cs-...-rst resource before doing a snapshot backup
|
||||
|
||||
## [2025-01-27]
|
||||
|
||||
### Fixed
|
||||
|
|
|
|||
|
|
@ -1119,6 +1119,8 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
|
|||
String snapshotName,
|
||||
String restoredName) throws ApiException {
|
||||
final String rscGrp = getRscGrp(storagePoolVO);
|
||||
// Try to delete the -rst resource first; it could be left over if a previous copy failed and no one deleted it.
|
||||
deleteResourceDefinition(storagePoolVO, restoredName);
|
||||
ResourceDefinitionCreate rdc = createResourceDefinitionCreate(restoredName, rscGrp);
|
||||
api.resourceDefinitionCreate(rdc);
|
||||
|
||||
|
|
@ -1261,19 +1263,22 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
|
|||
throws ApiException {
|
||||
Answer answer;
|
||||
String restoreName = rscName + "-rst";
|
||||
String devName = restoreResourceFromSnapshot(api, pool, rscName, snapshotName, restoreName);
|
||||
try {
|
||||
String devName = restoreResourceFromSnapshot(api, pool, rscName, snapshotName, restoreName);
|
||||
|
||||
Optional<RemoteHostEndPoint> optEPAny = getLinstorEP(api, restoreName);
|
||||
if (optEPAny.isPresent()) {
|
||||
// patch the src device path to the temporary linstor resource
|
||||
snapshotObject.setPath(devName);
|
||||
origCmd.setSrcTO(snapshotObject.getTO());
|
||||
answer = optEPAny.get().sendMessage(origCmd);
|
||||
} else{
|
||||
answer = new Answer(origCmd, false, "Unable to get matching Linstor endpoint.");
|
||||
Optional<RemoteHostEndPoint> optEPAny = getLinstorEP(api, restoreName);
|
||||
if (optEPAny.isPresent()) {
|
||||
// patch the src device path to the temporary linstor resource
|
||||
snapshotObject.setPath(devName);
|
||||
origCmd.setSrcTO(snapshotObject.getTO());
|
||||
answer = optEPAny.get().sendMessage(origCmd);
|
||||
} else{
|
||||
answer = new Answer(origCmd, false, "Unable to get matching Linstor endpoint.");
|
||||
}
|
||||
} finally {
|
||||
// delete the temporary resource, noop if already gone
|
||||
api.resourceDefinitionDelete(restoreName);
|
||||
}
|
||||
// delete the temporary resource, noop if already gone
|
||||
api.resourceDefinitionDelete(restoreName);
|
||||
return answer;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -24,6 +24,7 @@ import java.util.Map;
|
|||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import com.cloud.storage.dao.SnapshotDetailsVO;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
|
||||
|
|
@ -110,7 +111,6 @@ import com.cloud.storage.VolumeDetailVO;
|
|||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.dao.SnapshotDao;
|
||||
import com.cloud.storage.dao.SnapshotDetailsDao;
|
||||
import com.cloud.storage.dao.SnapshotDetailsVO;
|
||||
import com.cloud.storage.dao.StoragePoolHostDao;
|
||||
import com.cloud.storage.dao.VMTemplateDetailsDao;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
|
|
@ -640,57 +640,16 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
SnapshotDataStoreVO snap = getSnapshotImageStoreRef(sinfo.getId(), vinfo.getDataCenterId());
|
||||
SnapshotDetailsVO snapshotDetail = snapshotDetailsDao.findDetail(sinfo.getId(), StorPoolUtil.SP_DELAY_DELETE);
|
||||
if (snapshotDetail != null) {
|
||||
err = String.format("Could not create volume from snapshot due to: %s", resp.getError());
|
||||
err = String.format("Could not create volume from snapshot due to: %s. The snapshot was created with the delayDelete option.", resp.getError());
|
||||
} else if (snap != null && StorPoolStorageAdaptor.getVolumeNameFromPath(snap.getInstallPath(), false) == null) {
|
||||
resp = StorPoolUtil.volumeCreate(srcData.getUuid(), null, size, null, "no", "snapshot", sinfo.getBaseVolume().getMaxIops(), conn);
|
||||
if (resp.getError() == null) {
|
||||
VolumeObjectTO dstTO = (VolumeObjectTO) dstData.getTO();
|
||||
dstTO.setSize(size);
|
||||
dstTO.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
|
||||
cmd = new StorPoolDownloadTemplateCommand(srcData.getTO(), dstTO, StorPoolHelper.getTimeout(StorPoolHelper.PrimaryStorageDownloadWait, configDao), VirtualMachineManager.ExecuteInSequence.value(), "volume");
|
||||
|
||||
EndPoint ep = selector.select(srcData, dstData);
|
||||
if (ep == null) {
|
||||
err = "No remote endpoint to send command, check if host or ssvm is down?";
|
||||
} else {
|
||||
answer = ep.sendMessage(cmd);
|
||||
}
|
||||
|
||||
if (answer != null && answer.getResult()) {
|
||||
SpApiResponse resp2 = StorPoolUtil.volumeFreeze(StorPoolUtil.getNameFromResponse(resp, true), conn);
|
||||
if (resp2.getError() != null) {
|
||||
err = String.format("Could not freeze Storpool volume %s. Error: %s", srcData.getUuid(), resp2.getError());
|
||||
} else {
|
||||
String name = StorPoolUtil.getNameFromResponse(resp, false);
|
||||
SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(sinfo.getId(), sinfo.getUuid());
|
||||
if (snapshotDetails != null) {
|
||||
StorPoolHelper.updateSnapshotDetailsValue(snapshotDetails.getId(), StorPoolUtil.devPath(name), "snapshot");
|
||||
}else {
|
||||
StorPoolHelper.addSnapshotDetails(sinfo.getId(), sinfo.getUuid(), StorPoolUtil.devPath(name), snapshotDetailsDao);
|
||||
}
|
||||
resp = StorPoolUtil.volumeCreate(volumeName, StorPoolUtil.getNameFromResponse(resp, true), size, null, null, "volume", sinfo.getBaseVolume().getMaxIops(), conn);
|
||||
if (resp.getError() == null) {
|
||||
updateStoragePool(dstData.getDataStore().getId(), size);
|
||||
|
||||
VolumeObjectTO to = (VolumeObjectTO) dstData.getTO();
|
||||
to.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
|
||||
to.setSize(size);
|
||||
// successfully downloaded snapshot to primary storage
|
||||
answer = new CopyCmdAnswer(to);
|
||||
StorPoolUtil.spLog("Created volume=%s with uuid=%s from snapshot=%s with uuid=%s", name, to.getUuid(), snapshotName, sinfo.getUuid());
|
||||
|
||||
} else {
|
||||
err = String.format("Could not create Storpool volume %s from snapshot %s. Error: %s", volumeName, snapshotName, resp.getError());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
err = answer != null ? answer.getDetails() : "Unknown error while downloading template. Null answer returned.";
|
||||
}
|
||||
SpApiResponse emptyVolumeCreateResp = StorPoolUtil.volumeCreate(volumeName, null, size, null, null, "volume", null, conn);
|
||||
if (emptyVolumeCreateResp.getError() == null) {
|
||||
answer = createVolumeFromSnapshot(srcData, dstData, size, emptyVolumeCreateResp);
|
||||
} else {
|
||||
err = String.format("Could not create Storpool volume %s from snapshot %s. Error: %s", volumeName, snapshotName, resp.getError());
|
||||
answer = new Answer(cmd, false, String.format("Could not create Storpool volume %s from snapshot %s. Error: %s", volumeName, snapshotName, emptyVolumeCreateResp.getError()));
|
||||
}
|
||||
} else {
|
||||
err = String.format("The snapshot %s does not exists neither on primary, neither on secondary storage. Cannot create volume from snapshot", snapshotName);
|
||||
answer = new Answer(cmd, false, String.format("The snapshot %s does not exists neither on primary, neither on secondary storage. Cannot create volume from snapshot", snapshotName));
|
||||
}
|
||||
} else {
|
||||
err = String.format("Could not create Storpool volume %s from snapshot %s. Error: %s", volumeName, snapshotName, resp.getError());
|
||||
|
|
@ -791,22 +750,17 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
err = String.format("Could not create Storpool volume for CS template %s. Error: %s", name, resp.getError());
|
||||
} else {
|
||||
String volumeNameToSnapshot = StorPoolUtil.getNameFromResponse(resp, true);
|
||||
SpApiResponse resp2 = StorPoolUtil.volumeFreeze(volumeNameToSnapshot, conn);
|
||||
if (resp2.getError() != null) {
|
||||
err = String.format("Could not freeze Storpool volume %s. Error: %s", name, resp2.getError());
|
||||
} else {
|
||||
StorPoolUtil.spLog("Storpool snapshot [%s] for a template exists. Creating template on Storpool with name [%s]", tinfo.getUuid(), name);
|
||||
TemplateObjectTO dstTO = (TemplateObjectTO) dstData.getTO();
|
||||
dstTO.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
|
||||
dstTO.setSize(size);
|
||||
answer = new CopyCmdAnswer(dstTO);
|
||||
}
|
||||
TemplateObjectTO dstTO = (TemplateObjectTO) dstData.getTO();
|
||||
|
||||
answer = createVolumeSnapshot(cmd, size, conn, volumeNameToSnapshot, dstTO);
|
||||
StorPoolUtil.volumeDelete(volumeNameToSnapshot, conn);
|
||||
}
|
||||
} else {
|
||||
resp = StorPoolUtil.volumeCreate(name, null, size, null, "no", "template", null, conn);
|
||||
if (resp.getError() != null) {
|
||||
err = String.format("Could not create Storpool volume for CS template %s. Error: %s", name, resp.getError());
|
||||
} else {
|
||||
String volName = StorPoolUtil.getNameFromResponse(resp, true);
|
||||
TemplateObjectTO dstTO = (TemplateObjectTO)dstData.getTO();
|
||||
dstTO.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
|
||||
dstTO.setSize(size);
|
||||
|
|
@ -822,19 +776,12 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
|
||||
if (answer != null && answer.getResult()) {
|
||||
// successfully downloaded template to primary storage
|
||||
SpApiResponse resp2 = StorPoolUtil.volumeFreeze(StorPoolUtil.getNameFromResponse(resp, true), conn);
|
||||
if (resp2.getError() != null) {
|
||||
err = String.format("Could not freeze Storpool volume %s. Error: %s", name, resp2.getError());
|
||||
}
|
||||
answer = createVolumeSnapshot(cmd, size, conn, volName, dstTO);
|
||||
} else {
|
||||
err = answer != null ? answer.getDetails() : "Unknown error while downloading template. Null answer returned.";
|
||||
}
|
||||
}
|
||||
}
|
||||
if (err != null) {
|
||||
resp = StorPoolUtil.volumeDelete(StorPoolUtil.getNameFromResponse(resp, true), conn);
|
||||
if (resp.getError() != null) {
|
||||
logger.warn(String.format("Could not clean-up Storpool volume %s. Error: %s", name, resp.getError()));
|
||||
|
||||
StorPoolUtil.volumeDelete(volName, conn);
|
||||
}
|
||||
}
|
||||
} else if (srcType == DataObjectType.TEMPLATE && dstType == DataObjectType.VOLUME) {
|
||||
|
|
@ -1027,6 +974,42 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
callback.complete(res);
|
||||
}
|
||||
|
||||
private Answer createVolumeSnapshot(StorageSubSystemCommand cmd, Long size, SpConnectionDesc conn,
|
||||
String volName, TemplateObjectTO dstTO) {
|
||||
Answer answer;
|
||||
SpApiResponse resp = StorPoolUtil.volumeSnapshot(volName, dstTO.getUuid(), null, "template", null, conn);
|
||||
if (resp.getError() != null) {
|
||||
answer = new Answer(cmd, false, String.format("Could not snapshot volume. Error: %s", resp.getError()));
|
||||
} else {
|
||||
dstTO.setPath(StorPoolUtil.devPath(
|
||||
StorPoolUtil.getSnapshotNameFromResponse(resp, false, StorPoolUtil.GLOBAL_ID)));
|
||||
dstTO.setSize(size);
|
||||
answer = new CopyCmdAnswer(dstTO);
|
||||
}
|
||||
return answer;
|
||||
}
|
||||
|
||||
private Answer createVolumeFromSnapshot(DataObject srcData, DataObject dstData, final Long size,
|
||||
SpApiResponse emptyVolumeCreateResp) {
|
||||
Answer answer;
|
||||
String name = StorPoolUtil.getNameFromResponse(emptyVolumeCreateResp, false);
|
||||
VolumeObjectTO dstTO = (VolumeObjectTO) dstData.getTO();
|
||||
dstTO.setSize(size);
|
||||
dstTO.setPath(StorPoolUtil.devPath(name));
|
||||
StorageSubSystemCommand cmd = new StorPoolDownloadTemplateCommand(srcData.getTO(), dstTO, StorPoolHelper.getTimeout(StorPoolHelper.PrimaryStorageDownloadWait, configDao), VirtualMachineManager.ExecuteInSequence.value(), "volume");
|
||||
|
||||
EndPoint ep = selector.select(srcData, dstData);
|
||||
if (ep == null) {
|
||||
answer = new Answer(cmd, false, "\"No remote endpoint to send command, check if host or ssvm is down?\"");
|
||||
} else {
|
||||
answer = ep.sendMessage(cmd);
|
||||
}
|
||||
if (answer == null || !answer.getResult()) {
|
||||
answer = new Answer(cmd, false, answer != null ? answer.getDetails() : "Unknown error while downloading template. Null answer returned.");
|
||||
}
|
||||
return answer;
|
||||
}
|
||||
|
||||
private void updateVolumePoolType(VolumeInfo vinfo) {
|
||||
VolumeVO volumeVO = volumeDao.findById(vinfo.getId());
|
||||
volumeVO.setPoolType(StoragePoolType.StorPool);
|
||||
|
|
|
|||
|
|
@ -643,6 +643,12 @@ public class StorPoolUtil {
|
|||
return POST("MultiCluster/VolumeRevert/" + name, json, conn);
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated Use volumeSnapshot instead
|
||||
* @param volumeName
|
||||
* @param conn
|
||||
* @return
|
||||
*/
|
||||
public static SpApiResponse volumeFreeze(final String volumeName, SpConnectionDesc conn) {
|
||||
return POST("MultiCluster/VolumeFreeze/" + volumeName, null, conn);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -219,24 +219,23 @@ public class StorPoolDataMotionStrategy implements DataMotionStrategy {
|
|||
} else {
|
||||
answer = (CopyCmdAnswer) ep2.sendMessage(backupSnapshot);
|
||||
if (answer != null && answer.getResult()) {
|
||||
SpApiResponse resSnapshot = StorPoolUtil.volumeFreeze(volumeName, conn);
|
||||
SpApiResponse resSnapshot = StorPoolUtil.volumeSnapshot(volumeName, template.getUuid(), null, "template", null, conn);
|
||||
if (resSnapshot.getError() != null) {
|
||||
logger.debug("Could not snapshot volume [id: {}, name: {}]", snapshot.getId(), snapshot.getName());
|
||||
StorPoolUtil.spLog("Volume freeze failed with error=%s", resSnapshot.getError().getDescr());
|
||||
logger.debug(String.format("Could not snapshot volume with ID={}", snapshot.getId()));
|
||||
StorPoolUtil.spLog("VolumeSnapshot failed with error=%s", resSnapshot.getError().getDescr());
|
||||
err = resSnapshot.getError().getDescr();
|
||||
StorPoolUtil.volumeDelete(volumeName, conn);
|
||||
} else {
|
||||
String templPath = StorPoolUtil.devPath(
|
||||
StorPoolUtil.getSnapshotNameFromResponse(resSnapshot, false, StorPoolUtil.GLOBAL_ID));
|
||||
StorPoolHelper.updateVmStoreTemplate(template.getId(), template.getDataStore().getRole(),
|
||||
StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(res, false)), _templStoreDao);
|
||||
templPath, _templStoreDao);
|
||||
}
|
||||
} else {
|
||||
err = "Could not copy template to secondary " + answer.getResult();
|
||||
StorPoolUtil.volumeDelete(StorPoolUtil.getNameFromResponse(res, true), conn);
|
||||
}
|
||||
}
|
||||
} catch (CloudRuntimeException e) {
|
||||
err = e.getMessage();
|
||||
}
|
||||
StorPoolUtil.volumeDelete(volumeName, conn);
|
||||
}
|
||||
_vmTemplateDetailsDao.persist(new VMTemplateDetailVO(template.getId(), StorPoolUtil.SP_STORAGE_POOL_ID,
|
||||
String.valueOf(vInfo.getDataStore().getId()), false));
|
||||
|
|
|
|||
5
pom.xml
5
pom.xml
|
|
@ -295,6 +295,11 @@
|
|||
<artifactId>aws-java-sdk-core</artifactId>
|
||||
<version>${cs.aws.sdk.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.amazonaws</groupId>
|
||||
<artifactId>aws-java-sdk-iam</artifactId>
|
||||
<version>${cs.aws.sdk.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.amazonaws</groupId>
|
||||
<artifactId>aws-java-sdk-s3</artifactId>
|
||||
|
|
|
|||
|
|
@ -31,6 +31,58 @@ NAS_ADDRESS=""
|
|||
MOUNT_OPTS=""
|
||||
BACKUP_DIR=""
|
||||
DISK_PATHS=""
|
||||
logFile="/var/log/cloudstack/agent/agent.log"
|
||||
|
||||
log() {
|
||||
[[ "$verb" -eq 1 ]] && builtin echo "$@"
|
||||
if [[ "$1" == "-ne" || "$1" == "-e" || "$1" == "-n" ]]; then
|
||||
builtin echo -e "$(date '+%Y-%m-%d %H-%M-%S>')" "${@: 2}" >> "$logFile"
|
||||
else
|
||||
builtin echo "$(date '+%Y-%m-%d %H-%M-%S>')" "$@" >> "$logFile"
|
||||
fi
|
||||
}
|
||||
|
||||
vercomp() {
|
||||
local IFS=.
|
||||
local i ver1=($1) ver2=($3)
|
||||
|
||||
# Compare each segment of the version numbers
|
||||
for ((i=0; i<${#ver1[@]}; i++)); do
|
||||
if [[ -z ${ver2[i]} ]]; then
|
||||
ver2[i]=0
|
||||
fi
|
||||
|
||||
if ((10#${ver1[i]} > 10#${ver2[i]})); then
|
||||
return 0 # Version 1 is greater
|
||||
elif ((10#${ver1[i]} < 10#${ver2[i]})); then
|
||||
return 2 # Version 2 is greater
|
||||
fi
|
||||
done
|
||||
return 0 # Versions are equal
|
||||
}
|
||||
|
||||
sanity_checks() {
|
||||
hvVersion=$(virsh version | grep hypervisor | awk '{print $(NF)}')
|
||||
libvVersion=$(virsh version | grep libvirt | awk '{print $(NF)}' | tail -n 1)
|
||||
apiVersion=$(virsh version | grep API | awk '{print $(NF)}')
|
||||
|
||||
# Compare qemu version (hvVersion >= 4.2.0)
|
||||
vercomp "$hvVersion" ">=" "4.2.0"
|
||||
hvStatus=$?
|
||||
|
||||
# Compare libvirt version (libvVersion >= 7.2.0)
|
||||
vercomp "$libvVersion" ">=" "7.2.0"
|
||||
libvStatus=$?
|
||||
|
||||
if [[ $hvStatus -eq 0 && $libvStatus -eq 0 ]]; then
|
||||
log -ne "Success... [ QEMU: $hvVersion Libvirt: $libvVersion apiVersion: $apiVersion ]"
|
||||
else
|
||||
echo "Failure... Your QEMU version $hvVersion or libvirt version $libvVersion is unsupported. Consider upgrading to the required minimum version of QEMU: 4.2.0 and Libvirt: 7.2.0"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log -ne "Environment Sanity Checks successfully passed"
|
||||
}
|
||||
|
||||
### Operation methods ###
|
||||
|
||||
|
|
@ -79,7 +131,7 @@ backup_stopped_vm() {
|
|||
name="root"
|
||||
for disk in $DISK_PATHS; do
|
||||
volUuid="${disk##*/}"
|
||||
qemu-img convert -O qcow2 $disk $dest/$name.$volUuid.qcow2
|
||||
qemu-img convert -O qcow2 $disk $dest/$name.$volUuid.qcow2 | tee -a "$logFile"
|
||||
name="datadisk"
|
||||
done
|
||||
sync
|
||||
|
|
@ -99,7 +151,16 @@ delete_backup() {
|
|||
mount_operation() {
|
||||
mount_point=$(mktemp -d -t csbackup.XXXXX)
|
||||
dest="$mount_point/${BACKUP_DIR}"
|
||||
mount -t ${NAS_TYPE} ${NAS_ADDRESS} ${mount_point} $([[ ! -z "${MOUNT_OPTS}" ]] && echo -o ${MOUNT_OPTS})
|
||||
if [ ${NAS_TYPE} == "cifs" ]; then
|
||||
MOUNT_OPTS="${MOUNT_OPTS},nobrl"
|
||||
fi
|
||||
mount -t ${NAS_TYPE} ${NAS_ADDRESS} ${mount_point} $([[ ! -z "${MOUNT_OPTS}" ]] && echo -o ${MOUNT_OPTS}) | tee -a "$logFile"
|
||||
if [ $? -eq 0 ]; then
|
||||
log -ne "Successfully mounted ${NAS_TYPE} store"
|
||||
else
|
||||
echo "Failed to mount ${NAS_TYPE} store"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function usage {
|
||||
|
|
@ -157,6 +218,9 @@ while [[ $# -gt 0 ]]; do
|
|||
esac
|
||||
done
|
||||
|
||||
# Perform Initial sanity checks
|
||||
sanity_checks
|
||||
|
||||
if [ "$OP" = "backup" ]; then
|
||||
STATE=$(virsh -c qemu:///system list | grep $VM | awk '{print $3}')
|
||||
if [ "$STATE" = "running" ]; then
|
||||
|
|
|
|||
|
|
@ -615,107 +615,81 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
|
|||
|
||||
String msgSubject = null;
|
||||
String msgContent = null;
|
||||
String totalStr;
|
||||
String usedStr;
|
||||
String pctStr = formatPercent(usedCapacity / totalCapacity);
|
||||
String percentual = formatPercent(usedCapacity / totalCapacity);
|
||||
String totalInMB = formatBytesToMegabytes(totalCapacity);
|
||||
String usedInMB = formatBytesToMegabytes(usedCapacity);
|
||||
String totalInString = String.valueOf(totalCapacity);
|
||||
String usedInString = String.valueOf(usedCapacity);
|
||||
AlertType alertType = null;
|
||||
Long podId = pod == null ? null : pod.getId();
|
||||
Long clusterId = cluster == null ? null : cluster.getId();
|
||||
String clusterName = cluster == null ? null : cluster.getName();
|
||||
String podName = pod == null ? null : pod.getName();
|
||||
String dataCenterName = dc.getName();
|
||||
|
||||
switch (capacityType) {
|
||||
|
||||
//Cluster Level
|
||||
case Capacity.CAPACITY_TYPE_MEMORY:
|
||||
msgSubject = "System Alert: Low Available Memory in cluster " + cluster.getName() + " pod " + pod.getName() + " of availability zone " + dc.getName();
|
||||
totalStr = formatBytesToMegabytes(totalCapacity);
|
||||
usedStr = formatBytesToMegabytes(usedCapacity);
|
||||
msgContent = "System memory is low, total: " + totalStr + " MB, used: " + usedStr + " MB (" + pctStr + "%)";
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_MEMORY;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_CPU:
|
||||
msgSubject = "System Alert: Low Unallocated CPU in cluster " + cluster.getName() + " pod " + pod.getName() + " of availability zone " + dc.getName();
|
||||
totalStr = DfWhole.format(totalCapacity);
|
||||
usedStr = DfWhole.format(usedCapacity);
|
||||
msgContent = "Unallocated CPU is low, total: " + totalStr + " Mhz, used: " + usedStr + " Mhz (" + pctStr + "%)";
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_CPU;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_STORAGE:
|
||||
msgSubject = "System Alert: Low Available Storage in cluster " + cluster.getName() + " pod " + pod.getName() + " of availability zone " + dc.getName();
|
||||
totalStr = formatBytesToMegabytes(totalCapacity);
|
||||
usedStr = formatBytesToMegabytes(usedCapacity);
|
||||
msgContent = "Available storage space is low, total: " + totalStr + " MB, used: " + usedStr + " MB (" + pctStr + "%)";
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_STORAGE;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED:
|
||||
msgSubject = "System Alert: Remaining unallocated Storage is low in cluster " + cluster.getName() + " pod " + pod.getName() + " of availability zone " +
|
||||
dc.getName();
|
||||
totalStr = formatBytesToMegabytes(totalCapacity);
|
||||
usedStr = formatBytesToMegabytes(usedCapacity);
|
||||
msgContent = "Unallocated storage space is low, total: " + totalStr + " MB, allocated: " + usedStr + " MB (" + pctStr + "%)";
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_STORAGE_ALLOCATED;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_LOCAL_STORAGE:
|
||||
msgSubject = "System Alert: Remaining unallocated Local Storage is low in cluster " + cluster.getName() + " pod " + pod.getName() + " of availability zone " +
|
||||
dc.getName();
|
||||
totalStr = formatBytesToMegabytes(totalCapacity);
|
||||
usedStr = formatBytesToMegabytes(usedCapacity);
|
||||
msgContent = "Unallocated storage space is low, total: " + totalStr + " MB, allocated: " + usedStr + " MB (" + pctStr + "%)";
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_LOCAL_STORAGE;
|
||||
break;
|
||||
|
||||
//Pod Level
|
||||
case Capacity.CAPACITY_TYPE_PRIVATE_IP:
|
||||
msgSubject = "System Alert: Number of unallocated private IPs is low in pod " + pod.getName() + " of availability zone " + dc.getName();
|
||||
totalStr = Double.toString(totalCapacity);
|
||||
usedStr = Double.toString(usedCapacity);
|
||||
msgContent = "Number of unallocated private IPs is low, total: " + totalStr + ", allocated: " + usedStr + " (" + pctStr + "%)";
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_PRIVATE_IP;
|
||||
break;
|
||||
|
||||
//Zone Level
|
||||
case Capacity.CAPACITY_TYPE_SECONDARY_STORAGE:
|
||||
msgSubject = "System Alert: Low Available Secondary Storage in availability zone " + dc.getName();
|
||||
totalStr = formatBytesToMegabytes(totalCapacity);
|
||||
usedStr = formatBytesToMegabytes(usedCapacity);
|
||||
msgContent = "Available secondary storage space is low, total: " + totalStr + " MB, used: " + usedStr + " MB (" + pctStr + "%)";
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_SECONDARY_STORAGE;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP:
|
||||
msgSubject = "System Alert: Number of unallocated virtual network public IPs is low in availability zone " + dc.getName();
|
||||
totalStr = Double.toString(totalCapacity);
|
||||
usedStr = Double.toString(usedCapacity);
|
||||
msgContent = "Number of unallocated public IPs is low, total: " + totalStr + ", allocated: " + usedStr + " (" + pctStr + "%)";
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_VIRTUAL_NETWORK_PUBLIC_IP;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_DIRECT_ATTACHED_PUBLIC_IP:
|
||||
msgSubject = "System Alert: Number of unallocated shared network IPs is low in availability zone " + dc.getName();
|
||||
totalStr = Double.toString(totalCapacity);
|
||||
usedStr = Double.toString(usedCapacity);
|
||||
msgContent = "Number of unallocated shared network IPs is low, total: " + totalStr + ", allocated: " + usedStr + " (" + pctStr + "%)";
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_DIRECT_ATTACHED_PUBLIC_IP;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_VLAN:
|
||||
msgSubject = "System Alert: Number of unallocated VLANs is low in availability zone " + dc.getName();
|
||||
totalStr = Double.toString(totalCapacity);
|
||||
usedStr = Double.toString(usedCapacity);
|
||||
msgContent = "Number of unallocated VLANs is low, total: " + totalStr + ", allocated: " + usedStr + " (" + pctStr + "%)";
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_VLAN;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_VIRTUAL_NETWORK_IPV6_SUBNET:
|
||||
msgSubject = "System Alert: Number of unallocated virtual network guest IPv6 subnets is low in availability zone " + dc.getName();
|
||||
totalStr = Double.toString(totalCapacity);
|
||||
usedStr = Double.toString(usedCapacity);
|
||||
msgContent = "Number of unallocated virtual network guest IPv6 subnets is low, total: " + totalStr + ", allocated: " + usedStr + " (" + pctStr + "%)";
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_VIRTUAL_NETWORK_IPV6_SUBNET;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_MEMORY:
|
||||
msgSubject = String.format("System Alert: Low Available Memory in cluster [%s] pod [%s] of availability zone [%s].", clusterName, podName, dataCenterName);
|
||||
msgContent = String.format("System memory is low, total: %s MB, used: %s MB (%s%%).", totalInMB, usedInMB, percentual);
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_MEMORY;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_CPU:
|
||||
msgSubject = String.format("System Alert: Low Unallocated CPU in cluster [%s] pod [%s] of availability zone [%s].", clusterName, podName, dataCenterName);
|
||||
msgContent = String.format("Unallocated CPU is low, total: %s Mhz, used: %s Mhz (%s%%).", DfWhole.format(totalCapacity), DfWhole.format(usedCapacity), percentual);
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_CPU;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_STORAGE:
|
||||
msgSubject = String.format("System Alert: Low Available Storage in cluster [%s] pod [%s] of availability zone [%s].", clusterName, podName, dataCenterName);
|
||||
msgContent = String.format("Available storage space is low, total: %s MB, used: %s MB (%s%%).", totalInMB, usedInMB, percentual);
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_STORAGE;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED:
|
||||
msgSubject = String.format("System Alert: Remaining unallocated Storage is low in cluster [%s] pod [%s] of availability zone [%s].", clusterName, podName,
|
||||
dataCenterName);
|
||||
msgContent = String.format("Unallocated storage space is low, total: %s MB, allocated: %s MB (%s%%)", totalInMB, usedInMB, percentual);
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_STORAGE_ALLOCATED;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_LOCAL_STORAGE:
|
||||
msgSubject = String.format("System Alert: Remaining unallocated Local Storage is low in cluster [%s] pod [%s] of availability zone [%s].", clusterName, podName,
|
||||
dataCenterName);
|
||||
msgContent = String.format("Unallocated storage space is low, total: %s MB, allocated: %s MB (%s%%)", totalInMB, usedInMB, percentual);
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_LOCAL_STORAGE;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_PRIVATE_IP:
|
||||
msgSubject = String.format("System Alert: Number of unallocated private IPs is low in pod %s of availability zone [%s].", podName, dataCenterName);
|
||||
msgContent = String.format("Number of unallocated private IPs is low, total: %s, allocated: %s (%s%%)", totalInString, usedInString, percentual);
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_PRIVATE_IP;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_SECONDARY_STORAGE:
|
||||
msgSubject = String.format("System Alert: Low Available Secondary Storage in availability zone [%s].", dataCenterName);
|
||||
msgContent = String.format("Available secondary storage space is low, total: %s MB, used: %s MB (%s%%).", totalInMB, usedInMB, percentual);
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_SECONDARY_STORAGE;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP:
|
||||
msgSubject = String.format("System Alert: Number of unallocated virtual network public IPs is low in availability zone [%s].", dataCenterName);
|
||||
msgContent = String.format("Number of unallocated public IPs is low, total: %s, allocated: %s (%s%%).", totalInString, usedInString, percentual);
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_VIRTUAL_NETWORK_PUBLIC_IP;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_DIRECT_ATTACHED_PUBLIC_IP:
|
||||
msgSubject = String.format("System Alert: Number of unallocated shared network IPs is low in availability zone [%s].", dataCenterName);
|
||||
msgContent = String.format("Number of unallocated shared network IPs is low, total: %s, allocated: %s (%s%%).", totalInString, usedInString, percentual);
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_DIRECT_ATTACHED_PUBLIC_IP;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_VLAN:
|
||||
msgSubject = String.format("System Alert: Number of unallocated VLANs is low in availability zone [%s].", dataCenterName);
|
||||
msgContent = String.format("Number of unallocated VLANs is low, total: %s, allocated: %s (%s%%).", totalInString, usedInString, percentual);
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_VLAN;
|
||||
break;
|
||||
case Capacity.CAPACITY_TYPE_VIRTUAL_NETWORK_IPV6_SUBNET:
|
||||
msgSubject = String.format("System Alert: Number of unallocated virtual network guest IPv6 subnets is low in availability zone [%s].", dc.getName());
|
||||
msgContent = String.format("Number of unallocated virtual network guest IPv6 subnets is low, total: [%s], allocated: [%s] (%s%%).", totalInString, usedInString, percentual);
|
||||
alertType = AlertManager.AlertType.ALERT_TYPE_VIRTUAL_NETWORK_IPV6_SUBNET;
|
||||
break;
|
||||
}
|
||||
|
||||
try {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(msgSubject);
|
||||
logger.debug(msgContent);
|
||||
}
|
||||
sendAlert(alertType, dc, pod, cluster, msgSubject, msgContent);
|
||||
logger.debug("Sending alert with subject [{}] and content [{}].", msgSubject, msgContent);
|
||||
sendAlert(alertType, dc.getId(), podId, clusterId, msgSubject, msgContent);
|
||||
} catch (Exception ex) {
|
||||
logger.error("Exception in CapacityChecker", ex);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2137,7 +2137,7 @@ public class ApiDBUtils {
|
|||
for (DiskOfferingJoinVO offering : offerings) {
|
||||
DiskOfferingResponse response = s_diskOfferingJoinDao.newDiskOfferingResponse(offering);
|
||||
if (vmId != null) {
|
||||
response.setSuitableForVm(suitability.get(offering.getId()));
|
||||
response.setSuitableForVm(suitability.getOrDefault(offering.getId(), true));
|
||||
}
|
||||
list.add(response);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5444,7 +5444,6 @@ public class ApiResponseHelper implements ResponseGenerator {
|
|||
response.setAddress(backupRepository.getAddress());
|
||||
response.setProviderName(backupRepository.getProvider());
|
||||
response.setType(backupRepository.getType());
|
||||
response.setMountOptions(backupRepository.getMountOptions());
|
||||
response.setCapacityBytes(backupRepository.getCapacityBytes());
|
||||
response.setObjectName("backuprepository");
|
||||
DataCenter zone = ApiDBUtils.findZoneById(backupRepository.getZoneId());
|
||||
|
|
|
|||
|
|
@ -1787,7 +1787,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C
|
|||
}
|
||||
|
||||
Pair<Integer, Integer> interfaceMTUs = validateMtuConfig(publicMtu, privateMtu, zone.getId());
|
||||
mtuCheckForVpcNetwork(vpcId, interfaceMTUs, publicMtu, privateMtu);
|
||||
mtuCheckForVpcNetwork(vpcId, interfaceMTUs, publicMtu);
|
||||
|
||||
Network associatedNetwork = null;
|
||||
if (associatedNetworkId != null) {
|
||||
|
|
@ -2082,7 +2082,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C
|
|||
return ntwkOff;
|
||||
}
|
||||
|
||||
protected void mtuCheckForVpcNetwork(Long vpcId, Pair<Integer, Integer> interfaceMTUs, Integer publicMtu, Integer privateMtu) {
|
||||
protected void mtuCheckForVpcNetwork(Long vpcId, Pair<Integer, Integer> interfaceMTUs, Integer publicMtu) {
|
||||
if (vpcId != null && publicMtu != null) {
|
||||
VpcVO vpc = _vpcDao.findById(vpcId);
|
||||
if (vpc == null) {
|
||||
|
|
@ -2090,7 +2090,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C
|
|||
}
|
||||
logger.warn(String.format("VPC public MTU already set at VPC creation phase to: %s. Ignoring public MTU " +
|
||||
"passed during VPC network tier creation ", vpc.getPublicMtu()));
|
||||
interfaceMTUs.set(vpc.getPublicMtu(), privateMtu);
|
||||
interfaceMTUs.set(vpc.getPublicMtu(), interfaceMTUs.second());
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1840,9 +1840,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
|
|||
_hostDetailsDao.update(hostDetail.getId(), hostDetail);
|
||||
} else if (!isUpdateFromHostHealthCheck && hostDetail != null &&
|
||||
Boolean.parseBoolean(hostDetail.getValue()) && resourceEvent == ResourceState.Event.Disable) {
|
||||
logger.info(String.format("The setting %s is enabled but the host %s is manually set into %s state," +
|
||||
logger.info("The setting {} is enabled but {} is manually set into {} state," +
|
||||
"ignoring future auto enabling of the host based on health check results",
|
||||
AgentManager.EnableKVMAutoEnableDisable.key(), host.getName(), resourceEvent));
|
||||
AgentManager.EnableKVMAutoEnableDisable.key(), host, resourceEvent);
|
||||
hostDetail.setValue(Boolean.FALSE.toString());
|
||||
_hostDetailsDao.update(hostDetail.getId(), hostDetail);
|
||||
} else if (hostDetail == null) {
|
||||
|
|
|
|||
|
|
@ -718,10 +718,10 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
|
|||
getDynamicDataFromDB();
|
||||
long interval = (Long) dbStats.get(uptime) - lastUptime;
|
||||
long activity = (Long) dbStats.get(queries) - lastQueries;
|
||||
loadHistory.add(0, Double.valueOf(activity / interval));
|
||||
loadHistory.add(0, interval == 0 ? -1 : Double.valueOf(activity / interval));
|
||||
int maxsize = DATABASE_SERVER_LOAD_HISTORY_RETENTION_NUMBER.value();
|
||||
while (loadHistory.size() > maxsize) {
|
||||
loadHistory.remove(maxsize - 1);
|
||||
loadHistory.remove(maxsize);
|
||||
}
|
||||
} catch (Throwable e) {
|
||||
// pokemon catch to make sure the thread stays running
|
||||
|
|
|
|||
|
|
@ -23,21 +23,12 @@ import java.util.Date;
|
|||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.TimeZone;
|
||||
import java.util.Timer;
|
||||
import java.util.TimerTask;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import com.amazonaws.util.CollectionUtils;
|
||||
import com.cloud.alert.AlertManager;
|
||||
import com.cloud.configuration.Resource;
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
import com.cloud.storage.Snapshot;
|
||||
import com.cloud.storage.VolumeApiService;
|
||||
import com.cloud.user.DomainManager;
|
||||
import com.cloud.user.ResourceLimitService;
|
||||
import com.cloud.utils.fsm.NoTransitionException;
|
||||
import com.cloud.vm.VirtualMachineManager;
|
||||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
|
|
@ -68,7 +59,6 @@ import org.apache.cloudstack.backup.dao.BackupDao;
|
|||
import org.apache.cloudstack.backup.dao.BackupOfferingDao;
|
||||
import org.apache.cloudstack.backup.dao.BackupScheduleDao;
|
||||
import org.apache.cloudstack.context.CallContext;
|
||||
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
|
||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||
import org.apache.cloudstack.framework.jobs.AsyncJobDispatcher;
|
||||
import org.apache.cloudstack.framework.jobs.AsyncJobManager;
|
||||
|
|
@ -83,8 +73,11 @@ import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToSt
|
|||
import org.apache.commons.lang3.BooleanUtils;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import com.amazonaws.util.CollectionUtils;
|
||||
import com.cloud.alert.AlertManager;
|
||||
import com.cloud.api.ApiDispatcher;
|
||||
import com.cloud.api.ApiGsonHelper;
|
||||
import com.cloud.configuration.Resource;
|
||||
import com.cloud.dc.DataCenter;
|
||||
import com.cloud.dc.dao.DataCenterDao;
|
||||
import com.cloud.event.ActionEvent;
|
||||
|
|
@ -94,6 +87,7 @@ import com.cloud.event.EventVO;
|
|||
import com.cloud.event.UsageEventUtils;
|
||||
import com.cloud.exception.InvalidParameterValueException;
|
||||
import com.cloud.exception.PermissionDeniedException;
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
|
|
@ -101,13 +95,17 @@ import com.cloud.hypervisor.HypervisorGuru;
|
|||
import com.cloud.hypervisor.HypervisorGuruManager;
|
||||
import com.cloud.projects.Project;
|
||||
import com.cloud.storage.ScopeType;
|
||||
import com.cloud.storage.Snapshot;
|
||||
import com.cloud.storage.Volume;
|
||||
import com.cloud.storage.VolumeApiService;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.dao.DiskOfferingDao;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.AccountManager;
|
||||
import com.cloud.user.AccountService;
|
||||
import com.cloud.user.DomainManager;
|
||||
import com.cloud.user.ResourceLimitService;
|
||||
import com.cloud.user.User;
|
||||
import com.cloud.utils.DateUtil;
|
||||
import com.cloud.utils.Pair;
|
||||
|
|
@ -126,8 +124,10 @@ import com.cloud.utils.db.TransactionCallbackNoReturn;
|
|||
import com.cloud.utils.db.TransactionLegacy;
|
||||
import com.cloud.utils.db.TransactionStatus;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.fsm.NoTransitionException;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachineManager;
|
||||
import com.cloud.vm.dao.UserVmDao;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
import com.google.gson.Gson;
|
||||
|
|
@ -173,8 +173,6 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager {
|
|||
@Inject
|
||||
private VolumeApiService volumeApiService;
|
||||
@Inject
|
||||
private VolumeOrchestrationService volumeOrchestrationService;
|
||||
@Inject
|
||||
private ResourceLimitService resourceLimitMgr;
|
||||
@Inject
|
||||
private AlertManager alertManager;
|
||||
|
|
@ -514,16 +512,33 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager {
|
|||
|
||||
@Override
|
||||
@ActionEvent(eventType = EventTypes.EVENT_VM_BACKUP_SCHEDULE_DELETE, eventDescription = "deleting VM backup schedule")
|
||||
public boolean deleteBackupSchedule(Long vmId) {
|
||||
final VMInstanceVO vm = findVmById(vmId);
|
||||
validateForZone(vm.getDataCenterId());
|
||||
accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm);
|
||||
|
||||
final BackupSchedule schedule = backupScheduleDao.findByVM(vmId);
|
||||
if (schedule == null) {
|
||||
throw new CloudRuntimeException("VM has no backup schedule defined, no need to delete anything.");
|
||||
public boolean deleteBackupSchedule(DeleteBackupScheduleCmd cmd) {
|
||||
Long vmId = cmd.getVmId();
|
||||
Long id = cmd.getId();
|
||||
if (Objects.isNull(vmId) && Objects.isNull(id)) {
|
||||
throw new InvalidParameterValueException("Either instance ID or ID of backup schedule needs to be specified");
|
||||
}
|
||||
return backupScheduleDao.remove(schedule.getId());
|
||||
if (Objects.nonNull(vmId)) {
|
||||
final VMInstanceVO vm = findVmById(vmId);
|
||||
validateForZone(vm.getDataCenterId());
|
||||
accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm);
|
||||
return deleteAllVMBackupSchedules(vm.getId());
|
||||
} else {
|
||||
final BackupSchedule schedule = backupScheduleDao.findById(id);
|
||||
if (schedule == null) {
|
||||
throw new CloudRuntimeException("Could not find the requested backup schedule.");
|
||||
}
|
||||
return backupScheduleDao.remove(schedule.getId());
|
||||
}
|
||||
}
|
||||
|
||||
private boolean deleteAllVMBackupSchedules(long vmId) {
|
||||
List<BackupScheduleVO> vmBackupSchedules = backupScheduleDao.listByVM(vmId);
|
||||
boolean success = true;
|
||||
for (BackupScheduleVO vmBackupSchedule : vmBackupSchedules) {
|
||||
success = success && backupScheduleDao.remove(vmBackupSchedule.getId());
|
||||
}
|
||||
return success;
|
||||
}
|
||||
|
||||
private void postCreateScheduledBackup(Backup.Type backupType, Long vmId) {
|
||||
|
|
@ -740,6 +755,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager {
|
|||
!vm.getState().equals(VirtualMachine.State.Destroyed)) {
|
||||
throw new CloudRuntimeException("Existing VM should be stopped before being restored from backup");
|
||||
}
|
||||
|
||||
// This is done to handle historic backups if any with Veeam / Networker plugins
|
||||
List<Backup.VolumeInfo> backupVolumes = CollectionUtils.isNullOrEmpty(backup.getBackedUpVolumes()) ?
|
||||
vm.getBackupVolumeList() : backup.getBackedUpVolumes();
|
||||
|
|
|
|||
|
|
@ -303,8 +303,15 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic
|
|||
try {
|
||||
List<ObjectStoreVO> objectStores = _objectStoreDao.listObjectStores();
|
||||
for(ObjectStoreVO objectStoreVO: objectStores) {
|
||||
logger.debug("Getting bucket usage for Object Store \"{}\"", objectStoreVO.getName());
|
||||
ObjectStoreEntity objectStore = (ObjectStoreEntity)_dataStoreMgr.getDataStore(objectStoreVO.getId(), DataStoreRole.Object);
|
||||
Map<String, Long> bucketSizes = objectStore.getAllBucketsUsage();
|
||||
Map<String, Long> bucketSizes;
|
||||
try {
|
||||
bucketSizes = objectStore.getAllBucketsUsage();
|
||||
} catch (CloudRuntimeException e) {
|
||||
logger.error(String.format("Failed to get bucket usage for Object Store \"%s\". Skipping this store.", objectStoreVO.getName()), e);
|
||||
continue;
|
||||
}
|
||||
List<BucketVO> buckets = _bucketDao.listByObjectStoreId(objectStoreVO.getId());
|
||||
for(BucketVO bucket : buckets) {
|
||||
Long size = bucketSizes.get(bucket.getName());
|
||||
|
|
|
|||
|
|
@ -555,7 +555,7 @@ public class NetworkServiceImplTest {
|
|||
Mockito.when(vpcVO.getPublicMtu()).thenReturn(vpcMtu);
|
||||
|
||||
Pair<Integer, Integer> updatedMtus = service.validateMtuConfig(publicMtu, privateMtu, zoneId);
|
||||
service.mtuCheckForVpcNetwork(vpcId, updatedMtus, publicMtu, privateMtu);
|
||||
service.mtuCheckForVpcNetwork(vpcId, updatedMtus, publicMtu);
|
||||
Assert.assertEquals(vpcMtu, updatedMtus.first());
|
||||
Assert.assertEquals(privateMtu, updatedMtus.second());
|
||||
}
|
||||
|
|
|
|||
|
|
@ -403,6 +403,8 @@ for example:
|
|||
except IOError as e:
|
||||
msg = "Failed to save management server secret key file %s due to %s, also please check the default umask"%(self.encryptionKeyFile, e.strerror)
|
||||
self.errorAndExit(msg)
|
||||
os.chmod(self.encryptionKeyFile, 0o640)
|
||||
shutil.chown(self.encryptionKeyFile, user=None, group="cloud")
|
||||
|
||||
def formatEncryptResult(value):
|
||||
return 'ENC(%s)'%value
|
||||
|
|
|
|||
|
|
@ -18,14 +18,15 @@
|
|||
import logging
|
||||
import random
|
||||
import time
|
||||
import socket
|
||||
|
||||
# All tests inherit from cloudstackTestCase
|
||||
from marvin.cloudstackTestCase import cloudstackTestCase
|
||||
|
||||
# Import Integration Libraries
|
||||
# base - contains all resources as entities and defines create, delete, list operations on them
|
||||
from marvin.lib.base import Account, DiskOffering, ServiceOffering, Snapshot, StoragePool, Template, User, \
|
||||
VirtualMachine, Volume
|
||||
from marvin.lib.base import Account, DiskOffering, ServiceOffering, Snapshot, StoragePool, Template, User
|
||||
from marvin.lib.base import VirtualMachine, Volume, VmSnapshot
|
||||
|
||||
# common - commonly used methods for all tests are listed here
|
||||
from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_virtual_machines, \
|
||||
|
|
@ -97,8 +98,7 @@ class TestData:
|
|||
# hypervisor type to test
|
||||
hypervisor_type = kvm
|
||||
|
||||
def __init__(self):
|
||||
linstor_controller_url = "http://10.43.224.8"
|
||||
def __init__(self, linstor_controller_url):
|
||||
self.testdata = {
|
||||
TestData.kvm: {
|
||||
TestData.username: "admin",
|
||||
|
|
@ -197,7 +197,7 @@ class TestData:
|
|||
"resourceGroup": "acs-test-same"
|
||||
}
|
||||
},
|
||||
# Linstor storage pool on different ScaleIO storage instance
|
||||
# Linstor storage pool on different Linstor storage instance
|
||||
TestData.primaryStorageDistinctInstance: {
|
||||
"name": "Linstor-%d" % random.randint(0, 100),
|
||||
TestData.scope: "ZONE",
|
||||
|
|
@ -225,6 +225,44 @@ class TestData:
|
|||
},
|
||||
}
|
||||
|
||||
class ServiceReady:
|
||||
@classmethod
|
||||
def ready(cls, hostname: str, port: int) -> bool:
|
||||
try:
|
||||
s = socket.create_connection((hostname, port), timeout=1)
|
||||
s.close()
|
||||
return True
|
||||
except (ConnectionRefusedError, socket.timeout, OSError):
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def wait(
|
||||
cls,
|
||||
hostname,
|
||||
port,
|
||||
wait_interval = 5,
|
||||
timeout = 90,
|
||||
service_name = 'ssh') -> bool:
|
||||
"""
|
||||
Wait until the controller can be reached.
|
||||
:param hostname:
|
||||
:param port: port of the application
|
||||
:param wait_interval:
|
||||
:param timeout: time to wait until exit with False
|
||||
:param service_name: name of the service to wait
|
||||
:return:
|
||||
"""
|
||||
starttime = int(round(time.time() * 1000))
|
||||
while not cls.ready(hostname, port):
|
||||
if starttime + timeout * 1000 < int(round(time.time() * 1000)):
|
||||
raise RuntimeError("{s} {h} cannot be reached.".format(s=service_name, h=hostname))
|
||||
time.sleep(wait_interval)
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
def wait_ssh_ready(cls, hostname, wait_interval = 1, timeout = 90):
|
||||
return cls.wait(hostname, 22, wait_interval, timeout, "ssh")
|
||||
|
||||
|
||||
class TestLinstorVolumes(cloudstackTestCase):
|
||||
_volume_vm_id_and_vm_id_do_not_match_err_msg = "The volume's VM ID and the VM's ID do not match."
|
||||
|
|
@ -239,7 +277,11 @@ class TestLinstorVolumes(cloudstackTestCase):
|
|||
cls.apiClient = testclient.getApiClient()
|
||||
cls.configData = testclient.getParsedTestDataConfig()
|
||||
cls.dbConnection = testclient.getDbConnection()
|
||||
cls.testdata = TestData().testdata
|
||||
|
||||
# first host has the linstor controller
|
||||
first_host = list_hosts(cls.apiClient)[0]
|
||||
|
||||
cls.testdata = TestData(first_host.ipaddress).testdata
|
||||
|
||||
# Get Resources from Cloud Infrastructure
|
||||
cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
|
||||
|
|
@ -326,7 +368,8 @@ class TestLinstorVolumes(cloudstackTestCase):
|
|||
serviceofferingid=cls.compute_offering.id,
|
||||
templateid=cls.template.id,
|
||||
domainid=cls.domain.id,
|
||||
startvm=False
|
||||
startvm=False,
|
||||
mode='basic',
|
||||
)
|
||||
|
||||
TestLinstorVolumes._start_vm(cls.virtual_machine)
|
||||
|
|
@ -394,7 +437,8 @@ class TestLinstorVolumes(cloudstackTestCase):
|
|||
serviceofferingid=self.compute_offering.id,
|
||||
templateid=self.template.id,
|
||||
domainid=self.domain.id,
|
||||
startvm=False
|
||||
startvm=False,
|
||||
mode='basic',
|
||||
)
|
||||
|
||||
TestLinstorVolumes._start_vm(test_virtual_machine)
|
||||
|
|
@ -887,8 +931,31 @@ class TestLinstorVolumes(cloudstackTestCase):
|
|||
"Check volume was deleted"
|
||||
)
|
||||
|
||||
@attr(tags=['basic'], required_hardware=False)
|
||||
def test_09_create_snapshot(self):
|
||||
"""Create snapshot of root disk"""
|
||||
self.virtual_machine.stop(self.apiClient)
|
||||
|
||||
volume = list_volumes(
|
||||
self.apiClient,
|
||||
virtualmachineid = self.virtual_machine.id,
|
||||
type = "ROOT",
|
||||
listall = True,
|
||||
)
|
||||
snapshot = Snapshot.create(
|
||||
self.apiClient,
|
||||
volume_id = volume[0].id,
|
||||
account=self.account.name,
|
||||
domainid=self.domain.id,
|
||||
)
|
||||
|
||||
self.assertIsNotNone(snapshot, "Could not create snapshot")
|
||||
|
||||
snapshot.delete(self.apiClient)
|
||||
|
||||
|
||||
@attr(tags=['advanced', 'migration'], required_hardware=False)
|
||||
def test_09_migrate_volume_to_same_instance_pool(self):
|
||||
def test_10_migrate_volume_to_same_instance_pool(self):
|
||||
"""Migrate volume to the same instance pool"""
|
||||
|
||||
if not self.testdata[TestData.migrationTests]:
|
||||
|
|
@ -906,7 +973,8 @@ class TestLinstorVolumes(cloudstackTestCase):
|
|||
serviceofferingid=self.compute_offering.id,
|
||||
templateid=self.template.id,
|
||||
domainid=self.domain.id,
|
||||
startvm=False
|
||||
startvm=False,
|
||||
mode='basic',
|
||||
)
|
||||
|
||||
TestLinstorVolumes._start_vm(test_virtual_machine)
|
||||
|
|
@ -1020,7 +1088,7 @@ class TestLinstorVolumes(cloudstackTestCase):
|
|||
test_virtual_machine.delete(self.apiClient, True)
|
||||
|
||||
@attr(tags=['advanced', 'migration'], required_hardware=False)
|
||||
def test_10_migrate_volume_to_distinct_instance_pool(self):
|
||||
def test_11_migrate_volume_to_distinct_instance_pool(self):
|
||||
"""Migrate volume to distinct instance pool"""
|
||||
|
||||
if not self.testdata[TestData.migrationTests]:
|
||||
|
|
@ -1038,7 +1106,8 @@ class TestLinstorVolumes(cloudstackTestCase):
|
|||
serviceofferingid=self.compute_offering.id,
|
||||
templateid=self.template.id,
|
||||
domainid=self.domain.id,
|
||||
startvm=False
|
||||
startvm=False,
|
||||
mode='basic',
|
||||
)
|
||||
|
||||
TestLinstorVolumes._start_vm(test_virtual_machine)
|
||||
|
|
@ -1151,6 +1220,132 @@ class TestLinstorVolumes(cloudstackTestCase):
|
|||
|
||||
test_virtual_machine.delete(self.apiClient, True)
|
||||
|
||||
@attr(tags=["basic"], required_hardware=False)
|
||||
def test_12_create_vm_snapshots(self):
|
||||
"""Test to create VM snapshots
|
||||
"""
|
||||
vm = TestLinstorVolumes._start_vm(self.virtual_machine)
|
||||
|
||||
try:
|
||||
# Login to VM and write data to file system
|
||||
self.debug("virt: {}".format(vm))
|
||||
ssh_client = self.virtual_machine.get_ssh_client(vm.ipaddress, retries=5)
|
||||
ssh_client.execute("echo 'hello world' > testfile")
|
||||
ssh_client.execute("sync")
|
||||
except Exception as exc:
|
||||
self.fail("SSH failed for Virtual machine {}: {}".format(self.virtual_machine.ssh_ip, exc))
|
||||
|
||||
time.sleep(10)
|
||||
memory_snapshot = False
|
||||
vm_snapshot = VmSnapshot.create(
|
||||
self.apiClient,
|
||||
self.virtual_machine.id,
|
||||
memory_snapshot,
|
||||
"VMSnapshot1",
|
||||
"test snapshot"
|
||||
)
|
||||
self.assertEqual(
|
||||
vm_snapshot.state,
|
||||
"Ready",
|
||||
"Check the snapshot of vm is ready!"
|
||||
)
|
||||
|
||||
@attr(tags=["basic"], required_hardware=False)
|
||||
def test_13_revert_vm_snapshots(self):
|
||||
"""Test to revert VM snapshots
|
||||
"""
|
||||
|
||||
result = None
|
||||
try:
|
||||
ssh_client = self.virtual_machine.get_ssh_client(reconnect=True)
|
||||
result = ssh_client.execute("rm -rf testfile")
|
||||
except Exception as exc:
|
||||
self.fail("SSH failed for Virtual machine %s: %s".format(self.virtual_machine.ipaddress, exc))
|
||||
|
||||
if result is not None and "No such file or directory" in str(result):
|
||||
self.fail("testfile not deleted")
|
||||
|
||||
time.sleep(5)
|
||||
|
||||
list_snapshot_response = VmSnapshot.list(
|
||||
self.apiClient,
|
||||
virtualmachineid=self.virtual_machine.id,
|
||||
listall=True)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_snapshot_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
list_snapshot_response,
|
||||
None,
|
||||
"Check if snapshot exists in ListSnapshot"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
list_snapshot_response[0].state,
|
||||
"Ready",
|
||||
"Check the snapshot of vm is ready!"
|
||||
)
|
||||
|
||||
self.virtual_machine.stop(self.apiClient, forced=True)
|
||||
|
||||
VmSnapshot.revertToSnapshot(
|
||||
self.apiClient,
|
||||
list_snapshot_response[0].id
|
||||
)
|
||||
|
||||
TestLinstorVolumes._start_vm(self.virtual_machine)
|
||||
|
||||
try:
|
||||
ssh_client = self.virtual_machine.get_ssh_client(reconnect=True)
|
||||
|
||||
result = ssh_client.execute("cat testfile")
|
||||
|
||||
except Exception as exc:
|
||||
self.fail("SSH failed for Virtual machine {}: {}".format(self.virtual_machine.ipaddress, exc))
|
||||
|
||||
self.assertEqual(
|
||||
"hello world",
|
||||
result[0],
|
||||
"Check the content is the same as originally written"
|
||||
)
|
||||
|
||||
@attr(tags=["basic"], required_hardware=False)
|
||||
def test_14_delete_vm_snapshots(self):
|
||||
"""Test to delete vm snapshots
|
||||
"""
|
||||
|
||||
list_snapshot_response = VmSnapshot.list(
|
||||
self.apiClient,
|
||||
virtualmachineid=self.virtual_machine.id,
|
||||
listall=True)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_snapshot_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
list_snapshot_response,
|
||||
None,
|
||||
"Check if snapshot exists in ListSnapshot"
|
||||
)
|
||||
VmSnapshot.deleteVMSnapshot(
|
||||
self.apiClient,
|
||||
list_snapshot_response[0].id)
|
||||
|
||||
time.sleep(5)
|
||||
|
||||
list_snapshot_response = VmSnapshot.list(
|
||||
self.apiClient,
|
||||
virtualmachineid=self.virtual_machine.id,
|
||||
listall=False)
|
||||
self.debug('list_snapshot_response -------------------- {}'.format(list_snapshot_response))
|
||||
|
||||
self.assertIsNone(list_snapshot_response, "snapshot is already deleted")
|
||||
|
||||
def _create_vm_using_template_and_destroy_vm(self, template):
|
||||
vm_name = "VM-%d" % random.randint(0, 100)
|
||||
|
||||
|
|
@ -1177,42 +1372,31 @@ class TestLinstorVolumes(cloudstackTestCase):
|
|||
|
||||
virtual_machine.delete(self.apiClient, True)
|
||||
|
||||
@staticmethod
|
||||
def _get_bytes_from_gb(number_in_gb):
|
||||
return number_in_gb * 1024 * 1024 * 1024
|
||||
|
||||
def _get_volume(self, volume_id):
|
||||
list_vols_response = list_volumes(self.apiClient, id=volume_id)
|
||||
return list_vols_response[0]
|
||||
|
||||
def _get_vm(self, vm_id):
|
||||
list_vms_response = list_virtual_machines(self.apiClient, id=vm_id)
|
||||
@classmethod
|
||||
def _get_vm(cls, vm_id):
|
||||
list_vms_response = list_virtual_machines(cls.apiClient, id=vm_id)
|
||||
return list_vms_response[0]
|
||||
|
||||
def _get_template_cache_name(self):
|
||||
if TestData.hypervisor_type == TestData.kvm:
|
||||
return TestData.templateCacheNameKvm
|
||||
|
||||
self.assert_(False, "Invalid hypervisor type")
|
||||
|
||||
@classmethod
|
||||
def _start_vm(cls, vm):
|
||||
vm_for_check = list_virtual_machines(
|
||||
cls.apiClient,
|
||||
id=vm.id
|
||||
)[0]
|
||||
vm_for_check = cls._get_vm(vm.id)
|
||||
|
||||
if vm_for_check.state == VirtualMachine.STOPPED:
|
||||
vm.start(cls.apiClient)
|
||||
|
||||
# For KVM, just give it 90 seconds to boot up.
|
||||
if TestData.hypervisor_type == TestData.kvm:
|
||||
time.sleep(90)
|
||||
vm_for_check = cls._get_vm(vm.id)
|
||||
ServiceReady.wait_ssh_ready(vm_for_check.ipaddress)
|
||||
return vm_for_check
|
||||
|
||||
@classmethod
|
||||
def _reboot_vm(cls, vm):
|
||||
vm_for_check = cls._get_vm(vm.id)
|
||||
vm.reboot(cls.apiClient)
|
||||
|
||||
# For KVM, just give it 90 seconds to boot up.
|
||||
if TestData.hypervisor_type == TestData.kvm:
|
||||
time.sleep(90)
|
||||
time.sleep(5)
|
||||
|
||||
ServiceReady.wait_ssh_ready(vm_for_check.ipaddress)
|
||||
|
|
|
|||
|
|
@ -259,8 +259,8 @@ class TestPurgeExpungedVms(cloudstackTestCase):
|
|||
active_server_ips = []
|
||||
active_server_ips.append(self.mgtSvrDetails["mgtSvrIp"])
|
||||
for idx, server in enumerate(servers):
|
||||
if server.state == 'Up' and server.serviceip != self.mgtSvrDetails["mgtSvrIp"]:
|
||||
active_server_ips.append(server.serviceip)
|
||||
if server.state == 'Up' and server.ipaddress != self.mgtSvrDetails["mgtSvrIp"]:
|
||||
active_server_ips.append(server.ipaddress)
|
||||
return active_server_ips
|
||||
|
||||
def restartAllManagementServers(self):
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@
|
|||
set -e
|
||||
set -x
|
||||
|
||||
CLOUDSTACK_RELEASE=4.20.0
|
||||
CLOUDSTACK_RELEASE=4.21.0
|
||||
|
||||
function configure_apache2() {
|
||||
# Enable ssl, rewrite and auth
|
||||
|
|
|
|||
|
|
@ -32,8 +32,8 @@
|
|||
"format": "qcow2",
|
||||
"headless": true,
|
||||
"http_directory": "http",
|
||||
"iso_checksum": "sha512:fc3560bb586af14b1d77ab7c2806616916926afcbd5cb3fd5a04a5633dfd91cfbbccada1a123f1ea14c480153b731cbee72a230cea17fd9116b9df8444d8df1c",
|
||||
"iso_url": "https://cdimage.debian.org/mirror/cdimage/release/12.7.0/arm64/iso-cd/debian-12.7.0-arm64-netinst.iso",
|
||||
"iso_checksum": "sha512:04a2a128852c2dff8bb71779ad325721385051eb1264d897bdb5918ab207a9b1de636ded149c56c61a09eb8c7f428496815e70d3be31b1b1cf4c70bf6427cedd",
|
||||
"iso_url": "https://cdimage.debian.org/mirror/cdimage/release/12.9.0/arm64/iso-cd/debian-12.9.0-arm64-netinst.iso",
|
||||
"net_device": "virtio-net",
|
||||
"output_directory": "../dist",
|
||||
"qemu_binary": "qemu-system-aarch64",
|
||||
|
|
|
|||
|
|
@ -31,8 +31,8 @@
|
|||
"format": "qcow2",
|
||||
"headless": true,
|
||||
"http_directory": "http",
|
||||
"iso_checksum": "sha512:fc3560bb586af14b1d77ab7c2806616916926afcbd5cb3fd5a04a5633dfd91cfbbccada1a123f1ea14c480153b731cbee72a230cea17fd9116b9df8444d8df1c",
|
||||
"iso_url": "https://cdimage.debian.org/mirror/cdimage/release/12.7.0/arm64/iso-cd/debian-12.7.0-arm64-netinst.iso",
|
||||
"iso_checksum": "sha512:04a2a128852c2dff8bb71779ad325721385051eb1264d897bdb5918ab207a9b1de636ded149c56c61a09eb8c7f428496815e70d3be31b1b1cf4c70bf6427cedd",
|
||||
"iso_url": "https://cdimage.debian.org/mirror/cdimage/release/12.9.0/arm64/iso-cd/debian-12.9.0-arm64-netinst.iso",
|
||||
"net_device": "virtio-net",
|
||||
"output_directory": "../dist",
|
||||
"qemu_binary": "qemu-system-aarch64",
|
||||
|
|
|
|||
|
|
@ -27,8 +27,8 @@
|
|||
"format": "qcow2",
|
||||
"headless": true,
|
||||
"http_directory": "http",
|
||||
"iso_checksum": "sha512:e0bd9ba03084a6fd42413b425a2d20e3731678a31fe5fb2cc84f79332129afca2ad4ec897b4224d6a833afaf28a5d938b0fe5d680983182944162c6825b135ce",
|
||||
"iso_url": "https://cdimage.debian.org/mirror/cdimage/release/12.7.0/amd64/iso-cd/debian-12.7.0-amd64-netinst.iso",
|
||||
"iso_checksum": "sha512:9ebe405c3404a005ce926e483bc6c6841b405c4d85e0c8a7b1707a7fe4957c617ae44bd807a57ec3e5c2d3e99f2101dfb26ef36b3720896906bdc3aaeec4cd80",
|
||||
"iso_url": "https://cdimage.debian.org/mirror/cdimage/release/12.9.0/amd64/iso-cd/debian-12.9.0-amd64-netinst.iso",
|
||||
"net_device": "virtio-net",
|
||||
"output_directory": "../dist",
|
||||
"qemuargs": [
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@
|
|||
"loginTitle": "CloudStack",
|
||||
"loginFavicon": "assets/logo.svg",
|
||||
"loginFooter": "",
|
||||
"resetPasswordFooter": "",
|
||||
"logo": "assets/logo.svg",
|
||||
"minilogo": "assets/mini-logo.svg",
|
||||
"banner": "assets/banner.svg",
|
||||
|
|
|
|||
|
|
@ -55,7 +55,7 @@
|
|||
</div>
|
||||
</body>
|
||||
<script type="text/javascript">
|
||||
fetch('./config.json')
|
||||
fetch('./config.json?ts=' + Date.now())
|
||||
.then(response => response.json())
|
||||
.then(data => {
|
||||
document.getElementById("favicon").setAttribute("href", data.loginFavicon);
|
||||
|
|
|
|||
|
|
@ -2689,7 +2689,6 @@
|
|||
"message.volume.state.uploaderror": "Η αποστολή τόμου αντιμετώπισε κάποιο σφάλμα",
|
||||
"message.volume.state.uploadinprogress": "Η αποστολή τόμου βρίσκεται σε εξέλιξη",
|
||||
"message.volume.state.uploadop": "Η λειτουργία αποστολής τόμου βρίσκεται σε εξέλιξη ή, εν ολίγοις, ο τόμος βρίσκεται σε δευτερεύουσα αποθήκευση",
|
||||
"message.vr.alert.upon.network.offering.creation.l2": "Επειδή εικονικοί δρομολογητές δεν χρησιμοποιούνται για δίκτυα L2 η προσφορά υπολογιστικού νέφους δεν θα χρησιμοποιηθεί",
|
||||
"message.vr.alert.upon.network.offering.creation.others": "Εφόσον καμία απο τις υποχρεωτικές υπηρεσίες για την δημιουργία του εικονικού δρομολογητή (VPN, DHCP, DNS, Firewall, LB, UserData, SourceNat, StaticNat, PortForwarding) δεν είναι ενεργή, ο εικονικός δρομολογητές δεν θα δημιουργθεί και η προσφορά υπολογιστικού νέςφους δεν θα χρησιμοποιηθεί.",
|
||||
"message.warn.filetype": "jpg, jpeg, png, bmp και svg είναι οι μόνες υποστηριζόμενες μορφές εικόνας.",
|
||||
"message.zone.creation.complete": "Η δημιουργία ζώνης ολοκληρώθηκε",
|
||||
|
|
|
|||
|
|
@ -531,6 +531,11 @@
|
|||
"label.clientid": "Provider Client ID",
|
||||
"label.close": "Close",
|
||||
"label.cloud.managed": "CloudManaged",
|
||||
"label.cloudian.admin.password": "Admin Service Password",
|
||||
"label.cloudian.admin.url": "Admin Service Endpoint URL",
|
||||
"label.cloudian.admin.username": "Admin Service Username",
|
||||
"label.cloudian.iam.url": "IAM Service Endpoint URL",
|
||||
"label.cloudian.s3.url": "S3 Service Endpoint URL",
|
||||
"label.cloudian.storage": "Cloudian storage",
|
||||
"label.cluster": "Cluster",
|
||||
"label.cluster.name": "Cluster name",
|
||||
|
|
@ -2662,6 +2667,11 @@
|
|||
"label.zonewizard.traffictype.storage": "Storage: Traffic between primary and secondary storage servers, such as Instance Templates and Snapshots.",
|
||||
"label.buckets": "Buckets",
|
||||
"label.objectstorageid": "Object Storage Pool",
|
||||
"label.oobm.address": "Out-of-band management address",
|
||||
"label.oobm.driver": "Out-of-band management driver",
|
||||
"label.oobm.port": "Out-of-band management port",
|
||||
"label.oobm.powerstate": "Out-of-band management power state",
|
||||
"label.oobm.username": "Out-of-band management username",
|
||||
"label.bucket.update": "Update Bucket",
|
||||
"label.bucket.delete": "Delete Bucket",
|
||||
"label.quotagib": "Quota in GiB",
|
||||
|
|
@ -3382,6 +3392,7 @@
|
|||
"message.no.description": "No description entered.",
|
||||
"message.offering.internet.protocol.warning": "WARNING: IPv6 supported Networks use static routing and will require upstream routes to be configured manually.",
|
||||
"message.offering.ipv6.warning": "Please refer documentation for creating IPv6 enabled Network/VPC offering <a href='http://docs.cloudstack.apache.org/en/latest/plugins/ipv6.html#isolated-network-and-vpc-tier'>IPv6 support in CloudStack - Isolated Networks and VPC Network Tiers</a>",
|
||||
"message.oobm.configured": "Successfully configured out-of-band management for host",
|
||||
"message.ovf.configurations": "OVF configurations available for the selected appliance. Please select the desired value. Incompatible compute offerings will get disabled.",
|
||||
"message.password.reset.failed": "Failed to reset password.",
|
||||
"message.password.reset.success": "Password has been reset successfully. Please login using your new credentials.",
|
||||
|
|
@ -3542,6 +3553,7 @@
|
|||
"message.success.change.bgp.peers": "Successfully changed BGP peers",
|
||||
"message.success.change.offering": "Successfully changed offering",
|
||||
"message.success.change.password": "Successfully changed password for User",
|
||||
"message.success.change.host.password": "Successfully changed password for host \"{name}\"",
|
||||
"message.success.clear.webhook.deliveries": "Successfully cleared webhook deliveries",
|
||||
"message.success.change.scope": "Successfully changed scope for storage pool",
|
||||
"message.success.config.backup.schedule": "Successfully configured Instance backup schedule",
|
||||
|
|
@ -3747,7 +3759,6 @@
|
|||
"message.volumes.managed": "Volumes controlled by CloudStack.",
|
||||
"message.volumes.unmanaged": "Volumes not controlled by CloudStack.",
|
||||
"message.vpc.restart.required": "Restart is required for VPC(s). Click here to view VPC(s) which require restart.",
|
||||
"message.vr.alert.upon.network.offering.creation.l2": "As virtual routers are not created for L2 Networks, the compute offering will not be used.",
|
||||
"message.vr.alert.upon.network.offering.creation.others": "As none of the obligatory services for creating a virtual router (VPN, DHCP, DNS, Firewall, LB, UserData, SourceNat, StaticNat, PortForwarding) are enabled, the virtual router will not be created and the compute offering will not be used.",
|
||||
"message.warn.change.primary.storage.scope": "This feature is tested and supported for the following configurations:<br>KVM - NFS/Ceph - DefaultPrimary<br>VMware - NFS - DefaultPrimary<br>*There might be extra steps involved to make it work for other configurations.",
|
||||
"message.warn.filetype": "jpg, jpeg, png, bmp and svg are the only supported image formats.",
|
||||
|
|
|
|||
|
|
@ -2390,6 +2390,7 @@
|
|||
"message.success.change.affinity.group": "Grupos de afinidade alterados com sucesso",
|
||||
"message.success.change.offering": "Oferta alterada com sucesso",
|
||||
"message.success.change.password": "Senha alterada com sucesso",
|
||||
"message.success.change.host.password": "Senha do host \"{name}\" foi alterada com sucesso",
|
||||
"message.success.config.backup.schedule": "Agendamento de backup de VM configurado com sucesso",
|
||||
"message.success.config.sticky.policy": "Sticky policy configurada com sucesso",
|
||||
"message.success.copy.clipboard": "Copiado com sucesso para a \u00e1rea de transfer\u00eancia",
|
||||
|
|
@ -2507,7 +2508,6 @@
|
|||
"message.volume.state.uploaderror": "O carregamento do volume encontrou um erro",
|
||||
"message.volume.state.uploadinprogress": "Carregamento do volume em progresso",
|
||||
"message.volume.state.uploadop": "A opera\u00e7\u00e3o de carregamento de volume est\u00e1 em andamento",
|
||||
"message.vr.alert.upon.network.offering.creation.l2": "Como VRs n\u00e3o s\u00e3o criados para redes do tipo L2, a oferta de computa\u00e7\u00e3o n\u00e3o ser\u00e1 utilizada.",
|
||||
"message.vr.alert.upon.network.offering.creation.others": "Como nenhum dos servi\u00e7os obrigat\u00f3rios para cria\u00e7\u00e3o do VR (VPN, DHCP, DNS, Firewall, LB, UserData, SourceNat, StaticNat, PortForwarding) foram habilitados, o VR n\u00e3o ser\u00e1 criado e a oferta de computa\u00e7\u00e3o n\u00e3o ser\u00e1 usada.",
|
||||
"message.warn.filetype": "jpg, jpeg, png, bmp e svg s\u00e3o os \u00fanicos formatos de imagem suportados",
|
||||
"message.warn.importing.instance.without.nic": "AVISO: essa inst\u00e2ncia est\u00e1 sendo importada sem NICs e muitos recursos de rede n\u00e3o estar\u00e3o dispon\u00edveis. Considere criar uma NIC antes de importar via VCenter ou assim que a inst\u00e2ncia for importada.",
|
||||
|
|
|
|||
|
|
@ -3661,7 +3661,6 @@
|
|||
"message.volumes.managed": "CloudStack ద్వారా నియంత్రించబడే వాల్యూమ్లు.",
|
||||
"message.volumes.unmanaged": "CloudStack ద్వారా వాల్యూమ్లు నియంత్రించబడవు.",
|
||||
"message.vpc.restart.required": "VPC(లు) కోసం పునఃప్రారంభించాల్సిన అవసరం ఉంది. ",
|
||||
"message.vr.alert.upon.network.offering.creation.l2": "L2 నెట్వర్క్ల కోసం వర్చువల్ రూటర్లు సృష్టించబడనందున, కంప్యూట్ ఆఫర్ ఉపయోగించబడదు.",
|
||||
"message.vr.alert.upon.network.offering.creation.others": "వర్చువల్ రూటర్ను (VPN, DHCP, DNS, Firewall, LB, UserData, SourceNat, StaticNat, PortForwarding) సృష్టించడం కోసం తప్పనిసరి సేవలు ఏవీ ప్రారంభించబడనందున, వర్చువల్ రూటర్ సృష్టించబడదు మరియు గణన సమర్పణ ఉపయోగించబడదు.",
|
||||
"message.warn.change.primary.storage.scope": "ఈ ఫీచర్ క్రింది కాన్ఫిగరేషన్ల కోసం పరీక్షించబడింది మరియు మద్దతు ఇస్తుంది:<br>KVM - NFS/Ceph - డిఫాల్ట్ ప్రైమరీ<br>VMware - NFS - డిఫాల్ట్ ప్రైమరీ<br>*ఇతర కాన్ఫిగరేషన్ల కోసం ఇది పని చేయడానికి అదనపు దశలు ఉండవచ్చు.",
|
||||
"message.warn.filetype": "jpg, jpeg, png, bmp మరియు svg మాత్రమే మద్దతు ఉన్న ఇమేజ్ ఫార్మాట్లు.",
|
||||
|
|
|
|||
|
|
@ -1235,6 +1235,11 @@ export default {
|
|||
if (item.value) {
|
||||
query[item.param] = this.resource[item.value]
|
||||
} else {
|
||||
if (item.name === 'template') {
|
||||
query.templatefilter = 'self'
|
||||
query.filter = 'self'
|
||||
}
|
||||
|
||||
if (item.param === 'account') {
|
||||
query[item.param] = this.resource.name
|
||||
query.domainid = this.resource.domainid
|
||||
|
|
|
|||
|
|
@ -160,6 +160,10 @@
|
|||
|
||||
<a-tag>static-nat</a-tag>
|
||||
</span>
|
||||
<span v-if="record.issystem">
|
||||
|
||||
<a-tag>system</a-tag>
|
||||
</span>
|
||||
</template>
|
||||
<template v-if="column.key === 'ip6address'" href="javascript:;">
|
||||
<span>{{ ipV6Address(text, record) }}</span>
|
||||
|
|
@ -421,8 +425,8 @@
|
|||
<status :text="record.enabled ? record.enabled.toString() : 'false'" />
|
||||
{{ record.enabled ? 'Enabled' : 'Disabled' }}
|
||||
</template>
|
||||
<template v-if="['created', 'sent', 'removed', 'effectiveDate', 'endDate'].includes(column.key) || (['startdate'].includes(column.key) && ['webhook'].includes($route.path.split('/')[1])) || (column.key === 'allocated' && ['asnumbers', 'publicip', 'ipv4subnets'].includes($route.meta.name) && text)">
|
||||
{{ $toLocaleDate(text) }}
|
||||
<template v-if="['created', 'sent', 'removed', 'effectiveDate', 'endDate', 'allocated'].includes(column.key) || (['startdate'].includes(column.key) && ['webhook'].includes($route.path.split('/')[1])) || (column.key === 'allocated' && ['asnumbers', 'publicip', 'ipv4subnets'].includes($route.meta.name) && text)">
|
||||
{{ text && $toLocaleDate(text) }}
|
||||
</template>
|
||||
<template v-if="['startdate', 'enddate'].includes(column.key) && ['vm', 'vnfapp'].includes($route.path.split('/')[1])">
|
||||
{{ getDateAtTimeZone(text, record.timezone) }}
|
||||
|
|
|
|||
|
|
@ -468,7 +468,8 @@ export default {
|
|||
return false
|
||||
},
|
||||
uploadFiles () {
|
||||
if (!this.uploadDirectory.endsWith('/')) {
|
||||
this.uploadDirectory = this.uploadDirectory.trim()
|
||||
if (this.uploadDirectory.length !== 0 && !this.uploadDirectory.endsWith('/')) {
|
||||
this.uploadDirectory = this.uploadDirectory + '/'
|
||||
}
|
||||
var promises = []
|
||||
|
|
|
|||
|
|
@ -151,7 +151,7 @@ export default {
|
|||
],
|
||||
mapping: {
|
||||
type: {
|
||||
options: ['nfs']
|
||||
options: ['nfs', 'cifs']
|
||||
},
|
||||
provider: {
|
||||
value: (record) => { return 'nas' }
|
||||
|
|
|
|||
|
|
@ -78,6 +78,14 @@ export default {
|
|||
popup: true,
|
||||
component: shallowRef(defineAsyncComponent(() => import('@/views/infra/HostUpdate')))
|
||||
},
|
||||
{
|
||||
api: 'updateHostPassword',
|
||||
icon: 'key-outlined',
|
||||
label: 'label.action.change.password',
|
||||
dataView: true,
|
||||
popup: true,
|
||||
component: shallowRef(defineAsyncComponent(() => import('@/views/infra/ChangeHostPassword.vue')))
|
||||
},
|
||||
{
|
||||
api: 'provisionCertificate',
|
||||
icon: 'safety-certificate-outlined',
|
||||
|
|
@ -147,16 +155,8 @@ export default {
|
|||
message: 'label.outofbandmanagement.configure',
|
||||
docHelp: 'adminguide/hosts.html#out-of-band-management',
|
||||
dataView: true,
|
||||
post: true,
|
||||
args: ['hostid', 'address', 'port', 'username', 'password', 'driver'],
|
||||
mapping: {
|
||||
hostid: {
|
||||
value: (record) => { return record.id }
|
||||
},
|
||||
driver: {
|
||||
options: ['ipmitool', 'nestedcloudstack', 'redfish']
|
||||
}
|
||||
}
|
||||
popup: true,
|
||||
component: shallowRef(defineAsyncComponent(() => import('@/views/infra/ConfigureHostOOBM')))
|
||||
},
|
||||
{
|
||||
api: 'enableOutOfBandManagementForHost',
|
||||
|
|
|
|||
|
|
@ -840,10 +840,13 @@ export default {
|
|||
message: 'message.action.release.ip',
|
||||
docHelp: 'adminguide/networking_and_traffic.html#releasing-an-ip-address-alloted-to-a-vpc',
|
||||
dataView: true,
|
||||
show: (record) => { return record.state === 'Allocated' && !record.issourcenat },
|
||||
show: (record) => { return record.state === 'Allocated' && !record.issourcenat && !record.issystem },
|
||||
groupAction: true,
|
||||
popup: true,
|
||||
groupMap: (selection) => { return selection.map(x => { return { id: x } }) }
|
||||
groupMap: (selection) => { return selection.map(x => { return { id: x } }) },
|
||||
groupShow: (selectedIps) => {
|
||||
return selectedIps.every((ip) => ip.state === 'Allocated' && !ip.issourcenat && !ip.issystem)
|
||||
}
|
||||
},
|
||||
{
|
||||
api: 'reserveIpAddress',
|
||||
|
|
@ -863,7 +866,10 @@ export default {
|
|||
show: (record) => { return record.state === 'Reserved' },
|
||||
groupAction: true,
|
||||
popup: true,
|
||||
groupMap: (selection) => { return selection.map(x => { return { id: x } }) }
|
||||
groupMap: (selection) => { return selection.map(x => { return { id: x } }) },
|
||||
groupShow: (selectedIps) => {
|
||||
return selectedIps.every((ip) => ip.state === 'Reserved')
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
|
|
|
|||
|
|
@ -258,25 +258,8 @@ export default {
|
|||
show: (record) => {
|
||||
return record.state === 'Ready' && (record.vmstate === 'Stopped' || !record.virtualmachineid)
|
||||
},
|
||||
args: (record, store) => {
|
||||
var fields = ['volumeid', 'name', 'displaytext', 'ostypeid', 'isdynamicallyscalable', 'requireshvm', 'passwordenabled']
|
||||
if (['Admin', 'DomainAdmin'].includes(store.userInfo.roletype)) {
|
||||
fields.push('domainid')
|
||||
fields.push('account')
|
||||
}
|
||||
if (['Admin'].includes(store.userInfo.roletype) || store.features.userpublictemplateenabled) {
|
||||
fields.push('ispublic')
|
||||
}
|
||||
if (['Admin'].includes(store.userInfo.roletype)) {
|
||||
fields.push('isfeatured')
|
||||
}
|
||||
return fields
|
||||
},
|
||||
mapping: {
|
||||
volumeid: {
|
||||
value: (record) => { return record.id }
|
||||
}
|
||||
}
|
||||
popup: true,
|
||||
component: shallowRef(defineAsyncComponent(() => import('@/views/storage/CreateTemplate.vue')))
|
||||
},
|
||||
{
|
||||
api: 'recoverVolume',
|
||||
|
|
|
|||
|
|
@ -37,8 +37,9 @@
|
|||
</div>
|
||||
<route-view></route-view>
|
||||
</div>
|
||||
<div class="user-layout-footer" v-if="$config.loginFooter">
|
||||
<label v-html="$config.loginFooter"></label>
|
||||
<div class="user-layout-footer" v-if="$config.loginFooter || $config.resetPasswordFooter">
|
||||
<label v-if="$route.name === 'resetPassword' && $config.resetPasswordFooter" v-html="$config.resetPasswordFooter"></label>
|
||||
<label v-else v-html="$config.loginFooter"></label>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
|
|
|
|||
|
|
@ -39,7 +39,7 @@ export function loadLanguageAsync (lang) {
|
|||
return Promise.resolve(setLanguage(lang))
|
||||
}
|
||||
|
||||
return fetch(`locales/${lang}.json`)
|
||||
return fetch(`locales/${lang}.json?ts=${Date.now()}`)
|
||||
.then(response => response.json())
|
||||
.then(json => Promise.resolve(setLanguage(lang, json)))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -54,7 +54,7 @@ vueApp.use(genericUtilPlugin)
|
|||
vueApp.use(extensions)
|
||||
vueApp.use(directives)
|
||||
|
||||
fetch('config.json').then(response => response.json()).then(config => {
|
||||
fetch('config.json?ts=' + Date.now()).then(response => response.json()).then(config => {
|
||||
vueProps.$config = config
|
||||
let basUrl = config.apiBase
|
||||
if (config.multipleServer) {
|
||||
|
|
|
|||
|
|
@ -44,15 +44,44 @@
|
|||
>{{ prov }}</a-select-option>
|
||||
</a-select>
|
||||
</a-form-item>
|
||||
<a-form-item name="url" ref="url" :label="$t('label.url')">
|
||||
<a-input v-model:value="form.url" />
|
||||
</a-form-item>
|
||||
<a-form-item name="accessKey" ref="accessKey" :label="$t('label.access.key')">
|
||||
<a-input v-model:value="form.accessKey" />
|
||||
</a-form-item>
|
||||
<a-form-item name="secretKey" ref="secretKey" :label="$t('label.secret.key')">
|
||||
<a-input v-model:value="form.secretKey" />
|
||||
</a-form-item>
|
||||
|
||||
<div v-if="form.provider === 'Cloudian HyperStore'">
|
||||
<!-- HyperStore Only Object Store Configuration -->
|
||||
<a-form-item name="url" ref="url" :label="$t('label.cloudian.admin.url')">
|
||||
<a-input v-model:value="form.url" placeholder="https://admin-hostname:19443" />
|
||||
</a-form-item>
|
||||
<a-form-item name="validateSSL" ref="validateSSL">
|
||||
<a-checkbox v-model:checked="form.validateSSL">Validate SSL Certificate</a-checkbox>
|
||||
</a-form-item>
|
||||
<a-form-item name="accessKey" ref="accessKey" :label="$t('label.cloudian.admin.username')">
|
||||
<!-- Use accessKey field for the username to make provider shared configuration easier -->
|
||||
<a-input v-model:value="form.accessKey" />
|
||||
</a-form-item>
|
||||
<a-form-item name="secretKey" ref="secretKey" :label="$t('label.cloudian.admin.password')">
|
||||
<!-- Use secretKey field for the password to make provider shared configuration easier -->
|
||||
<a-input-password v-model:value="form.secretKey" autocomplete="off"/>
|
||||
</a-form-item>
|
||||
<a-form-item name="s3Url" ref="s3Url" :label="$t('label.cloudian.s3.url')" :rules="[{ required: true, message: this.$t('label.required') }]">
|
||||
<a-input v-model:value="form.s3Url" placeholder="https://s3-hostname or http://s3-hostname"/>
|
||||
</a-form-item>
|
||||
<a-form-item name="iamUrl" ref="iamUrl" :label="$t('label.cloudian.iam.url')" :rules="[{ required: true, message: this.$t('label.required') }]">
|
||||
<a-input v-model:value="form.iamUrl" placeholder="https://iam-hostname:16443 or http://iam-hostname:16080"/>
|
||||
</a-form-item>
|
||||
</div>
|
||||
|
||||
<div v-else>
|
||||
<!-- Non-HyperStore Object Stores -->
|
||||
<a-form-item name="url" ref="url" :label="$t('label.url')">
|
||||
<a-input v-model:value="form.url" />
|
||||
</a-form-item>
|
||||
<a-form-item name="accessKey" ref="accessKey" :label="$t('label.access.key')">
|
||||
<a-input v-model:value="form.accessKey" />
|
||||
</a-form-item>
|
||||
<a-form-item name="secretKey" ref="secretKey" :label="$t('label.secret.key')">
|
||||
<a-input-password v-model:value="form.secretKey" autocomplete="off"/>
|
||||
</a-form-item>
|
||||
</div>
|
||||
|
||||
<div :span="24" class="action-button">
|
||||
<a-button @click="closeModal">{{ $t('label.cancel') }}</a-button>
|
||||
<a-button type="primary" ref="submit" @click="handleSubmit">{{ $t('label.ok') }}</a-button>
|
||||
|
|
@ -82,7 +111,7 @@ export default {
|
|||
inject: ['parentFetchData'],
|
||||
data () {
|
||||
return {
|
||||
providers: ['MinIO', 'Ceph', 'Simulator'],
|
||||
providers: ['MinIO', 'Ceph', 'Cloudian HyperStore', 'Simulator'],
|
||||
zones: [],
|
||||
loading: false
|
||||
}
|
||||
|
|
@ -95,7 +124,8 @@ export default {
|
|||
initForm () {
|
||||
this.formRef = ref()
|
||||
this.form = reactive({
|
||||
provider: 'MinIO'
|
||||
provider: 'MinIO',
|
||||
validateSSL: true
|
||||
})
|
||||
this.rules = reactive({
|
||||
url: [{ required: true, message: this.$t('label.required') }],
|
||||
|
|
@ -128,6 +158,15 @@ export default {
|
|||
data['details[1].key'] = 'secretkey'
|
||||
data['details[1].value'] = values.secretKey
|
||||
|
||||
if (provider === 'Cloudian HyperStore') {
|
||||
data['details[2].key'] = 'validateSSL'
|
||||
data['details[2].value'] = values.validateSSL
|
||||
data['details[3].key'] = 's3Url'
|
||||
data['details[3].value'] = values.s3Url
|
||||
data['details[4].key'] = 'iamUrl'
|
||||
data['details[4].value'] = values.iamUrl
|
||||
}
|
||||
|
||||
this.loading = true
|
||||
|
||||
try {
|
||||
|
|
|
|||
|
|
@ -0,0 +1,150 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
<template>
|
||||
<div class="form-layout" v-ctrl-enter="handleSubmit">
|
||||
<a-spin :spinning="loading">
|
||||
<a-form
|
||||
:ref="formRef"
|
||||
:model="form"
|
||||
:rules="rules"
|
||||
layout="vertical"
|
||||
@finish="handleSubmit">
|
||||
<a-form-item name="username" ref="username">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.username')" :tooltip="apiParams.username.description"/>
|
||||
</template>
|
||||
<a-input
|
||||
v-model:value="form.username"
|
||||
:placeholder="$t('label.username')"/>
|
||||
</a-form-item>
|
||||
<a-form-item name="password" ref="password">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.new.password')" :tooltip="apiParams.password.description"/>
|
||||
</template>
|
||||
<a-input-password
|
||||
v-model:value="form.password"
|
||||
:placeholder="$t('label.new.password')"/>
|
||||
</a-form-item>
|
||||
<a-form-item name="confirmpassword" ref="confirmpassword">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.confirmpassword')" :tooltip="apiParams.password.description"/>
|
||||
</template>
|
||||
<a-input-password
|
||||
v-model:value="form.confirmpassword"
|
||||
:placeholder="$t('label.confirmpassword.description')"/>
|
||||
</a-form-item>
|
||||
|
||||
<div :span="24" class="action-button">
|
||||
<a-button @click="closeAction">{{ $t('label.cancel') }}</a-button>
|
||||
<a-button :loading="loading" ref="submit" type="primary" @click="handleSubmit">{{ $t('label.ok') }}</a-button>
|
||||
</div>
|
||||
</a-form>
|
||||
</a-spin>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script>
|
||||
import { ref, reactive, toRaw } from 'vue'
|
||||
import { api } from '@/api'
|
||||
import TooltipLabel from '@/components/widgets/TooltipLabel'
|
||||
|
||||
export default {
|
||||
name: 'ChangeHostPassword',
|
||||
components: {
|
||||
TooltipLabel
|
||||
},
|
||||
props: {
|
||||
resource: {
|
||||
type: Object,
|
||||
required: true
|
||||
}
|
||||
},
|
||||
data () {
|
||||
return {
|
||||
loading: false
|
||||
}
|
||||
},
|
||||
beforeCreate () {
|
||||
this.apiParams = this.$getApiParams('updateHostPassword')
|
||||
},
|
||||
created () {
|
||||
this.initForm()
|
||||
},
|
||||
methods: {
|
||||
initForm () {
|
||||
this.formRef = ref()
|
||||
this.form = reactive({})
|
||||
this.rules = reactive({
|
||||
username: [{ required: true, message: this.$t('message.error.host.username') }],
|
||||
password: [{ required: true, message: this.$t('message.error.new.password') }],
|
||||
confirmpassword: [
|
||||
{ required: true, message: this.$t('message.error.confirm.password') },
|
||||
{ validator: this.validateTwoPassword }
|
||||
]
|
||||
})
|
||||
},
|
||||
async validateTwoPassword (rule, value) {
|
||||
const messageConfirm = this.$t('message.validate.equalto')
|
||||
const passwordVal = this.form.password
|
||||
if (passwordVal !== value) {
|
||||
return Promise.reject(messageConfirm)
|
||||
}
|
||||
},
|
||||
handleSubmit (e) {
|
||||
e.preventDefault()
|
||||
if (this.loading) return
|
||||
this.formRef.value.validate().then(() => {
|
||||
const values = toRaw(this.form)
|
||||
this.loading = true
|
||||
const params = {
|
||||
username: values.username,
|
||||
hostId: this.resource.id,
|
||||
password: values.password
|
||||
}
|
||||
api('updateHostPassword', {}, 'POST', params).then(json => {
|
||||
this.$notification.success({
|
||||
message: this.$t('label.action.change.password'),
|
||||
description: `${this.$t('message.success.change.host.password', { name: this.resource.name })}`
|
||||
})
|
||||
this.$emit('refresh-data')
|
||||
this.closeAction()
|
||||
}).catch(error => {
|
||||
this.$notifyError(error)
|
||||
}).finally(() => {
|
||||
this.loading = false
|
||||
})
|
||||
}).catch(error => {
|
||||
this.formRef.value.scrollToField(error.errorFields[0].name)
|
||||
})
|
||||
},
|
||||
closeAction () {
|
||||
this.$emit('close-action')
|
||||
}
|
||||
}
|
||||
}
|
||||
</script>
|
||||
|
||||
<style scoped lang="less">
|
||||
.form-layout {
|
||||
width: 80vw;
|
||||
|
||||
@media (min-width: 600px) {
|
||||
width: 450px;
|
||||
}
|
||||
}
|
||||
</style>
|
||||
|
|
@ -0,0 +1,172 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
<template>
|
||||
<div class="form-layout">
|
||||
<a-form
|
||||
:ref="formRef"
|
||||
:model="form"
|
||||
:rules="rules"
|
||||
@finish="handleSubmit"
|
||||
v-ctrl-enter="handleSubmit"
|
||||
class="form"
|
||||
layout="vertical"
|
||||
>
|
||||
<a-alert type="warning">
|
||||
<template #message>
|
||||
<span v-html="$t('label.outofbandmanagement.configure')" />
|
||||
</template>
|
||||
</a-alert>
|
||||
<div style="margin-top: 10px;">
|
||||
<a-form-item name="address" ref="address">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.address')" :tooltip="apiParams.address.description"/>
|
||||
</template>
|
||||
<a-input
|
||||
v-model:value="form.address"
|
||||
v-focus="true" />
|
||||
</a-form-item>
|
||||
<a-form-item name="port" ref="port">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.port')" :tooltip="apiParams.port.description"/>
|
||||
</template>
|
||||
<a-input
|
||||
v-model:value="form.port" />
|
||||
</a-form-item>
|
||||
<a-form-item name="username" ref="username">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.username')" :tooltip="apiParams.username.description"/>
|
||||
</template>
|
||||
<a-input
|
||||
v-model:value="form.username" />
|
||||
</a-form-item>
|
||||
<a-form-item name="password" ref="password">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.password')" :tooltip="apiParams.password.description"/>
|
||||
</template>
|
||||
<a-input-password
|
||||
v-model:value="form.password"
|
||||
:placeholder="apiParams.password.description"/>
|
||||
</a-form-item>
|
||||
<a-form-item name="driver" ref="driver">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.driver')" :tooltip="apiParams.driver.description"/>
|
||||
</template>
|
||||
<a-select
|
||||
v-model:value="form.driver"
|
||||
style="width: 100%;"
|
||||
optionFilterProp="value"
|
||||
:filterOption="(input, option) => {
|
||||
return option.value.toLowerCase().indexOf(input.toLowerCase()) >= 0
|
||||
}" >
|
||||
<a-select-option key="" label="">{{ }}</a-select-option>
|
||||
<a-select-option value="ipmitool">ipmitool</a-select-option>
|
||||
<a-select-option value="nestedcloudstack">nestedcloudstack</a-select-option>
|
||||
<a-select-option value="redfish">redfish</a-select-option>
|
||||
</a-select>
|
||||
</a-form-item>
|
||||
</div>
|
||||
<div :span="24" class="action-button">
|
||||
<a-button @click="onCloseAction">{{ $t('label.cancel') }}</a-button>
|
||||
<a-button type="primary" @click="handleSubmit" ref="submit">{{ $t('label.ok') }}</a-button>
|
||||
</div>
|
||||
</a-form>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script>
|
||||
import TooltipLabel from '@/components/widgets/TooltipLabel'
|
||||
import { ref, reactive, toRaw } from 'vue'
|
||||
import { api } from '@/api'
|
||||
|
||||
export default {
|
||||
name: 'ConfigureHostOOBM',
|
||||
components: {
|
||||
TooltipLabel
|
||||
},
|
||||
props: {
|
||||
resource: {
|
||||
type: Object,
|
||||
required: true
|
||||
}
|
||||
},
|
||||
data () {
|
||||
return {
|
||||
}
|
||||
},
|
||||
beforeCreate () {
|
||||
this.apiParams = this.$getApiParams('configureOutOfBandManagement')
|
||||
},
|
||||
created () {
|
||||
this.initForm()
|
||||
},
|
||||
methods: {
|
||||
initForm () {
|
||||
this.formRef = ref()
|
||||
this.form = reactive({
|
||||
address: this.resource.outofbandmanagement.address || '',
|
||||
port: this.resource.outofbandmanagement.port || '',
|
||||
username: this.resource.outofbandmanagement.username || '',
|
||||
password: '',
|
||||
driver: this.resource.outofbandmanagement.driver || ''
|
||||
})
|
||||
this.rules = reactive({
|
||||
address: [{ required: true, message: this.$t('message.error.required.input') }],
|
||||
port: [{ required: true, message: this.$t('message.error.required.input') }],
|
||||
username: [{ required: true, message: this.$t('message.error.required.input') }],
|
||||
password: [{ required: true, message: this.$t('message.error.required.input') }],
|
||||
driver: [{ required: true, message: this.$t('message.error.required.input') }]
|
||||
})
|
||||
},
|
||||
handleSubmit (e) {
|
||||
e.preventDefault()
|
||||
this.formRef.value.validate().then(() => {
|
||||
const values = toRaw(this.form)
|
||||
const params = {
|
||||
hostid: this.resource.id,
|
||||
address: values.address,
|
||||
port: values.port,
|
||||
username: values.username,
|
||||
password: values.password,
|
||||
driver: values.driver
|
||||
}
|
||||
|
||||
api('configureOutOfBandManagement', {}, 'POST', params).then(_ => {
|
||||
this.$message.success(this.$t('message.oobm.configured'))
|
||||
this.$emit('refresh-data')
|
||||
this.onCloseAction()
|
||||
}).catch(error => {
|
||||
this.$notifyError(error)
|
||||
})
|
||||
})
|
||||
},
|
||||
onCloseAction () {
|
||||
this.$emit('close-action')
|
||||
}
|
||||
}
|
||||
}
|
||||
</script>
|
||||
|
||||
<style scoped>
|
||||
.form-layout {
|
||||
width: 30vw;
|
||||
|
||||
@media (min-width: 500px) {
|
||||
width: 450px;
|
||||
}
|
||||
}
|
||||
</style>
|
||||
|
|
@ -28,15 +28,15 @@
|
|||
>
|
||||
<a-alert type="warning">
|
||||
<template #message>
|
||||
<span v-html="$t('message.confirm.enable.host')" />
|
||||
<span v-html="resourcestate === 'Disabled' ? $t('message.confirm.enable.host') : $t('message.confirm.disable.host') " />
|
||||
</template>
|
||||
</a-alert>
|
||||
<div v-show="enableKVMAutoEnableDisableSetting" class="reason">
|
||||
<div v-show="kvmAutoEnableDisableSetting" class="reason">
|
||||
<a-form-item
|
||||
class="form__item"
|
||||
name="reason"
|
||||
ref="reason"
|
||||
:label="'The setting \'enable.kvm.host.auto.enable.disable\' is enabled, ' +
|
||||
:label="'The Auto Enable/Disable KVM Hosts functionality is enabled, ' +
|
||||
' can specify a reason for ' + (resourcestate === 'Enabled' ? 'disabling' : 'enabling') + ' this host'">
|
||||
<a-textarea
|
||||
v-model:value="form.reason"
|
||||
|
|
@ -69,7 +69,7 @@ export default {
|
|||
return {
|
||||
resourcestate: '',
|
||||
allocationstate: '',
|
||||
enableKVMAutoEnableDisableSetting: false
|
||||
kvmAutoEnableDisableSetting: false
|
||||
}
|
||||
},
|
||||
created () {
|
||||
|
|
@ -91,8 +91,8 @@ export default {
|
|||
return
|
||||
}
|
||||
api('listConfigurations', { name: 'enable.kvm.host.auto.enable.disable', clusterid: this.resource.clusterid }).then(json => {
|
||||
if (json.listconfigurationsresponse.configuration[0]) {
|
||||
this.enableKVMAutoEnableDisableSetting = json.listconfigurationsresponse.configuration[0].value
|
||||
if (json.listconfigurationsresponse.configuration?.[0]) {
|
||||
this.kvmAutoEnableDisableSetting = json?.listconfigurationsresponse?.configuration?.[0]?.value || false
|
||||
}
|
||||
})
|
||||
},
|
||||
|
|
|
|||
|
|
@ -86,14 +86,48 @@
|
|||
</div>
|
||||
</div>
|
||||
</a-list-item>
|
||||
<a-list-item v-if="host.outofbandmanagement">
|
||||
<div>
|
||||
<strong>{{ $t('label.powerstate') }}</strong>
|
||||
<span v-if="host?.outofbandmanagement?.enabled">
|
||||
<a-list-item>
|
||||
<div>
|
||||
{{ host.outofbandmanagement.powerstate }}
|
||||
<strong>{{ $t('label.oobm.username') }}</strong>
|
||||
<div>
|
||||
{{ host.outofbandmanagement.username }}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</a-list-item>
|
||||
</a-list-item>
|
||||
<a-list-item>
|
||||
<div>
|
||||
<strong>{{ $t('label.oobm.powerstate') }}</strong>
|
||||
<div>
|
||||
{{ host.outofbandmanagement.powerstate }}
|
||||
</div>
|
||||
</div>
|
||||
</a-list-item>
|
||||
<a-list-item>
|
||||
<div>
|
||||
<strong>{{ $t('label.oobm.driver') }}</strong>
|
||||
<div>
|
||||
{{ host.outofbandmanagement.driver }}
|
||||
</div>
|
||||
</div>
|
||||
</a-list-item>
|
||||
<a-list-item>
|
||||
<div>
|
||||
<strong>{{ $t('label.oobm.address') }}</strong>
|
||||
<div>
|
||||
{{ host.outofbandmanagement.address }}
|
||||
</div>
|
||||
</div>
|
||||
</a-list-item>
|
||||
<a-list-item>
|
||||
<div>
|
||||
<strong>{{ $t('label.oobm.port') }}</strong>
|
||||
<div>
|
||||
{{ host.outofbandmanagement.port }}
|
||||
</div>
|
||||
</div>
|
||||
</a-list-item>
|
||||
</span>
|
||||
<a-list-item v-if="host.hostha">
|
||||
<div>
|
||||
<strong>{{ $t('label.haenable') }}</strong>
|
||||
|
|
|
|||
|
|
@ -491,7 +491,6 @@ export default {
|
|||
const formRaw = toRaw(this.form)
|
||||
const values = this.handleRemoveFields(formRaw)
|
||||
var params = {
|
||||
isMirrored: false,
|
||||
name: values.name,
|
||||
displaytext: values.displaytext,
|
||||
storageType: values.storagetype,
|
||||
|
|
|
|||
|
|
@ -312,11 +312,10 @@
|
|||
</a-radio-button>
|
||||
</a-radio-group>
|
||||
</a-form-item>
|
||||
<a-form-item name="serviceofferingid" ref="serviceofferingid">
|
||||
<a-form-item name="serviceofferingid" ref="serviceofferingid" v-if="guestType !== 'l2'">
|
||||
<a-alert v-if="!isVirtualRouterForAtLeastOneService" type="warning" style="margin-bottom: 10px">
|
||||
<template #message>
|
||||
<span v-if="guestType === 'l2'" v-html="$t('message.vr.alert.upon.network.offering.creation.l2')" />
|
||||
<span v-else v-html="$t('message.vr.alert.upon.network.offering.creation.others')" />
|
||||
<span v-html="$t('message.vr.alert.upon.network.offering.creation.others')" />
|
||||
</template>
|
||||
</a-alert>
|
||||
<template #label>
|
||||
|
|
@ -331,8 +330,11 @@
|
|||
}"
|
||||
:loading="serviceOfferingLoading"
|
||||
:placeholder="apiParams.serviceofferingid.description">
|
||||
<a-select-option v-for="(opt) in serviceOfferings" :key="opt.id" :label="opt.name || opt.description">
|
||||
{{ opt.name || opt.description }}
|
||||
<a-select-option
|
||||
v-for="(offering, index) in serviceOfferings"
|
||||
:value="offering.id"
|
||||
:key="index">
|
||||
{{ offering.displaytext || offering.name }}
|
||||
</a-select-option>
|
||||
</a-select>
|
||||
</a-form-item>
|
||||
|
|
@ -765,7 +767,6 @@ export default {
|
|||
this.form.lbtype = 'publicLb'
|
||||
this.isVirtualRouterForAtLeastOneService = false
|
||||
this.isVpcVirtualRouterForAtLeastOneService = false
|
||||
this.serviceOfferings = []
|
||||
this.serviceOfferingLoading = false
|
||||
this.sourceNatServiceChecked = false
|
||||
this.lbServiceChecked = false
|
||||
|
|
@ -853,9 +854,7 @@ export default {
|
|||
params.systemvmtype = 'domainrouter'
|
||||
this.serviceOfferingLoading = true
|
||||
api('listServiceOfferings', params).then(json => {
|
||||
const listServiceOfferings = json.listserviceofferingsresponse.serviceoffering
|
||||
this.serviceOfferings = this.serviceOfferings.concat(listServiceOfferings)
|
||||
this.form.serviceofferingid = this.serviceOfferings.length > 0 ? this.serviceOfferings[0].id : ''
|
||||
this.serviceOfferings = json?.listserviceofferingsresponse?.serviceoffering || []
|
||||
}).finally(() => {
|
||||
this.serviceOfferingLoading = false
|
||||
})
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@
|
|||
v-model:value="form.displaytext"
|
||||
:placeholder="apiParams.displaytext.description" />
|
||||
</a-form-item>
|
||||
<a-form-item ref="zoneid" name="zoneid">
|
||||
<a-form-item v-if="resource.intervaltype" ref="zoneid" name="zoneid">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.zoneid')" :tooltip="apiParams.zoneid.description"/>
|
||||
</template>
|
||||
|
|
@ -130,41 +130,40 @@
|
|||
</a-select>
|
||||
</a-form-item>
|
||||
<a-row :gutter="12">
|
||||
<a-col :md="24" :lg="24">
|
||||
<a-form-item ref="groupenabled" name="groupenabled">
|
||||
<a-checkbox-group
|
||||
v-model:value="form.groupenabled"
|
||||
style="width: 100%;"
|
||||
>
|
||||
<a-row>
|
||||
<a-col :span="12">
|
||||
<a-checkbox value="passwordenabled">
|
||||
{{ $t('label.passwordenabled') }}
|
||||
</a-checkbox>
|
||||
</a-col>
|
||||
<a-col :span="12">
|
||||
<a-checkbox value="isdynamicallyscalable">
|
||||
{{ $t('label.isdynamicallyscalable') }}
|
||||
</a-checkbox>
|
||||
</a-col>
|
||||
<a-col :span="12">
|
||||
<a-checkbox value="requireshvm">
|
||||
{{ $t('label.requireshvm') }}
|
||||
</a-checkbox>
|
||||
</a-col>
|
||||
<a-col :span="12" v-if="isAdminRole">
|
||||
<a-checkbox value="isfeatured">
|
||||
{{ $t('label.isfeatured') }}
|
||||
</a-checkbox>
|
||||
</a-col>
|
||||
<a-col :span="12" v-if="isAdminRole || $store.getters.features.userpublictemplateenabled">
|
||||
<a-checkbox value="ispublic">
|
||||
{{ $t('label.ispublic') }}
|
||||
</a-checkbox>
|
||||
</a-col>
|
||||
</a-row>
|
||||
</a-checkbox-group>
|
||||
<a-col :md="24" :lg="12">
|
||||
<a-form-item ref="isdynamicallyscalable" name="isdynamicallyscalable">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.isdynamicallyscalable')" :tooltip="apiParams.isdynamicallyscalable.description"/>
|
||||
</template>
|
||||
<a-switch v-model:checked="form.isdynamicallyscalable" />
|
||||
</a-form-item>
|
||||
<a-form-item ref="requireshvm" name="requireshvm">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.requireshvm')" :tooltip="apiParams.requireshvm.description"/>
|
||||
</template>
|
||||
<a-switch v-model:checked="form.requireshvm" />
|
||||
</a-form-item>
|
||||
<a-form-item ref="passwordenabled" name="passwordenabled">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.passwordenabled')" :tooltip="apiParams.passwordenabled.description"/>
|
||||
</template>
|
||||
<a-switch v-model:checked="form.passwordenabled" />
|
||||
</a-form-item>
|
||||
<a-form-item
|
||||
ref="ispublic"
|
||||
name="ispublic"
|
||||
v-if="$store.getters.userInfo.roletype === 'Admin' || $store.getters.features.userpublictemplateenabled" >
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.ispublic')" :tooltip="apiParams.ispublic.description"/>
|
||||
</template>
|
||||
<a-switch v-model:checked="form.ispublic" />
|
||||
</a-form-item>
|
||||
<a-form-item ref="isfeatured" name="isfeatured" v-if="$store.getters.userInfo.roletype === 'Admin'">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.isfeatured')" :tooltip="apiParams.isfeatured.description"/>
|
||||
</template>
|
||||
<a-switch v-model:checked="form.isfeatured" />
|
||||
</a-form-item>
|
||||
</a-col>
|
||||
</a-row>
|
||||
<div :span="24" class="action-button">
|
||||
|
|
@ -234,7 +233,9 @@ export default {
|
|||
},
|
||||
fetchData () {
|
||||
this.fetchOsTypes()
|
||||
this.fetchSnapshotZones()
|
||||
if (this.resource.intervaltype) {
|
||||
this.fetchSnapshotZones()
|
||||
}
|
||||
if ('listDomains' in this.$store.getters.apis) {
|
||||
this.fetchDomains()
|
||||
}
|
||||
|
|
@ -300,21 +301,24 @@ export default {
|
|||
this.handleDomainChange(null)
|
||||
})
|
||||
},
|
||||
handleDomainChange (domain) {
|
||||
async handleDomainChange (domain) {
|
||||
this.domainid = domain
|
||||
this.form.account = null
|
||||
this.account = null
|
||||
if ('listAccounts' in this.$store.getters.apis) {
|
||||
this.fetchAccounts()
|
||||
await this.fetchAccounts()
|
||||
}
|
||||
},
|
||||
fetchAccounts () {
|
||||
api('listAccounts', {
|
||||
domainid: this.domainid
|
||||
}).then(response => {
|
||||
this.accounts = response.listaccountsresponse.account || []
|
||||
}).catch(error => {
|
||||
this.$notifyError(error)
|
||||
return new Promise((resolve, reject) => {
|
||||
api('listAccounts', {
|
||||
domainid: this.domainid
|
||||
}).then(response => {
|
||||
this.accounts = response?.listaccountsresponse?.account || []
|
||||
resolve(this.accounts)
|
||||
}).catch(error => {
|
||||
this.$notifyError(error)
|
||||
})
|
||||
})
|
||||
},
|
||||
handleAccountChange (acc) {
|
||||
|
|
@ -329,17 +333,22 @@ export default {
|
|||
this.formRef.value.validate().then(() => {
|
||||
const formRaw = toRaw(this.form)
|
||||
const values = this.handleRemoveFields(formRaw)
|
||||
values.snapshotid = this.resource.id
|
||||
if (values.groupenabled) {
|
||||
const input = values.groupenabled
|
||||
for (const index in input) {
|
||||
const name = input[index]
|
||||
values[name] = true
|
||||
const params = {}
|
||||
if (this.resource.intervaltype) {
|
||||
params.snapshotid = this.resource.id
|
||||
} else {
|
||||
params.volumeid = this.resource.id
|
||||
}
|
||||
|
||||
for (const key in values) {
|
||||
const input = values[key]
|
||||
if (input === undefined) {
|
||||
continue
|
||||
}
|
||||
delete values.groupenabled
|
||||
params[key] = input
|
||||
}
|
||||
this.loading = true
|
||||
api('createTemplate', values).then(response => {
|
||||
api('createTemplate', params).then(response => {
|
||||
this.$pollJob({
|
||||
jobId: response.createtemplateresponse.jobid,
|
||||
title: this.$t('message.success.create.template'),
|
||||
|
|
|
|||
Loading…
Reference in New Issue