bug 11237: 1) Remove capacity records when the corresponding pod/zone is removed 2) On the API layer, check if the zone exists before listing capacity records

status 11237: resolved fixed
alena 2011-08-24 13:05:16 -07:00
parent 993b5f5ba5
commit b4e022a9b4
4 changed files with 68 additions and 61 deletions
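At a high level, the fix adds one DAO method and wires it into the two delete paths; a condensed sketch (method bodies elided, showing only the signature and calls that appear in the diffs below):

// New DAO method: delete op_host_capacity rows matching whichever filters are non-null
boolean removeBy(Short capacityType, Long zoneId, Long podId, Long clusterId);

// deletePod(): drop the pod's private-IP capacity record
_capacityDao.removeBy(Capacity.CAPACITY_TYPE_PRIVATE_IP, null, podId, null);

// deleteZone(): once the zone row is removed, drop every capacity record for the zone
_capacityDao.removeBy(null, zoneId, null, null);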

ApiResponseHelper.java

@@ -1728,6 +1728,13 @@ public class ApiResponseHelper implements ResponseGenerator {
// collect all the capacity types, sum allocated/used and sum total...get one capacity number for each
for (Capacity capacity : hostCapacities) {
// check if the zone exists
DataCenter zone = ApiDBUtils.findZoneById(capacity.getDataCenterId());
if (zone == null) {
continue;
}
short capacityType = capacity.getCapacityType();
//If local storage then ignore
@@ -1841,7 +1848,7 @@ public class ApiResponseHelper implements ResponseGenerator {
public List<CapacityResponse> createCapacityResponse(List<? extends Capacity> result, DecimalFormat format) {
List<CapacityResponse> capacityResponses = new ArrayList<CapacityResponse>();
List<CapacityVO> summedCapacities = sumCapacities(result);
for (CapacityVO summedCapacity : summedCapacities) {
CapacityResponse capacityResponse = new CapacityResponse();
capacityResponse.setCapacityTotal(summedCapacity.getTotalCapacity());
capacityResponse.setCapacityType(summedCapacity.getCapacityType());

CapacityDao.java

@@ -23,11 +23,9 @@ import java.util.List;
import com.cloud.capacity.CapacityVO;
import com.cloud.utils.db.GenericDao;
public interface CapacityDao extends GenericDao<CapacityVO, Long> {
void clearNonStorageCapacities();
void clearStorageCapacities();
CapacityVO findByHostIdType(Long hostId, short capacityType);
void clearNonStorageCapacities2();
List<Long> orderClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor);
List<Long> listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType, float cpuOverprovisioningFactor);
boolean removeBy(Short capacityType, Long zoneId, Long podId, Long clusterId);
}

CapacityDaoImpl.java

@@ -40,10 +40,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
private static final Logger s_logger = Logger.getLogger(CapacityDaoImpl.class);
private static final String ADD_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity + ? WHERE host_id = ? AND capacity_type = ?";
private static final String SUBTRACT_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity - ? WHERE host_id = ? AND capacity_type = ?";
private static final String CLEAR_STORAGE_CAPACITIES = "DELETE FROM `cloud`.`op_host_capacity` WHERE capacity_type=2 OR capacity_type=3 OR capacity_type=6"; //clear storage and secondary_storage capacities
private static final String CLEAR_NON_STORAGE_CAPACITIES = "DELETE FROM `cloud`.`op_host_capacity` WHERE capacity_type<>2 AND capacity_type<>3 AND capacity_type<>6"; //clear non-storage and non-secondary_storage capacities
private static final String CLEAR_NON_STORAGE_CAPACITIES2 = "DELETE FROM `cloud`.`op_host_capacity` WHERE capacity_type<>2 AND capacity_type<>3 AND capacity_type<>6 AND capacity_type<>0 AND capacity_type<>1"; //clear capacities other than storage, secondary_storage, memory and CPU
private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART1 = "SELECT DISTINCT capacity.cluster_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster on (cluster.id = capacity.cluster_id AND cluster.removed is NULL) WHERE ";
private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART2 = " AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ? " +
@@ -53,6 +50,8 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
private SearchBuilder<CapacityVO> _hostIdTypeSearch;
private SearchBuilder<CapacityVO> _hostOrPoolIdSearch;
private SearchBuilder<CapacityVO> _allFieldsSearch;
private static final String LIST_HOSTS_IN_CLUSTER_WITH_ENOUGH_CAPACITY = "SELECT a.host_id FROM (host JOIN op_host_capacity a ON host.id = a.host_id AND host.cluster_id = ? AND host.type = ? " +
"AND (a.total_capacity * ? - a.used_capacity) >= ? and a.capacity_type = 1) " +
@@ -67,6 +66,16 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
_hostOrPoolIdSearch = createSearchBuilder();
_hostOrPoolIdSearch.and("hostId", _hostOrPoolIdSearch.entity().getHostOrPoolId(), SearchCriteria.Op.EQ);
_hostOrPoolIdSearch.done();
_allFieldsSearch = createSearchBuilder();
_allFieldsSearch.and("id", _allFieldsSearch.entity().getId(), SearchCriteria.Op.EQ);
_allFieldsSearch.and("hostId", _allFieldsSearch.entity().getHostOrPoolId(), SearchCriteria.Op.EQ);
_allFieldsSearch.and("zoneId", _allFieldsSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
_allFieldsSearch.and("podId", _allFieldsSearch.entity().getPodId(), SearchCriteria.Op.EQ);
_allFieldsSearch.and("clusterId", _allFieldsSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
_allFieldsSearch.and("capacityType", _allFieldsSearch.entity().getCapacityType(), SearchCriteria.Op.EQ);
_allFieldsSearch.done();
}
public void updateAllocated(Long hostId, long allocatedAmount, short capacityType, boolean add) {
@@ -91,55 +100,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
s_logger.warn("Exception updating capacity for host: " + hostId, e);
}
}
@Override
public void clearNonStorageCapacities() {
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
txn.start();
String sql = CLEAR_NON_STORAGE_CAPACITIES;
pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.executeUpdate();
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Exception clearing non storage capacities", e);
}
}
@Override
public void clearNonStorageCapacities2() {
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
txn.start();
String sql = CLEAR_NON_STORAGE_CAPACITIES2;
pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.executeUpdate();
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Exception clearing non storage capacities", e);
}
}
@Override
public void clearStorageCapacities() {
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
txn.start();
String sql = CLEAR_STORAGE_CAPACITIES;
pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.executeUpdate();
txn.commit();
} catch (Exception e) {
txn.rollback();
s_logger.warn("Exception clearing storage capacities", e);
}
}
@Override
public CapacityVO findByHostIdType(Long hostId, short capacityType) {
@@ -231,5 +192,28 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
} catch (Throwable e) {
throw new CloudRuntimeException("Caught: " + sql, e);
}
}
@Override
public boolean removeBy(Short capacityType, Long zoneId, Long podId, Long clusterId) {
SearchCriteria<CapacityVO> sc = _allFieldsSearch.create();
if (capacityType != null) {
sc.setParameters("capacityType", capacityType);
}
if (zoneId != null) {
sc.setParameters("zoneId", zoneId);
}
if (podId != null) {
sc.setParameters("podId", podId);
}
if (clusterId != null) {
sc.setParameters("clusterId", clusterId);
}
return remove(sc) > 0;
}
}
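A note on removeBy as implemented above: each filter is applied only when its argument is non-null, because setParameters is skipped for null arguments and conditions that never receive parameters are left out of the generated WHERE clause. A minimal usage sketch (zoneId is a placeholder value):

// deleteZone() cleanup: with only zoneId set, the delete is effectively
// DELETE FROM `cloud`.`op_host_capacity` WHERE data_center_id = ?
_capacityDao.removeBy(null, zoneId, null, null);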

ConfigurationManagerImpl.java

@@ -57,6 +57,8 @@ import com.cloud.api.commands.UpdateNetworkOfferingCmd;
import com.cloud.api.commands.UpdatePodCmd;
import com.cloud.api.commands.UpdateServiceOfferingCmd;
import com.cloud.api.commands.UpdateZoneCmd;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.configuration.ResourceCount.ResourceType;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.AccountVlanMapVO;
@@ -195,6 +197,8 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura
AlertManager _alertMgr;
@Inject(adapter = SecurityChecker.class)
Adapters<SecurityChecker> _secChecker;
@Inject
CapacityDao _capacityDao;
// FIXME - why don't we have interface for DataCenterLinkLocalIpAddressDao?
protected static final DataCenterLinkLocalIpAddressDaoImpl _LinkLocalIpAllocDao = ComponentLocator.inject(DataCenterLinkLocalIpAddressDaoImpl.class);
@@ -659,6 +663,8 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura
@DB
public boolean deletePod(DeletePodCmd cmd) {
Long podId = cmd.getId();
Transaction txn = Transaction.currentTxn();
// Make sure the pod exists
if (!validPod(podId)) {
@@ -669,14 +675,19 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura
HostPodVO pod = _podDao.findById(podId);
txn.start();
// Delete private ip addresses for the pod if there are any
List<DataCenterIpAddressVO> privateIps = _privateIpAddressDao.listByPodIdDcId(Long.valueOf(podId), pod.getDataCenterId());
if (privateIps != null && privateIps.size() != 0) {
if (!privateIps.isEmpty()) {
if (!(_privateIpAddressDao.deleteIpAddressByPod(podId))) {
throw new CloudRuntimeException("Failed to cleanup private ip addresses for pod " + podId);
}
// Delete corresponding capacity record
_capacityDao.removeBy(Capacity.CAPACITY_TYPE_PRIVATE_IP, null, podId, null);
}
// Delete link local ip addresses for the pod
List<DataCenterLinkLocalIpAddressVO> localIps = _LinkLocalIpAllocDao.listByPodIdDcId(podId, pod.getDataCenterId());
if (!localIps.isEmpty()) {
@@ -697,6 +708,8 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura
if (!(_podDao.remove(podId))) {
throw new CloudRuntimeException("Failed to delete pod " + podId);
}
txn.commit();
return true;
}
@@ -1138,6 +1151,11 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura
}
}
success = _zoneDao.remove(zoneId);
if (success) {
//delete all capacity records for the zone
_capacityDao.removeBy(null, zoneId, null, null);
}
txn.commit();