Cleaned up Storage capacity Global Lock code that is no longer in use.

alena 2010-09-05 19:51:05 -07:00
parent 10fdd6d828
commit 2098127074
3 changed files with 9 additions and 111 deletions
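The code removed below wrapped each capacity-table query or update in a GlobalLock (com.cloud.utils.db.GlobalLock) acquired with a timeout and released in a finally block; after this commit the queries run directly, without the lock. As a rough, self-contained illustration of the pattern being retired, the sketch below uses java.util.concurrent.locks.ReentrantLock as a stand-in for GlobalLock (the class name, method signatures, and placeholder query are invented for the example; only the lock/try/finally/unlock shape mirrors the deleted code):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

public class CapacityCheckLockSketch {
    // Stand-in for the m_capacityCheckLock GlobalLock field in the original classes.
    private final ReentrantLock capacityCheckLock = new ReentrantLock();

    // Mirrors the deleted pattern: take the lock with a timeout, run the capacity
    // query, and always release the lock in finally. The original code called
    // m_capacityCheckLock.lock(120) / unlock() the same way.
    public boolean dataCenterAndPodHasEnoughCapacity() throws InterruptedException {
        if (capacityCheckLock.tryLock(120, TimeUnit.SECONDS)) {
            try {
                return searchCapacityTable();
            } finally {
                capacityCheckLock.unlock();
            }
        }
        // Lock timed out: the old code logged the timeout and reported no capacity.
        return false;
    }

    // Placeholder for _capacityDao.search(sc, null); after this commit the DAO
    // search runs directly, with no lock around it.
    private boolean searchCapacityTable() {
        return true;
    }
}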


@@ -155,41 +155,14 @@ public class UserConcentratedAllocator implements PodAllocator {
private boolean dataCenterAndPodHasEnoughCapacity(long dataCenterId, long podId, long capacityNeeded, short capacityType, long[] hostCandidate) {
List<CapacityVO> capacities = null;
// long start = System.currentTimeMillis();
// if (m_capacityCheckLock.lock(120)) { // 2 minutes
// long lockTime = System.currentTimeMillis();
// try {
SearchCriteria sc = _capacityDao.createSearchCriteria();
sc.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType);
sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, dataCenterId);
sc.addAnd("podId", SearchCriteria.Op.EQ, podId);
s_logger.trace("Executing search");
capacities = _capacityDao.search(sc, null);
s_logger.trace("Done with search");
// } finally {
// m_capacityCheckLock.unlock();
// long end = System.currentTimeMillis();
// if (s_logger.isTraceEnabled())
// s_logger.trace("CapacityCheckLock was held for " + (end - lockTime) + " ms; lock was acquired in " + (lockTime - start) + " ms");
// }
// } else {
// s_logger.error("Unable to acquire synchronization lock for pod allocation");
// long end = System.currentTimeMillis();
// if (s_logger.isTraceEnabled())
// s_logger.trace("CapacityCheckerLock got timed out after " + (end - start) + " ms");
//
// // we now try to enforce reservation-style allocation, waiting time has been adjusted
// // to 2 minutes
// return false;
//
///*
// // If we can't lock the table, just return that there is enough capacity and allow instance creation to fail on the agent
// // if there is not enough capacity. All that does is skip the optimization of checking for capacity before sending the
// // command to the agent.
// return true;
//*/
// }
SearchCriteria sc = _capacityDao.createSearchCriteria();
sc.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType);
sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, dataCenterId);
sc.addAnd("podId", SearchCriteria.Op.EQ, podId);
s_logger.trace("Executing search");
capacities = _capacityDao.search(sc, null);
s_logger.trace("Done with search");
boolean enoughCapacity = false;
if (capacities != null) {


@@ -466,39 +466,7 @@ public class AlertManagerImpl implements AlertManager {
s_logger.error("Unable to start transaction for capacity update");
}finally {
txn.close();
}
// if (m_capacityCheckLock.lock(5)) { // 5 second timeout
// long lockTime = System.currentTimeMillis();
// try {
// // delete the old records
// _capacityDao.clearNonStorageCapacities();
//
// for (CapacityVO newCapacity : newCapacities) {
// _capacityDao.persist(newCapacity);
// }
// txn.commit();
// } finally {
// m_capacityCheckLock.unlock();
// long end = System.currentTimeMillis();
// if (s_logger.isTraceEnabled())
// s_logger.trace("CapacityCheckLock was held for " + (end - lockTime) + " ms; lock was acquired in " + (lockTime - start) + " ms");
// }
//
// if (s_logger.isTraceEnabled()) {
// s_logger.trace("done recalculating system capacity");
// }
// } else {
// txn.rollback();
// if (s_logger.isTraceEnabled()) {
// s_logger.trace("Skipping capacity check, unable to lock the capacity table for recalculation.");
// }
// long end = System.currentTimeMillis();
// if (s_logger.isTraceEnabled())
// s_logger.trace("CapacityCheckerLock got timed out after " + (end - start) + " ms");
// }
}
}
class CapacityChecker extends TimerTask {


@ -59,7 +59,6 @@ import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.Transaction;
@@ -361,48 +360,6 @@ public class StatsCollector {
}finally {
txn.close();
}
//
//
//
// long start = System.currentTimeMillis();
// if (m_capacityCheckLock.lock(5)) { // 5 second timeout
// long lockTime = System.currentTimeMillis();
// if (s_logger.isTraceEnabled()) {
// s_logger.trace("recalculating system storage capacity");
// }
// try {
// // now update the capacity table with the new stats
// // FIXME: the right way to do this is to register a listener (see RouterStatsListener)
// // for the host stats, send the Watch<something>Command at a regular interval
// // to collect the stats from an agent and update the database as needed. The
// // listener model has connects/disconnects to keep things in sync much better
// // than this model right now
// _capacityDao.clearStorageCapacities();
//
// for (CapacityVO newCapacity : newCapacities) {
// s_logger.trace("Executing capacity update");
// _capacityDao.persist(newCapacity);
// s_logger.trace("Done with capacity update");
// }
// txn.commit();
// } finally {
// m_capacityCheckLock.unlock();
// long end = System.currentTimeMillis();
// if (s_logger.isTraceEnabled())
// s_logger.trace("CapacityCheckLock was held for " + (end - lockTime) + " ms; lock was acquired in " + (lockTime - start) + " ms");
// }
// if (s_logger.isTraceEnabled()) {
// s_logger.trace("done recalculating system storage capacity");
// }
// } else {
// if (s_logger.isTraceEnabled()) {
// s_logger.trace("not recalculating system storage capacity, unable to lock capacity table");
// }
// long end = System.currentTimeMillis();
// if (s_logger.isTraceEnabled())
// s_logger.trace("CapacityCheckerLock got timed out after " + (end - start) + " ms");
// }
} catch (Throwable t) {
s_logger.error("Error trying to retrieve storage stats", t);
}