Added TRACE-level logging around the global capacity-check lock (m_capacityCheckLock) in UserConcentratedAllocator, AlertManagerImpl and StatsCollector, recording lock acquisition time, hold time, and lock-timeout duration.

This commit is contained in:
root 2010-09-01 17:34:08 -07:00 committed by alena
parent f5e675427b
commit 4a74992cd3
3 changed files with 29 additions and 5 deletions

View File

@ -155,7 +155,9 @@ public class UserConcentratedAllocator implements PodAllocator {
private boolean dataCenterAndPodHasEnoughCapacity(long dataCenterId, long podId, long capacityNeeded, short capacityType, long[] hostCandidate) {
List<CapacityVO> capacities = null;
long start = System.currentTimeMillis();
if (m_capacityCheckLock.lock(120)) { // 2 minutes
long lockTime = System.currentTimeMillis();
try {
SearchCriteria sc = _capacityDao.createSearchCriteria();
sc.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType);
@ -164,9 +166,15 @@ public class UserConcentratedAllocator implements PodAllocator {
capacities = _capacityDao.search(sc, null);
} finally {
m_capacityCheckLock.unlock();
long end = System.currentTimeMillis();
if (s_logger.isTraceEnabled())
s_logger.trace("CapacityCheckLock was held for " + (end - lockTime) + " ms; lock was acquired in " + (lockTime - start) + " ms");
}
} else {
s_logger.error("Unable to acquire synchronization lock for pod allocation");
long end = System.currentTimeMillis();
if (s_logger.isTraceEnabled())
s_logger.trace("CapacityCheckerLock got timed out after " + (end - start) + " ms");
// we now try to enforce reservation-style allocation, waiting time has been adjusted
// to 2 minutes

View File

@ -443,8 +443,10 @@ public class AlertManagerImpl implements AlertManager {
newCapacities.add(newPrivateIPCapacity);
// _capacityDao.persist(newPrivateIPCapacity);
}
long start = System.currentTimeMillis();
if (m_capacityCheckLock.lock(5)) { // 5 second timeout
long lockTime = System.currentTimeMillis();
try {
// delete the old records
_capacityDao.clearNonStorageCapacities();
@ -453,7 +455,10 @@ public class AlertManagerImpl implements AlertManager {
_capacityDao.persist(newCapacity);
}
} finally {
m_capacityCheckLock.unlock();
m_capacityCheckLock.unlock();
long end = System.currentTimeMillis();
if (s_logger.isTraceEnabled())
s_logger.trace("CapacityCheckLock was held for " + (end - lockTime) + " ms; lock was acquired in " + (lockTime - start) + " ms");
}
if (s_logger.isTraceEnabled()) {
@ -462,7 +467,10 @@ public class AlertManagerImpl implements AlertManager {
} else {
if (s_logger.isTraceEnabled()) {
s_logger.trace("Skipping capacity check, unable to lock the capacity table for recalculation.");
}
}
long end = System.currentTimeMillis();
if (s_logger.isTraceEnabled())
s_logger.trace("CapacityCheckerLock got timed out after " + (end - start) + " ms");
}
}

View File

@ -337,8 +337,10 @@ public class StatsCollector {
newCapacities.add(capacity);
// _capacityDao.persist(capacity);
}
if (m_capacityCheckLock.lock(5)) { // 5 second timeout
long start = System.currentTimeMillis();
if (m_capacityCheckLock.lock(5)) { // 5 second timeout
long lockTime = System.currentTimeMillis();
if (s_logger.isTraceEnabled()) {
s_logger.trace("recalculating system storage capacity");
}
@ -356,6 +358,9 @@ public class StatsCollector {
}
} finally {
m_capacityCheckLock.unlock();
long end = System.currentTimeMillis();
if (s_logger.isTraceEnabled())
s_logger.trace("CapacityCheckLock was held for " + (end - lockTime) + " ms; lock was acquired in " + (lockTime - start) + " ms");
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("done recalculating system storage capacity");
@ -364,6 +369,9 @@ public class StatsCollector {
if (s_logger.isTraceEnabled()) {
s_logger.trace("not recalculating system storage capacity, unable to lock capacity table");
}
long end = System.currentTimeMillis();
if (s_logger.isTraceEnabled())
s_logger.trace("CapacityCheckerLock got timed out after " + (end - start) + " ms");
}
} catch (Throwable t) {
s_logger.error("Error trying to retrieve storage stats", t);