Issue #1821 make GridInfoCache more elaborate.

Change-Id: Ib64a68ab4e898eba5fe186ac5ac01407964e37d9

Former-commit-id: a31dc1874b [formerly bc8729d631] [formerly a31dc1874b [formerly bc8729d631] [formerly 4b6d2fd8d1 [formerly c7b4e7c982fb682779b729d1a8a8fb5ff6f5e1d2]]]
Former-commit-id: 4b6d2fd8d1
Former-commit-id: 60b2259343 [formerly e4462ee41a]
Former-commit-id: 03eade5b98
This commit is contained in:
Ben Steffensmeier 2013-03-27 16:44:14 -05:00
parent 06030b77b4
commit 07438ab40a
2 changed files with 217 additions and 93 deletions

View file

@ -31,4 +31,5 @@ Require-Bundle: com.raytheon.uf.common.parameter;bundle-version="1.0.0",
org.springframework;bundle-version="2.5.6",
javax.measure;bundle-version="1.0.0",
com.raytheon.uf.common.status;bundle-version="1.12.1174",
org.apache.commons.logging;bundle-version="1.1.1"
org.apache.commons.logging;bundle-version="1.1.1",
com.raytheon.uf.common.comm;bundle-version="1.12.1174"

View file

@ -19,16 +19,19 @@
**/
package com.raytheon.uf.edex.plugin.grid.dao;
import java.lang.ref.SoftReference;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.WeakHashMap;
import com.raytheon.uf.common.comm.CommunicationException;
import com.raytheon.uf.common.dataplugin.grid.GridInfoConstants;
import com.raytheon.uf.common.dataplugin.grid.GridInfoRecord;
import com.raytheon.uf.common.dataplugin.level.LevelFactory;
import com.raytheon.uf.common.gridcoverage.lookup.GridCoverageLookup;
import com.raytheon.uf.common.parameter.lookup.ParameterLookup;
import com.raytheon.uf.edex.database.DataAccessLayerException;
import com.raytheon.uf.edex.database.cluster.ClusterLockUtils;
import com.raytheon.uf.edex.database.cluster.ClusterLockUtils.LockState;
@ -57,21 +60,107 @@ import com.raytheon.uf.edex.database.query.DatabaseQuery;
public class GridInfoCache {
// NOTE(review): comment said "6 hours" but 1 * 1 * 60 * 1000 ms = 1 minute — confirm the intended rotation interval
private static final int ROTATION_INTERVAL = 1 * 1 * 60 * 1000;
private static GridInfoCache instance = new GridInfoCache();
public static GridInfoCache getInstance() {
return instance;
}
private final CoreDao dao;
private static final CoreDao dao = new CoreDao(
DaoConfig.forClass(GridInfoRecord.class));
// A weak hashmap of soft references is used as a SoftSet.
private Map<GridInfoRecord, SoftReference<GridInfoRecord>> cache = null;
private Map<String, DatasetCache> cache = null;
private long lastRotationTime;
private GridInfoCache() {
cache = Collections
.synchronizedMap(new WeakHashMap<GridInfoRecord, SoftReference<GridInfoRecord>>());
dao = new CoreDao(DaoConfig.forClass(GridInfoRecord.class));
.synchronizedMap(new HashMap<String, DatasetCache>());
lastRotationTime = System.currentTimeMillis();
}
/**
 * Return the canonical cached info record equal to the given record,
 * creating (and pre-populating) the per-dataset cache on first access.
 * Triggers a cache rotation when the rotation interval has elapsed.
 *
 * @param record the record to look up
 * @return the cached equivalent of {@code record}
 * @throws DataAccessLayerException if the backing database query fails
 */
public GridInfoRecord getGridInfo(GridInfoRecord record)
        throws DataAccessLayerException {
    String datasetId = record.getDatasetId();
    DatasetCache datasetCache = cache.get(datasetId);
    if (datasetCache == null) {
        // createDatasetCache re-checks under the cache lock, so a
        // concurrent creation for the same dataset is safe.
        datasetCache = createDatasetCache(datasetId);
    }
    GridInfoRecord canonical = datasetCache.getGridInfo(record);
    if (System.currentTimeMillis() > lastRotationTime + ROTATION_INTERVAL) {
        rotateCache();
    }
    return canonical;
}
/**
 * Remove the info records with the specified ids from the cache, dropping
 * any per-dataset cache that becomes empty as a result.
 *
 * @param infoKeys database ids of the info records to evict
 */
public void purgeCache(Collection<Integer> infoKeys) {
    // Must hold the map's own monitor while iterating a synchronizedMap view.
    synchronized (cache) {
        for (Iterator<DatasetCache> it = cache.values().iterator(); it
                .hasNext();) {
            DatasetCache dsCache = it.next();
            dsCache.purgeCache(infoKeys);
            if (dsCache.isEmpty()) {
                it.remove();
            }
        }
    }
}
/**
 * Get-or-create the DatasetCache for a dataset id. The check is repeated
 * under the cache lock so only one DatasetCache is ever built per dataset.
 *
 * @param datasetId the dataset id to build a cache for
 * @return the (possibly freshly created) DatasetCache
 * @throws DataAccessLayerException if pre-populating the cache from the
 *         database fails
 */
private DatasetCache createDatasetCache(String datasetId)
        throws DataAccessLayerException {
    synchronized (cache) {
        DatasetCache existing = cache.get(datasetId);
        if (existing != null) {
            return existing;
        }
        DatasetCache fresh = new DatasetCache(datasetId);
        cache.put(datasetId, fresh);
        return fresh;
    }
}
/**
 * Age every per-dataset cache (primary becomes second-chance), removing
 * dataset caches that end up empty. The interval check is repeated under
 * the lock so concurrent callers do not rotate twice in a row.
 */
private void rotateCache() {
    synchronized (cache) {
        if (System.currentTimeMillis() > lastRotationTime
                + ROTATION_INTERVAL) {
            for (Iterator<DatasetCache> it = cache.values().iterator(); it
                    .hasNext();) {
                DatasetCache dsCache = it.next();
                dsCache.rotateCache();
                if (dsCache.isEmpty()) {
                    it.remove();
                }
            }
        }
        lastRotationTime = System.currentTimeMillis();
    }
}
/**
*
* A second chance cache for all GridInfoRecords for a single datasetid.
*
*/
private static class DatasetCache {
private Map<GridInfoRecord, GridInfoRecord> primaryCache;
private Map<GridInfoRecord, GridInfoRecord> secondChanceCache;
/**
 * Build a cache for a single dataset id, pre-populated with every info
 * record for that dataset already present in the database.
 *
 * @param datasetid the dataset id this cache covers
 * @throws DataAccessLayerException if the populating query fails
 */
public DatasetCache(String datasetid) throws DataAccessLayerException {
    primaryCache = Collections
            .synchronizedMap(new HashMap<GridInfoRecord, GridInfoRecord>());
    secondChanceCache = Collections
            .synchronizedMap(new HashMap<GridInfoRecord, GridInfoRecord>());
    // Warm the primary cache with all existing records for this dataset.
    DatabaseQuery populateQuery = new DatabaseQuery(GridInfoRecord.class);
    populateQuery.addQueryParam(GridInfoConstants.DATASET_ID, datasetid);
    queryAndAdd(populateQuery);
}
public GridInfoRecord getGridInfo(GridInfoRecord record)
@ -86,11 +175,28 @@ public class GridInfoCache {
return result;
}
/**
 * Evict the records with the given database ids from both generations
 * of this dataset's cache.
 *
 * @param infoKeys database ids of the info records to evict
 */
public void purgeCache(Collection<Integer> infoKeys) {
    purgeCache(infoKeys, primaryCache);
    purgeCache(infoKeys, secondChanceCache);
}
/**
 * Age this cache one generation: the current primary becomes the
 * second-chance map (dropping the previous second-chance contents) and
 * a fresh empty primary map takes its place.
 */
public void rotateCache() {
    Map<GridInfoRecord, GridInfoRecord> fresh = Collections
            .synchronizedMap(new HashMap<GridInfoRecord, GridInfoRecord>());
    secondChanceCache = primaryCache;
    primaryCache = fresh;
}
/**
 * @return true only when neither generation holds any records
 */
public boolean isEmpty() {
    boolean primaryEmpty = primaryCache.isEmpty();
    boolean secondChanceEmpty = secondChanceCache.isEmpty();
    return primaryEmpty && secondChanceEmpty;
}
private GridInfoRecord checkLocalCache(GridInfoRecord record) {
GridInfoRecord result = null;
SoftReference<GridInfoRecord> ref = cache.get(record);
if (ref != null) {
result = ref.get();
GridInfoRecord result = primaryCache.get(record);
if (result == null) {
result = secondChanceCache.get(record);
if (result != null) {
addToCache(result);
}
}
return result;
}
@ -105,39 +211,57 @@ public class GridInfoCache {
*/
/**
 * Query the database for records matching the dataset, parameter, level,
 * and location of the given record, fold everything returned into the
 * cache, then answer from the cache.
 *
 * NOTE: the original span contained leftover removed-diff lines (an older
 * formatting of the addQueryParam calls and the obsolete SoftReference
 * caching loop); this is the reconstructed current body.
 *
 * @param record the record whose key fields drive the query
 * @return the cached equivalent, or null if the database has no match
 * @throws DataAccessLayerException if the query fails
 */
private GridInfoRecord query(GridInfoRecord record)
        throws DataAccessLayerException {
    // It is possible that this query will return multiple
    // results, for example in the case of models with secondary ids. In
    // general these cases should be rare and small. So we handle it by
    // caching everything returned and then double checking the cache.
    DatabaseQuery query = new DatabaseQuery(GridInfoRecord.class);
    query.addQueryParam(GridInfoConstants.DATASET_ID,
            record.getDatasetId());
    query.addQueryParam(GridInfoConstants.PARAMETER_ABBREVIATION,
            record.getParameter().getAbbreviation());
    query.addQueryParam(GridInfoConstants.LEVEL_ID, record.getLevel()
            .getId());
    query.addQueryParam(GridInfoConstants.LOCATION_ID, record
            .getLocation().getId());
    queryAndAdd(query);
    return checkLocalCache(record);
}
/**
 * Execute the query and fold every returned record into the primary
 * cache. A null or empty result set is a no-op.
 *
 * @param query the criteria query to run
 * @throws DataAccessLayerException if the query fails
 */
private void queryAndAdd(DatabaseQuery query)
        throws DataAccessLayerException {
    List<?> dbList = dao.queryByCriteria(query);
    if (dbList == null || dbList.isEmpty()) {
        return;
    }
    for (Object pdo : dbList) {
        addToCache((GridInfoRecord) pdo);
    }
}
/**
 * Replace several fields with cached versions to save memory and then
 * add to the primaryCache.
 *
 * @param record the record whose location, parameter, and level are
 *            swapped for shared lookup instances before caching
 */
private void addToCache(GridInfoRecord record) {
    record.setLocation(GridCoverageLookup.getInstance().getCoverage(
            record.getLocation().getId()));
    record.setParameter(ParameterLookup.getInstance().getParameter(
            record.getParameter().getAbbreviation()));
    try {
        record.setLevel(LevelFactory.getInstance().getLevel(
                record.getLevel().getId()));
    } catch (CommunicationException e) {
        // This should never hit and if it does ignore it, the only side
        // effect is that the level in the record will not be the same as
        // the other records on the same level.
    }
    primaryCache.put(record, record);
}
/**
* Insert the record into the database if there is no current record
* that equals this one. This method uses a fairly broad cluster lock so
* only one thread at a time across all clustered edices can insert at a
* time. This method should not be used much on running systems since
* gridded models maintain fairly consistent info records over time.
*
* @param record
* @return
@ -146,7 +270,8 @@ public class GridInfoCache {
throws DataAccessLayerException {
ClusterTask ct = null;
do {
ct = ClusterLockUtils.lock("grid_info", "newEntry", 30000, true);
ct = ClusterLockUtils.lock("grid_info_create",
record.getDatasetId(), 30000, true);
} while (!LockState.SUCCESSFUL.equals(ct.getLockState()));
try {
GridInfoRecord existing = query(record);
@ -157,16 +282,12 @@ public class GridInfoCache {
} finally {
ClusterLockUtils.unlock(ct, false);
}
cache.put(record, new SoftReference<GridInfoRecord>(record));
addToCache(record);
return record;
}
/**
* Remove the info records with the specified ids from the cache.
*
* @param infoKeys
*/
public void purgeCache(Collection<Integer> infoKeys) {
private void purgeCache(Collection<Integer> infoKeys,
Map<GridInfoRecord, GridInfoRecord> cache) {
synchronized (cache) {
Iterator<GridInfoRecord> it = cache.keySet().iterator();
while (it.hasNext()) {
@ -178,4 +299,6 @@ public class GridInfoCache {
}
}
}
}