Issue #1821 Speed up GridInfoCache.

Change-Id: I14a92d85994c25dc7f80828efd12d2d4e2e70e40

Former-commit-id: 6f525a81b4 [formerly fde9f936e53a998f8cde174935d00287e6aaf70c]
Former-commit-id: 2a13abd32c
Ben Steffensmeier 2013-03-27 13:14:41 -05:00
parent 60735ddd83
commit bd35a06d51
3 changed files with 50 additions and 23 deletions

PluginDao.java

@@ -105,6 +105,8 @@ import com.vividsolutions.jts.geom.Polygon;
* Jan 14, 2013 1469 bkowal No longer retrieves the hdf5 data directory
* from the environment.
* Feb 12, 2013 #1608 randerso Changed to call deleteDatasets
* Mar 27, 2013 1821 bsteffen Remove extra store in persistToHDF5 for
* replace only operations.
*
* </pre>
*
@@ -260,7 +262,7 @@ public abstract class PluginDao extends CoreDao {
// directory.mkdirs();
// }
IDataStore dataStore = DataStoreFactory.getDataStore(file);
IDataStore dataStore = null;
IDataStore replaceDataStore = null;
for (IPersistable persistable : persistables) {
@@ -274,6 +276,9 @@ public abstract class PluginDao extends CoreDao {
populateDataStore(replaceDataStore, persistable);
} else {
if (dataStore == null) {
dataStore = DataStoreFactory.getDataStore(file);
}
populateDataStore(dataStore, persistable);
}
} catch (Exception e) {
@@ -281,14 +286,15 @@ public abstract class PluginDao extends CoreDao {
}
}
try {
StorageStatus s = dataStore.store();
// add exceptions to a list for aggregation
exceptions.addAll(Arrays.asList(s.getExceptions()));
} catch (StorageException e) {
logger.error("Error persisting to HDF5", e);
if (dataStore != null) {
try {
StorageStatus s = dataStore.store();
// add exceptions to a list for aggregation
exceptions.addAll(Arrays.asList(s.getExceptions()));
} catch (StorageException e) {
logger.error("Error persisting to HDF5", e);
}
}
if (replaceDataStore != null) {
try {
StorageStatus s = replaceDataStore.store(StoreOp.REPLACE);
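
The PluginDao hunks above defer creation of the regular IDataStore until a persistable actually needs it, and the final store() call is now guarded by a null check, so a batch containing only replace operations no longer opens and stores to an unused second datastore. Below is a minimal, self-contained sketch of that lazy-initialization pattern; all types and method names are hypothetical stand-ins, not the actual AWIPS II API.

import java.io.File;
import java.util.List;

// Hypothetical, simplified stand-ins; not the real IDataStore/PluginDao classes.
public class LazyStoreSketch {

    interface DataStore {
        void add(String record);
        void store();        // normal store
        void storeReplace(); // replace-only store
    }

    interface DataStoreFactory {
        DataStore open(File file);
    }

    static final class Record {
        final String name;
        final boolean replaceOnly;

        Record(String name, boolean replaceOnly) {
            this.name = name;
            this.replaceOnly = replaceOnly;
        }
    }

    public static void persistAll(DataStoreFactory factory, File file,
            List<Record> records) {
        DataStore dataStore = null;        // created only if a normal record appears
        DataStore replaceDataStore = null; // created only if a replace-only record appears
        for (Record r : records) {
            if (r.replaceOnly) {
                if (replaceDataStore == null) {
                    replaceDataStore = factory.open(file);
                }
                replaceDataStore.add(r.name);
            } else {
                if (dataStore == null) {
                    dataStore = factory.open(file);
                }
                dataStore.add(r.name);
            }
        }
        // Only datastores that were actually created get flushed, so a batch
        // of replace-only records never performs the extra "normal" store.
        if (dataStore != null) {
            dataStore.store();
        }
        if (replaceDataStore != null) {
            replaceDataStore.storeReplace();
        }
    }
}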

GridDao.java

@@ -50,6 +50,7 @@ import com.raytheon.uf.common.gridcoverage.GridCoverage;
import com.raytheon.uf.common.gridcoverage.lookup.GridCoverageLookup;
import com.raytheon.uf.common.parameter.Parameter;
import com.raytheon.uf.common.parameter.lookup.ParameterLookup;
import com.raytheon.uf.common.status.UFStatus.Priority;
import com.raytheon.uf.edex.core.EDEXUtil;
import com.raytheon.uf.edex.core.EdexException;
import com.raytheon.uf.edex.core.dataplugin.PluginRegistry;
@@ -66,6 +67,7 @@ import com.raytheon.uf.edex.database.plugin.PluginDao;
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* 4/7/09 1994 bphillip Initial Creation
* Mar 27, 2013 1821 bsteffen Speed up GridInfoCache.
*
* </pre>
*
@@ -231,8 +233,15 @@ public class GridDao extends PluginDao {
if (!validateCoverage(record)) {
return false;
}
record.setInfo(GridInfoCache.getInstance()
.getGridInfo(record.getInfo()));
try {
record.setInfo(GridInfoCache.getInstance().getGridInfo(
record.getInfo()));
} catch (DataAccessLayerException e) {
statusHandler.handle(Priority.PROBLEM,
"Cannot load GridInfoRecord from DB for: "
+ record.getDataURI(), e);
return false;
}
return true;
}

GridInfoCache.java

@@ -27,13 +27,15 @@ import java.util.List;
import java.util.Map;
import java.util.WeakHashMap;
import com.raytheon.uf.common.dataplugin.grid.GridInfoConstants;
import com.raytheon.uf.common.dataplugin.grid.GridInfoRecord;
import com.raytheon.uf.common.dataplugin.persist.PersistableDataObject;
import com.raytheon.uf.edex.database.DataAccessLayerException;
import com.raytheon.uf.edex.database.cluster.ClusterLockUtils;
import com.raytheon.uf.edex.database.cluster.ClusterLockUtils.LockState;
import com.raytheon.uf.edex.database.cluster.ClusterTask;
import com.raytheon.uf.edex.database.dao.CoreDao;
import com.raytheon.uf.edex.database.dao.DaoConfig;
import com.raytheon.uf.edex.database.query.DatabaseQuery;
/**
* Cache the gridInfo objects from the database to avoid repeated lookups.
@@ -45,6 +47,7 @@ import com.raytheon.uf.edex.database.dao.DaoConfig;
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* May 21, 2012 bsteffen Initial creation
* Mar 27, 2013 1821 bsteffen Speed up GridInfoCache.
*
* </pre>
*
@@ -71,7 +74,8 @@ public class GridInfoCache {
dao = new CoreDao(DaoConfig.forClass(GridInfoRecord.class));
}
public GridInfoRecord getGridInfo(GridInfoRecord record) {
public GridInfoRecord getGridInfo(GridInfoRecord record)
throws DataAccessLayerException {
GridInfoRecord result = checkLocalCache(record);
if (result == null) {
result = query(record);
@@ -97,19 +101,26 @@
*
* @param record
* @return
* @throws DataAccessLayerException
*/
private GridInfoRecord query(GridInfoRecord record) {
private GridInfoRecord query(GridInfoRecord record)
throws DataAccessLayerException {
// It is possible that this query will return multiple
// results, for example if the record we are looking for has
// a null secondaryId but some db entries have a secondaryId
// set then this query will return all matching models
// ignoring secondaryId. In general these cases should be
// rare and small. So we handle it by caching everything
// returned and then double checking the cache.
List<PersistableDataObject<Integer>> dbList = dao
.queryByExample(record);
// results, for example in the case of models with secondary ids. In
// general these cases should be rare and small. So we handle it by
// caching everything returned and then double checking the cache.
DatabaseQuery query = new DatabaseQuery(GridInfoRecord.class);
query.addQueryParam(GridInfoConstants.DATASET_ID, record.getDatasetId());
query.addQueryParam(GridInfoConstants.PARAMETER_ABBREVIATION, record
.getParameter().getAbbreviation());
query.addQueryParam(GridInfoConstants.LEVEL_ID, record.getLevel()
.getId());
query.addQueryParam(GridInfoConstants.LOCATION_ID, record.getLocation()
.getId());
List<?> dbList = dao.queryByCriteria(query);
if (dbList != null && !dbList.isEmpty()) {
for (PersistableDataObject<Integer> pdo : dbList) {
for (Object pdo : dbList) {
GridInfoRecord gir = (GridInfoRecord) pdo;
// if we don't remove then when an entry exists already the key
// and value become references to different objects which is not
@@ -131,7 +142,8 @@
* @param record
* @return
*/
private GridInfoRecord insert(GridInfoRecord record) {
private GridInfoRecord insert(GridInfoRecord record)
throws DataAccessLayerException {
ClusterTask ct = null;
do {
ct = ClusterLockUtils.lock("grid_info", "newEntry", 30000, true);
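
The GridInfoCache hunks above swap dao.queryByExample(record) for a DatabaseQuery restricted to the four columns that identify a grid info record (dataset id, parameter abbreviation, level id, location id), cache every row the query returns, and then re-check the local cache, which also resolves the occasional near-duplicate such as entries differing only in secondary id. Below is a self-contained sketch of that query-then-cache approach under those assumptions; the class and method names are hypothetical, not the real GridInfoCache or DAO code.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

// Hypothetical, simplified sketch; not the real GridInfoCache or DAO classes.
public class InfoCacheSketch {

    /** Stand-in for GridInfoRecord: equality covers the identifying fields. */
    static final class Info {
        final String datasetId;
        final String parameterAbbreviation;
        final long levelId;
        final int locationId;

        Info(String datasetId, String parameterAbbreviation, long levelId,
                int locationId) {
            this.datasetId = datasetId;
            this.parameterAbbreviation = parameterAbbreviation;
            this.levelId = levelId;
            this.locationId = locationId;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof Info)) {
                return false;
            }
            Info other = (Info) obj;
            return levelId == other.levelId
                    && locationId == other.locationId
                    && datasetId.equals(other.datasetId)
                    && parameterAbbreviation.equals(other.parameterAbbreviation);
        }

        @Override
        public int hashCode() {
            return Objects.hash(datasetId, parameterAbbreviation, levelId,
                    locationId);
        }
    }

    /** Stand-in for a DAO query restricted to the identifying columns. */
    interface Dao {
        List<Info> queryByFields(String datasetId, String parameterAbbreviation,
                long levelId, int locationId);
    }

    // Local cache; keys and values are the same objects so callers always get
    // back the canonical, database-backed instance.
    private final Map<Info, Info> cache = new HashMap<Info, Info>();

    public Info getGridInfo(Info wanted, Dao dao) {
        Info cached = cache.get(wanted);
        if (cached != null) {
            return cached; // local hit, no database round trip
        }
        // Query only on the columns that identify the record instead of
        // query-by-example over every populated field of the object.
        List<Info> dbList = dao.queryByFields(wanted.datasetId,
                wanted.parameterAbbreviation, wanted.levelId, wanted.locationId);
        if (dbList != null) {
            for (Info fromDb : dbList) {
                // Cache every row returned; if near-duplicates come back, the
                // final lookup below picks the one that actually matches.
                cache.put(fromDb, fromDb);
            }
        }
        return cache.get(wanted); // null means the caller still has to insert
    }
}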