diff --git a/cave/com.raytheon.uf.viz.thinclient.cave/src/com/raytheon/uf/viz/thinclient/cave/cache/CachingDataStore.java b/cave/com.raytheon.uf.viz.thinclient.cave/src/com/raytheon/uf/viz/thinclient/cave/cache/CachingDataStore.java
index 8c74a0322b..fec82757e7 100644
--- a/cave/com.raytheon.uf.viz.thinclient.cave/src/com/raytheon/uf/viz/thinclient/cave/cache/CachingDataStore.java
+++ b/cave/com.raytheon.uf.viz.thinclient.cave/src/com/raytheon/uf/viz/thinclient/cave/cache/CachingDataStore.java
@@ -19,13 +19,14 @@
**/
package com.raytheon.uf.viz.thinclient.cave.cache;
-import java.io.File;
-import java.io.FileInputStream;
import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.security.MessageDigest;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
import java.util.Map;
+import com.raytheon.uf.common.datastorage.DataStoreFactory;
import com.raytheon.uf.common.datastorage.IDataStore;
import com.raytheon.uf.common.datastorage.Request;
import com.raytheon.uf.common.datastorage.StorageException;
@@ -33,57 +34,37 @@ import com.raytheon.uf.common.datastorage.StorageProperties;
import com.raytheon.uf.common.datastorage.StorageProperties.Compression;
import com.raytheon.uf.common.datastorage.StorageStatus;
import com.raytheon.uf.common.datastorage.records.IDataRecord;
-import com.raytheon.uf.common.serialization.DynamicSerializationManager;
-import com.raytheon.uf.common.serialization.DynamicSerializationManager.SerializationType;
-import com.raytheon.uf.common.serialization.SerializationUtil;
-import com.raytheon.uf.common.util.cache.LRUCacheFS;
/**
- * Data store used to cache requests to the filesystem to save bandwidth
+ * Data store which wraps another {@link IDataStore}. This data store will
+ * always check with its cache before using the delegate and adds all results to
+ * the cache.
*
*
*
* SOFTWARE HISTORY
*
- * Date Ticket# Engineer Description
- * ------------ ---------- ----------- --------------------------
- * Nov 8, 2011 mschenke Initial creation
- * Feb 12, 2013 #1608 randerso Added explicit deletes for groups and datasets
+ * Date Ticket# Engineer Description
+ * ------------- -------- ----------- --------------------------
+ * Nov 08, 2011 mschenke Initial creation
+ * Feb 12, 2013 1608 randerso Added explicit deletes for groups and
+ * datasets
+ * Sep 18, 2013 2309 bsteffen Move disk access to DataStoreCache
*
*
*
* @author mschenke
* @version 1.0
*/
-
-/**
- * TODO Add Description
- *
- *
- *
- * SOFTWARE HISTORY
- *
- * Date Ticket# Engineer Description
- * ------------ ---------- ----------- --------------------------
- * Feb 12, 2013 randerso Initial creation
- *
- *
- *
- * @author randerso
- * @version 1.0
- */
public class CachingDataStore implements IDataStore {
- // quick byte string to hex conversion
- private static final String HEXES = "0123456789ABCDEF";
+ private final IDataStore delegate;
- private IDataStore delegate;
+ private DataStoreCache cache;
- private File cacheDir;
-
- CachingDataStore(IDataStore delegate, File cacheDir) {
+ CachingDataStore(IDataStore delegate, DataStoreCache cache) {
this.delegate = delegate;
- this.cacheDir = cacheDir;
+ this.cache = cache;
}
/*
@@ -95,16 +76,36 @@ public class CachingDataStore implements IDataStore {
@Override
public IDataRecord[] retrieve(String group) throws StorageException,
FileNotFoundException {
- IDataRecord[] records = null;
- File cacheFile = getCacheFile(group);
- if (cacheFile != null && cacheFile.exists()) {
- records = retrieveFromCache(cacheFile, IDataRecord[].class);
+ String[] datasets = cache.getDatasetNames(group);
+ if (datasets != null) {
+ List<String> datasetGroupPaths = new ArrayList<String>(
+ datasets.length);
+ List<IDataRecord> records = new ArrayList<IDataRecord>();
+ for (String dataset : datasets) {
+ String datasetGroupPath = toDatasetGroupPath(group, dataset);
+ IDataRecord record = cache.getDataset(datasetGroupPath,
+ Request.ALL);
+ if (record == null) {
+ datasetGroupPaths.add(datasetGroupPath);
+ } else {
+ records.add(record);
+ }
+ }
+ if (!datasetGroupPaths.isEmpty()) {
+ IDataRecord[] newRecords = retrieveDatasets(
+ datasetGroupPaths.toArray(new String[0]), Request.ALL);
+ for (int i = 0; i < newRecords.length; i += 1) {
+ cache.cacheDataset(datasetGroupPaths.get(i), newRecords[i],
+ Request.ALL);
+ }
+ records.addAll(Arrays.asList(newRecords));
+ }
+ return records.toArray(new IDataRecord[0]);
+ } else {
+ IDataRecord[] records = delegate.retrieve(group);
+ cacheDatasets(group, Arrays.asList(records), Request.ALL);
+ return records;
}
- if (records == null) {
- records = delegate.retrieve(group);
- storeToCache(cacheFile, records);
- }
- return records;
}
/*
@@ -117,16 +118,12 @@ public class CachingDataStore implements IDataStore {
@Override
public IDataRecord[] retrieve(String group, boolean includeInterpolated)
throws StorageException, FileNotFoundException {
- IDataRecord[] records = null;
- File cacheFile = getCacheFile(new Object[] { group, includeInterpolated });
- if (cacheFile != null && cacheFile.exists()) {
- records = retrieveFromCache(cacheFile, IDataRecord[].class);
+ if (includeInterpolated == false) {
+ return retrieve(group);
+ } else {
+ /* This is deprecated and unused so caching is not implemented. */
+ return delegate.retrieve(group, includeInterpolated);
}
- if (records == null) {
- records = delegate.retrieve(group, includeInterpolated);
- storeToCache(cacheFile, records);
- }
- return records;
}
/*
@@ -139,14 +136,11 @@ public class CachingDataStore implements IDataStore {
@Override
public IDataRecord retrieve(String group, String dataset, Request request)
throws StorageException, FileNotFoundException {
- IDataRecord record = null;
- File cacheFile = getCacheFile(new Object[] { group, dataset, request });
- if (cacheFile != null && cacheFile.exists()) {
- record = retrieveFromCache(cacheFile, IDataRecord.class);
- }
+ String datasetGroupPath = toDatasetGroupPath(group, dataset);
+ IDataRecord record = cache.getDataset(datasetGroupPath, request);
if (record == null) {
record = delegate.retrieve(group, dataset, request);
- storeToCache(cacheFile, record);
+ cache.cacheDataset(datasetGroupPath, record, request);
}
return record;
}
@@ -159,18 +153,33 @@ public class CachingDataStore implements IDataStore {
* .String[], com.raytheon.uf.common.datastorage.Request)
*/
@Override
- public IDataRecord[] retrieveDatasets(String[] datasetGroupPath,
+ public IDataRecord[] retrieveDatasets(String[] datasetGroupPaths,
Request request) throws StorageException, FileNotFoundException {
- IDataRecord[] records = null;
- File cacheFile = getCacheFile(new Object[] { datasetGroupPath, request });
- if (cacheFile != null && cacheFile.exists()) {
- records = retrieveFromCache(cacheFile, IDataRecord[].class);
+ Map<String, IDataRecord> records = new HashMap<String, IDataRecord>();
+ List<String> toRequest = new ArrayList<String>(datasetGroupPaths.length);
+ for (String datasetGroupPath : datasetGroupPaths) {
+ IDataRecord record = cache.getDataset(datasetGroupPath, request);
+ if (record == null) {
+ toRequest.add(datasetGroupPath);
+ } else {
+ records.put(datasetGroupPath, record);
+ }
}
- if (records == null) {
- records = delegate.retrieveDatasets(datasetGroupPath, request);
- storeToCache(cacheFile, records);
+ if (!toRequest.isEmpty()) {
+ IDataRecord[] newRecords = delegate.retrieveDatasets(
+ toRequest.toArray(new String[0]), request);
+ for (int i = 0; i < newRecords.length; i += 1) {
+ String datasetGroupPath = toRequest.get(i);
+ IDataRecord record = newRecords[i];
+ cache.cacheDataset(datasetGroupPath, record, request);
+ records.put(datasetGroupPath, record);
+ }
}
- return records;
+ IDataRecord[] result = new IDataRecord[datasetGroupPaths.length];
+ for (int i = 0; i < datasetGroupPaths.length; i += 1) {
+ result[i] = records.get(datasetGroupPaths[i]);
+ }
+ return result;
}
/*
@@ -183,16 +192,50 @@ public class CachingDataStore implements IDataStore {
@Override
public IDataRecord[] retrieveGroups(String[] groups, Request request)
throws StorageException, FileNotFoundException {
- IDataRecord[] records = null;
- File cacheFile = getCacheFile(new Object[] { groups, request });
- if (cacheFile != null && cacheFile.exists()) {
- records = retrieveFromCache(cacheFile, IDataRecord[].class);
+ List<String> toRequest = new ArrayList<String>();
+ Map<String, List<IDataRecord>> records = new HashMap<String, List<IDataRecord>>();
+ for (String group : groups) {
+ String[] datasets = cache.getDatasetNames(group);
+ if (datasets != null) {
+ IDataRecord[] cachedRecords = new IDataRecord[datasets.length];
+ for (int i = 0; i < datasets.length; i += 1) {
+ cachedRecords[i] = cache.getDataset(
+ toDatasetGroupPath(group, datasets[i]),
+ request);
+ if (cachedRecords[i] == null) {
+ toRequest.add(group);
+ cachedRecords = null;
+ break;
+ }
+ }
+ if (cachedRecords != null) {
+ records.put(group, Arrays.asList(cachedRecords));
+ }
+ } else {
+ toRequest.add(group);
+ }
}
- if (records == null) {
- records = delegate.retrieveGroups(groups, request);
- storeToCache(cacheFile, records);
+ if (!toRequest.isEmpty()) {
+ IDataRecord[] newRecords = delegate.retrieveGroups(
+ toRequest.toArray(new String[0]), request);
+ for (IDataRecord record : newRecords) {
+ String group = getGroup(record);
+ List<IDataRecord> groupRecs = records.get(group);
+ if (groupRecs == null) {
+ groupRecs = new ArrayList<IDataRecord>();
+ records.put(group, groupRecs);
+ }
+ groupRecs.add(record);
+ }
+ for (String group : toRequest) {
+ cacheDatasets(group, records.get(group), request);
+ }
}
- return records;
+ List<IDataRecord> result = new ArrayList<IDataRecord>();
+ for (String group : groups) {
+ result.addAll(records.get(group));
+ }
+ return result.toArray(new IDataRecord[0]);
}
/*
@@ -205,96 +248,43 @@ public class CachingDataStore implements IDataStore {
@Override
public String[] getDatasets(String group) throws StorageException,
FileNotFoundException {
- String[] datasets = null;
- File cacheFile = getCacheFile(group);
- if (cacheFile != null && cacheFile.exists()) {
- datasets = retrieveFromCache(cacheFile, String[].class);
- }
-
+ String[] datasets = cache.getDatasetNames(group);
if (datasets == null) {
datasets = delegate.getDatasets(group);
- storeToCache(cacheFile, datasets);
+ cache.cacheDatasetNames(group, datasets);
}
return datasets;
}
- @SuppressWarnings("unchecked")
- private T retrieveFromCache(File file, Class clazz) {
- long t0 = System.currentTimeMillis();
- T rval = null;
- try {
- FileInputStream fin = new FileInputStream(file);
- Object fromFile = DynamicSerializationManager.getManager(
- SerializationType.Thrift).deserialize(fin);
- fin.close();
- if (clazz.isInstance(fromFile)) {
- rval = (T) fromFile;
- }
- } catch (Throwable e) {
- System.err.println("Error retreiving object from cache file: "
- + e.getLocalizedMessage());
- }
+ // Caching utility methods
- if (rval != null) {
- LRUCacheFS.poll(file);
+ /**
+ * Cache all datasets for a group. Both the individual datasets and the names
+ * of the datasets are cached.
+ */
+ private void cacheDatasets(String group, List<IDataRecord> records,
+ Request request) {
+ String[] datasets = new String[records.size()];
+ for (int i = 0; i < datasets.length; i += 1) {
+ datasets[i] = records.get(i).getName();
+ cache.cacheDataset(toDatasetGroupPath(group, datasets[i]),
+ records.get(i), request);
}
- System.out.println("Time to retreive from cache = "
- + (System.currentTimeMillis() - t0) + "ms");
- return rval;
+ cache.cacheDatasetNames(group, datasets);
}
- private void storeToCache(File file, Object result) {
- long t0 = System.currentTimeMillis();
- if (result != null) {
- try {
- if (!file.getParentFile().exists()) {
- file.getParentFile().mkdirs();
- }
- FileOutputStream fout = new FileOutputStream(file);
- DynamicSerializationManager
- .getManager(SerializationType.Thrift).serialize(result,
- fout);
- fout.close();
- LRUCacheFS.poll(file);
- file.setReadable(true, false);
- file.setWritable(true, false);
- } catch (Exception e) {
- System.err.println("Error storing object to file: "
- + e.getLocalizedMessage());
- }
- }
- System.out.println("Time to store to cache = "
- + (System.currentTimeMillis() - t0) + "ms");
+ private static String getGroup(IDataRecord record) {
+ String group = record.getGroup();
+ /*
+ * TODO this works around a bug in older pypies. Delete it after 14.2.1
+ * is fielded everywhere.
+ */
+ group = group.replaceAll("::", DataStoreFactory.DEF_SEPARATOR);
+ return group;
}
- private File getCacheFile(Object obj) {
- long t0 = System.currentTimeMillis();
- try {
- byte[] thriftDigest = SerializationUtil.transformToThrift(obj);
- MessageDigest md = MessageDigest.getInstance("MD5");
- md.update(thriftDigest);
- String md5sum = toHex(md.digest());
- return new File(cacheDir, md5sum);
- } catch (Exception e) {
- System.err.println("Error getting cache file: "
- + e.getLocalizedMessage());
- } finally {
- System.out.println("Time to getCacheFile = "
- + (System.currentTimeMillis() - t0) + "ms");
- }
- return null;
- }
-
- private static String toHex(byte[] raw) {
- if (raw == null) {
- return null;
- }
- final StringBuilder hex = new StringBuilder(2 * raw.length);
- for (final byte b : raw) {
- hex.append(HEXES.charAt((b & 0xF0) >> 4)).append(
- HEXES.charAt((b & 0x0F)));
- }
- return hex.toString();
+ private static String toDatasetGroupPath(String group, String dataset) {
+ return group + DataStoreFactory.DEF_SEPARATOR + dataset;
}
// Non-caching methods
diff --git a/cave/com.raytheon.uf.viz.thinclient.cave/src/com/raytheon/uf/viz/thinclient/cave/cache/CachingDataStoreFactory.java b/cave/com.raytheon.uf.viz.thinclient.cave/src/com/raytheon/uf/viz/thinclient/cave/cache/CachingDataStoreFactory.java
index 933bc8dc9b..4014bd33aa 100644
--- a/cave/com.raytheon.uf.viz.thinclient.cave/src/com/raytheon/uf/viz/thinclient/cave/cache/CachingDataStoreFactory.java
+++ b/cave/com.raytheon.uf.viz.thinclient.cave/src/com/raytheon/uf/viz/thinclient/cave/cache/CachingDataStoreFactory.java
@@ -37,9 +37,11 @@ import com.raytheon.uf.viz.thinclient.preferences.ThinClientPreferenceConstants;
*
* SOFTWARE HISTORY
*
- * Date Ticket# Engineer Description
- * ------------ ---------- ----------- --------------------------
- * Nov 8, 2011 mschenke Initial creation
+ * Date Ticket# Engineer Description
+ * ------------- -------- ----------- --------------------------
+ * Nov 08, 2011 mschenke Initial creation
+ * Sep 18, 2013 2309 bsteffen Share a single DataStoreCache for all
+ * data stores.
*
*
*
@@ -54,15 +56,16 @@ public class CachingDataStoreFactory implements IDataStoreFactory,
private boolean cachingData;
- private File cacheDir;
+ private DataStoreCache cache;
public CachingDataStoreFactory(IDataStoreFactory delegateFactory) {
this.delegateFactory = delegateFactory;
IPreferenceStore store = Activator.getDefault().getPreferenceStore();
cachingData = store
.getBoolean(ThinClientPreferenceConstants.P_CACHE_WEATHER);
- cacheDir = new File(
+ File cacheDir = new File(
store.getString(ThinClientPreferenceConstants.P_CACHE_DIR));
+ cache = new DataStoreCache(cacheDir);
store.addPropertyChangeListener(this);
}
@@ -77,7 +80,7 @@ public class CachingDataStoreFactory implements IDataStoreFactory,
public IDataStore getDataStore(File file, boolean useLocking) {
IDataStore dataStore = delegateFactory.getDataStore(file, useLocking);
if (cachingData) {
- dataStore = new CachingDataStore(dataStore, cacheDir);
+ dataStore = new CachingDataStore(dataStore, cache);
}
return dataStore;
}
@@ -96,7 +99,8 @@ public class CachingDataStoreFactory implements IDataStoreFactory,
cachingData = Boolean.valueOf(String.valueOf(event.getNewValue()));
} else if (ThinClientPreferenceConstants.P_CACHE_DIR.equals(event
.getProperty())) {
- cacheDir = new File(String.valueOf(event.getNewValue()));
+ File cacheDir = new File(String.valueOf(event.getNewValue()));
+ cache = new DataStoreCache(cacheDir);
}
}
diff --git a/cave/com.raytheon.uf.viz.thinclient.cave/src/com/raytheon/uf/viz/thinclient/cave/cache/DataStoreCache.java b/cave/com.raytheon.uf.viz.thinclient.cave/src/com/raytheon/uf/viz/thinclient/cave/cache/DataStoreCache.java
new file mode 100644
index 0000000000..292b7810a6
--- /dev/null
+++ b/cave/com.raytheon.uf.viz.thinclient.cave/src/com/raytheon/uf/viz/thinclient/cave/cache/DataStoreCache.java
@@ -0,0 +1,555 @@
+/**
+ * This software was developed and / or modified by Raytheon Company,
+ * pursuant to Contract DG133W-05-CQ-1067 with the US Government.
+ *
+ * U.S. EXPORT CONTROLLED TECHNICAL DATA
+ * This software product contains export-restricted data whose
+ * export/transfer/disclosure is restricted by U.S. law. Dissemination
+ * to non-U.S. persons whether in the United States or abroad requires
+ * an export license or other authorization.
+ *
+ * Contractor Name: Raytheon Company
+ * Contractor Address: 6825 Pine Street, Suite 340
+ * Mail Stop B8
+ * Omaha, NE 68106
+ * 402.291.0100
+ *
+ * See the AWIPS II Master Rights File ("Master Rights File.pdf") for
+ * further licensing information.
+ **/
+package com.raytheon.uf.viz.thinclient.cave.cache;
+
+import java.awt.Point;
+import java.awt.Rectangle;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.lang.ref.Reference;
+import java.lang.ref.SoftReference;
+import java.nio.Buffer;
+import java.nio.ByteBuffer;
+import java.nio.FloatBuffer;
+import java.nio.IntBuffer;
+import java.nio.ShortBuffer;
+import java.security.MessageDigest;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import com.raytheon.uf.common.datastorage.DataStoreFactory;
+import com.raytheon.uf.common.datastorage.IDataStore;
+import com.raytheon.uf.common.datastorage.Request;
+import com.raytheon.uf.common.datastorage.Request.Type;
+import com.raytheon.uf.common.datastorage.records.ByteDataRecord;
+import com.raytheon.uf.common.datastorage.records.FloatDataRecord;
+import com.raytheon.uf.common.datastorage.records.IDataRecord;
+import com.raytheon.uf.common.datastorage.records.IntegerDataRecord;
+import com.raytheon.uf.common.datastorage.records.ShortDataRecord;
+import com.raytheon.uf.common.serialization.DynamicSerializationManager;
+import com.raytheon.uf.common.serialization.DynamicSerializationManager.SerializationType;
+import com.raytheon.uf.common.status.IUFStatusHandler;
+import com.raytheon.uf.common.status.UFStatus;
+import com.raytheon.uf.common.status.UFStatus.Priority;
+import com.raytheon.uf.common.util.cache.LRUCacheFS;
+import com.raytheon.uf.viz.core.data.BufferSlicer;
+
+/**
+ * Caching layer used by {@link CachingDataStore}. Caches two things:
+ *
+ * <ul>
+ * <li>For a group all dataset names are cached.</li>
+ * <li>For a dataset an IDataRecord is cached for each request that has been
+ * performed.</li>
+ * </ul>
+ *
+ *
+ * These two pieces of information together can fulfill any {@link IDataStore}
+ * request if the data has been requested before. This class maintains a short
+ * lived memory cache using {@link SoftReference} and also a persistent disk
+ * cache.
+ *
+ * In addition to simply caching java objects this class also compares new
+ * {@link Request} objects to what is already in the cache and attempts to reuse
+ * cached {@link IDataRecord} objects whenever possible.
+ *
+ *
+ *
+ * SOFTWARE HISTORY
+ *
+ * Date Ticket# Engineer Description
+ * ------------- -------- ----------- --------------------------
+ * Sep 18, 2013 2309 bsteffen Initial creation
+ *
+ *
+ *
+ * @author bsteffen
+ * @version 1.0
+ * @see IDataStore
+ */
+
+public class DataStoreCache {
+
+ private static final transient IUFStatusHandler statusHandler = UFStatus
+ .getHandler(DataStoreCache.class, "ThinClient");
+
+ // quick byte string to hex conversion
+ private static final String HEXES = "0123456789ABCDEF";
+
+ private final File cacheDir;
+
+ private final Map<String, Reference<String[]>> namesMemoryCache;
+
+ private final Map<String, Reference<Map<Request, IDataRecord>>> dataMemoryCache;
+
+ /**
+ * Construct a DataStoreCache that will store cache files in the given
+ * cacheDir.
+ *
+ * @param cacheDir
+ * directory for storing files. If the directory does not exist
+ * it will be created. If the directory cannot be created or
+ * written too then disk caching will not work and lots of
+ * warnings will be sent to UFStatus.
+ */
+ public DataStoreCache(File cacheDir) {
+ this.cacheDir = cacheDir;
+ this.namesMemoryCache = Collections
+ .synchronizedMap(new HashMap<String, Reference<String[]>>());
+ this.dataMemoryCache = Collections
+ .synchronizedMap(new HashMap<String, Reference<Map<Request, IDataRecord>>>());
+ }
+
+ /**
+ * Gets the dataset names for a group from this cache.
+ *
+ * @param group
+ * the name of a group, must not be null
+ * @return an array of dataset names or null if they are not in this cache
+ * @see IDataStore#getDatasets(String)
+ * @see #cacheDatasetNames(String, String[])
+ */
+ public String[] getDatasetNames(String group) {
+ String[] names = getNamesFromMemory(group);
+ if (names == null) {
+ names = getNamesFromFile(group);
+ if (names != null) {
+ cacheNamesInMemory(group, names);
+ }
+ }
+ return names;
+ }
+
+ /**
+ * Stores dataset names for a group in this cache. The names can be
+ * retrieved from the cache using {@link #getDatasetNames(String)}
+ *
+ * @param group
+ * the name of a group, must not be null
+ * @param datasetNames
+ * the names of all datasets in the group, must not be null
+ * @see #getDatasetNames(String)
+ */
+ public void cacheDatasetNames(String group, String[] datasetNames) {
+ cacheNamesInMemory(group, datasetNames);
+ cacheNamesInFile(group, datasetNames);
+ }
+
+ private String[] getNamesFromMemory(String group) {
+ Reference<String[]> ref = namesMemoryCache.get(group);
+ if (ref != null) {
+ return ref.get();
+ }
+ return null;
+ }
+
+ private String[] getNamesFromFile(String group) {
+ File cacheFile = getCacheFile(group);
+ if (cacheFile != null && cacheFile.exists()) {
+ return retrieveFromCache(cacheFile, String[].class);
+
+ }
+ return null;
+ }
+
+ private void cacheNamesInMemory(String group, String[] names) {
+ namesMemoryCache.put(group, new SoftReference<String[]>(names));
+ }
+
+ private void cacheNamesInFile(String group, String[] names) {
+ storeToCache(getCacheFile(group), names);
+ }
+
+ /**
+ * Gets an {@link IDataRecord} for a specific dataset/group path and request
+ * from this cache.
+ *
+ * @param datasetGroupPath
+ * the group and dataset concatenated together with a
+ * {@link DataStoreFactory#DEF_SEPARATOR}, must not be null.
+ * @param request
+ * the request defining the portion of the dataset that is
+ * needed, must not be null.
+ * @return an IDataRecord or null if it is not in the cache.
+ * @see IDataStore#retrieveDatasets(String[], Request)
+ * @see #cacheDataset(String, IDataRecord, Request)
+ */
+ public IDataRecord getDataset(String datasetGroupPath, Request request) {
+ Map<Request, IDataRecord> data = getData(datasetGroupPath);
+ if (data == null) {
+ return null;
+ } else {
+ synchronized (data) {
+ if (data.containsKey(request)) {
+ return data.get(request);
+ } else {
+ for (Entry<Request, IDataRecord> entry : data.entrySet()) {
+ Request cachedRequest = entry.getKey();
+ if (contains(cachedRequest, request)) {
+ IDataRecord newRecord = slice(request,
+ cachedRequest, entry.getValue());
+ if (newRecord != null) {
+ return newRecord;
+ }
+ }
+ }
+ }
+ return null;
+ }
+ }
+ }
+
+ /**
+ * Stores a portion of a dataset corresponding to request in this cache. The
+ * record can be retrieved from the cache using
+ * {@link #getDataset(String, Request)}
+ *
+ * @param datasetGroupPath
+ * the group and dataset concatenated together with a
+ * {@link DataStoreFactory#DEF_SEPARATOR}, must not be null.
+ * @param record
+ * the data record containing data corresponding to the request,
+ * must not be null.
+ * @param request
+ * the request defining the portion of the dataset that is
+ * needed, must not be null.
+ * @see #getDataset(String, Request)
+ */
+ public void cacheDataset(String datasetGroupPath, IDataRecord record,
+ Request request) {
+ Map<Request, IDataRecord> data = getData(datasetGroupPath);
+ if (data != null) {
+ synchronized (data) {
+ /*
+ * Minimize the size of the cache by ensuring any requests that
+ * are a subset of another request are not cached.
+ */
+ Iterator<Request> it = data.keySet().iterator();
+ while (it.hasNext()) {
+ Request cachedRequest = it.next();
+ if (contains(cachedRequest, request)) {
+ return;
+ } else if (contains(request, cachedRequest)) {
+ it.remove();
+ }
+ }
+ data.put(request, record);
+ cacheDataInFile(datasetGroupPath, data);
+ }
+ } else {
+ data = new HashMap<Request, IDataRecord>();
+ data.put(request, record);
+ cacheData(datasetGroupPath, data);
+ }
+ }
+
+ private Map<Request, IDataRecord> getData(String datasetGroupPath) {
+ Map<Request, IDataRecord> data = getDataFromMemory(datasetGroupPath);
+ if (data == null) {
+ data = getDataFromFile(datasetGroupPath);
+ if (data != null) {
+ cacheDataInMemory(datasetGroupPath, data);
+ }
+ }
+ return data;
+ }
+
+ private Map<Request, IDataRecord> getDataFromMemory(String datasetGroupPath) {
+ Reference