Issue #2309 Make CachingDataStore smarter.

Change-Id: Icb8c5ddc0ed9c648f4e2609b9801a695745f7600

Former-commit-id: 83a91dac47 [formerly 8343b7256c6d2a034d57714b0d7966282b758290]
Former-commit-id: 503282475c
This commit is contained in:
Ben Steffensmeier 2013-09-19 11:14:52 -05:00
parent f4f205e5a7
commit b5c045f6b4
5 changed files with 726 additions and 171 deletions

View file

@ -19,13 +19,14 @@
**/
package com.raytheon.uf.viz.thinclient.cave.cache;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.raytheon.uf.common.datastorage.DataStoreFactory;
import com.raytheon.uf.common.datastorage.IDataStore;
import com.raytheon.uf.common.datastorage.Request;
import com.raytheon.uf.common.datastorage.StorageException;
@ -33,57 +34,37 @@ import com.raytheon.uf.common.datastorage.StorageProperties;
import com.raytheon.uf.common.datastorage.StorageProperties.Compression;
import com.raytheon.uf.common.datastorage.StorageStatus;
import com.raytheon.uf.common.datastorage.records.IDataRecord;
import com.raytheon.uf.common.serialization.DynamicSerializationManager;
import com.raytheon.uf.common.serialization.DynamicSerializationManager.SerializationType;
import com.raytheon.uf.common.serialization.SerializationUtil;
import com.raytheon.uf.common.util.cache.LRUCacheFS;
/**
* Data store used to cache requests to the filesystem to save bandwidth
* Data store which wraps with another {@link IDataStore}. This data store will
* always check with its cache before using the delegate and adds all results to
* the cache.
*
* <pre>
*
* SOFTWARE HISTORY
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Nov 8, 2011 mschenke Initial creation
* Feb 12, 2013 #1608 randerso Added explicit deletes for groups and datasets
* Date Ticket# Engineer Description
* ------------- -------- ----------- --------------------------
* Nov 08, 2011 mschenke Initial creation
* Feb 12, 2013 1608 randerso Added explicit deletes for groups and
* datasets
* Sep 18, 2013 2309 bsteffen Move disk access to DataStoreCache
*
* </pre>
*
* @author mschenke
* @version 1.0
*/
/**
* TODO Add Description
*
* <pre>
*
* SOFTWARE HISTORY
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Feb 12, 2013 randerso Initial creation
*
* </pre>
*
* @author randerso
* @version 1.0
*/
public class CachingDataStore implements IDataStore {
// quick byte string to hex conversion
private static final String HEXES = "0123456789ABCDEF";
private final IDataStore delegate;
private IDataStore delegate;
private DataStoreCache cache;
private File cacheDir;
CachingDataStore(IDataStore delegate, File cacheDir) {
CachingDataStore(IDataStore delegate, DataStoreCache cache) {
this.delegate = delegate;
this.cacheDir = cacheDir;
this.cache = cache;
}
/*
@ -95,16 +76,36 @@ public class CachingDataStore implements IDataStore {
@Override
public IDataRecord[] retrieve(String group) throws StorageException,
FileNotFoundException {
IDataRecord[] records = null;
File cacheFile = getCacheFile(group);
if (cacheFile != null && cacheFile.exists()) {
records = retrieveFromCache(cacheFile, IDataRecord[].class);
String[] datasets = cache.getDatasetNames(group);
if (datasets != null) {
List<String> datasetGroupPaths = new ArrayList<String>(
datasets.length);
List<IDataRecord> records = new ArrayList<IDataRecord>();
for (String dataset : datasets) {
String datasetGroupPath = toDatasetGroupPath(group, dataset);
IDataRecord record = cache.getDataset(datasetGroupPath,
Request.ALL);
if (record == null) {
datasetGroupPaths.add(datasetGroupPath);
} else {
records.add(record);
}
}
if (!datasetGroupPaths.isEmpty()) {
IDataRecord[] newRecords = retrieveDatasets(
datasetGroupPaths.toArray(new String[0]), Request.ALL);
for (int i = 0; i < newRecords.length; i += 1) {
cache.cacheDataset(datasetGroupPaths.get(i), newRecords[i],
Request.ALL);
}
records.addAll(Arrays.asList(newRecords));
}
return records.toArray(new IDataRecord[0]);
} else {
IDataRecord[] records = delegate.retrieve(group);
cacheDatasets(group, Arrays.asList(records), Request.ALL);
return records;
}
if (records == null) {
records = delegate.retrieve(group);
storeToCache(cacheFile, records);
}
return records;
}
/*
@ -117,16 +118,12 @@ public class CachingDataStore implements IDataStore {
@Override
public IDataRecord[] retrieve(String group, boolean includeInterpolated)
throws StorageException, FileNotFoundException {
IDataRecord[] records = null;
File cacheFile = getCacheFile(new Object[] { group, includeInterpolated });
if (cacheFile != null && cacheFile.exists()) {
records = retrieveFromCache(cacheFile, IDataRecord[].class);
if (includeInterpolated == false) {
return retrieve(group);
} else {
/* This is deprecated and unused so caching is not implemented. */
return delegate.retrieve(group, includeInterpolated);
}
if (records == null) {
records = delegate.retrieve(group, includeInterpolated);
storeToCache(cacheFile, records);
}
return records;
}
/*
@ -139,14 +136,11 @@ public class CachingDataStore implements IDataStore {
@Override
public IDataRecord retrieve(String group, String dataset, Request request)
throws StorageException, FileNotFoundException {
IDataRecord record = null;
File cacheFile = getCacheFile(new Object[] { group, dataset, request });
if (cacheFile != null && cacheFile.exists()) {
record = retrieveFromCache(cacheFile, IDataRecord.class);
}
String datasetGroupPath = toDatasetGroupPath(group, dataset);
IDataRecord record = cache.getDataset(datasetGroupPath, request);
if (record == null) {
record = delegate.retrieve(group, dataset, request);
storeToCache(cacheFile, record);
cache.cacheDataset(datasetGroupPath, record, request);
}
return record;
}
@ -159,18 +153,33 @@ public class CachingDataStore implements IDataStore {
* .String[], com.raytheon.uf.common.datastorage.Request)
*/
@Override
public IDataRecord[] retrieveDatasets(String[] datasetGroupPath,
public IDataRecord[] retrieveDatasets(String[] datasetGroupPaths,
Request request) throws StorageException, FileNotFoundException {
IDataRecord[] records = null;
File cacheFile = getCacheFile(new Object[] { datasetGroupPath, request });
if (cacheFile != null && cacheFile.exists()) {
records = retrieveFromCache(cacheFile, IDataRecord[].class);
Map<String, IDataRecord> records = new HashMap<String, IDataRecord>();
List<String> toRequest = new ArrayList<String>(datasetGroupPaths.length);
for (String datasetGroupPath : datasetGroupPaths) {
IDataRecord record = cache.getDataset(datasetGroupPath, request);
if (record == null) {
toRequest.add(datasetGroupPath);
} else {
records.put(datasetGroupPath, record);
}
}
if (records == null) {
records = delegate.retrieveDatasets(datasetGroupPath, request);
storeToCache(cacheFile, records);
if (!toRequest.isEmpty()) {
IDataRecord[] newRecords = delegate.retrieveDatasets(
toRequest.toArray(new String[0]), request);
for (int i = 0; i < newRecords.length; i += 1) {
String datasetGroupPath = toRequest.get(i);
IDataRecord record = newRecords[i];
cache.cacheDataset(datasetGroupPath, record, request);
records.put(datasetGroupPath, record);
}
}
return records;
IDataRecord[] result = new IDataRecord[datasetGroupPaths.length];
for (int i = 0; i < datasetGroupPaths.length; i += 1) {
result[i] = records.get(datasetGroupPaths[i]);
}
return result;
}
/*
@ -183,16 +192,50 @@ public class CachingDataStore implements IDataStore {
@Override
public IDataRecord[] retrieveGroups(String[] groups, Request request)
throws StorageException, FileNotFoundException {
IDataRecord[] records = null;
File cacheFile = getCacheFile(new Object[] { groups, request });
if (cacheFile != null && cacheFile.exists()) {
records = retrieveFromCache(cacheFile, IDataRecord[].class);
List<String> toRequest = new ArrayList<String>();
Map<String, List<IDataRecord>> records = new HashMap<String, List<IDataRecord>>();
for (String group : groups) {
String[] datasets = cache.getDatasetNames(group);
if (datasets != null) {
IDataRecord[] cachedRecords = new IDataRecord[datasets.length];
for (int i = 0; i < datasets.length; i += 1) {
cachedRecords[i] = cache.getDataset(
toDatasetGroupPath(group, datasets[i]),
request);
if (cachedRecords[i] == null) {
toRequest.add(group);
cachedRecords = null;
break;
}
}
if (cachedRecords != null) {
records.put(group, Arrays.asList(cachedRecords));
}
} else {
toRequest.add(group);
}
}
if (records == null) {
records = delegate.retrieveGroups(groups, request);
storeToCache(cacheFile, records);
if (!toRequest.isEmpty()) {
IDataRecord[] newRecords = delegate.retrieveGroups(
toRequest.toArray(new String[0]), request);
for (IDataRecord record : newRecords) {
String group = getGroup(record);
List<IDataRecord> groupRecs = records.get(group);
if (groupRecs == null) {
groupRecs = new ArrayList<IDataRecord>();
records.put(group, groupRecs);
}
groupRecs.add(record);
}
for (String group : toRequest) {
cacheDatasets(group, records.get(group), request);
}
}
return records;
List<IDataRecord> result = new ArrayList<IDataRecord>();
for (String group : groups) {
result.addAll(records.get(group));
}
return result.toArray(new IDataRecord[0]);
}
/*
@ -205,96 +248,43 @@ public class CachingDataStore implements IDataStore {
@Override
public String[] getDatasets(String group) throws StorageException,
FileNotFoundException {
String[] datasets = null;
File cacheFile = getCacheFile(group);
if (cacheFile != null && cacheFile.exists()) {
datasets = retrieveFromCache(cacheFile, String[].class);
}
String[] datasets = cache.getDatasetNames(group);
if (datasets == null) {
datasets = delegate.getDatasets(group);
storeToCache(cacheFile, datasets);
cache.cacheDatasetNames(group, datasets);
}
return datasets;
}
@SuppressWarnings("unchecked")
private <T> T retrieveFromCache(File file, Class<T> clazz) {
long t0 = System.currentTimeMillis();
T rval = null;
try {
FileInputStream fin = new FileInputStream(file);
Object fromFile = DynamicSerializationManager.getManager(
SerializationType.Thrift).deserialize(fin);
fin.close();
if (clazz.isInstance(fromFile)) {
rval = (T) fromFile;
}
} catch (Throwable e) {
System.err.println("Error retreiving object from cache file: "
+ e.getLocalizedMessage());
}
// Caching utility methods
if (rval != null) {
LRUCacheFS.poll(file);
/**
* Cache all datasets for a group. Both the individual datasets and the names
* of the datasets are cached.
*/
private void cacheDatasets(String group, List<IDataRecord> records,
Request request) {
String[] datasets = new String[records.size()];
for (int i = 0; i < datasets.length; i += 1) {
datasets[i] = records.get(i).getName();
cache.cacheDataset(toDatasetGroupPath(group, datasets[i]),
records.get(i), request);
}
System.out.println("Time to retreive from cache = "
+ (System.currentTimeMillis() - t0) + "ms");
return rval;
cache.cacheDatasetNames(group, datasets);
}
private void storeToCache(File file, Object result) {
long t0 = System.currentTimeMillis();
if (result != null) {
try {
if (!file.getParentFile().exists()) {
file.getParentFile().mkdirs();
}
FileOutputStream fout = new FileOutputStream(file);
DynamicSerializationManager
.getManager(SerializationType.Thrift).serialize(result,
fout);
fout.close();
LRUCacheFS.poll(file);
file.setReadable(true, false);
file.setWritable(true, false);
} catch (Exception e) {
System.err.println("Error storing object to file: "
+ e.getLocalizedMessage());
}
}
System.out.println("Time to store to cache = "
+ (System.currentTimeMillis() - t0) + "ms");
/**
 * Extract the group name from a record, normalizing any legacy "::"
 * separators to the standard group separator.
 */
private static String getGroup(IDataRecord record) {
    /*
     * TODO this works around a bug in older pypies. Delete it after 14.2.1
     * is fielded everywhere.
     */
    return record.getGroup().replaceAll("::", DataStoreFactory.DEF_SEPARATOR);
}
private File getCacheFile(Object obj) {
long t0 = System.currentTimeMillis();
try {
byte[] thriftDigest = SerializationUtil.transformToThrift(obj);
MessageDigest md = MessageDigest.getInstance("MD5");
md.update(thriftDigest);
String md5sum = toHex(md.digest());
return new File(cacheDir, md5sum);
} catch (Exception e) {
System.err.println("Error getting cache file: "
+ e.getLocalizedMessage());
} finally {
System.out.println("Time to getCacheFile = "
+ (System.currentTimeMillis() - t0) + "ms");
}
return null;
}
private static String toHex(byte[] raw) {
if (raw == null) {
return null;
}
final StringBuilder hex = new StringBuilder(2 * raw.length);
for (final byte b : raw) {
hex.append(HEXES.charAt((b & 0xF0) >> 4)).append(
HEXES.charAt((b & 0x0F)));
}
return hex.toString();
/**
 * Build the full dataset/group path by joining the group and dataset name
 * with the standard separator.
 */
private static String toDatasetGroupPath(String group, String dataset) {
    StringBuilder path = new StringBuilder(group);
    path.append(DataStoreFactory.DEF_SEPARATOR).append(dataset);
    return path.toString();
}
// Non-caching methods

View file

@ -37,9 +37,11 @@ import com.raytheon.uf.viz.thinclient.preferences.ThinClientPreferenceConstants;
*
* SOFTWARE HISTORY
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Nov 8, 2011 mschenke Initial creation
* Date Ticket# Engineer Description
* ------------- -------- ----------- --------------------------
* Nov 08, 2011 mschenke Initial creation
* Sep 18, 2013 2309 bsteffen Share a single DataStoreCache for all
* data stores.
*
* </pre>
*
@ -54,15 +56,16 @@ public class CachingDataStoreFactory implements IDataStoreFactory,
private boolean cachingData;
private File cacheDir;
private DataStoreCache cache;
public CachingDataStoreFactory(IDataStoreFactory delegateFactory) {
this.delegateFactory = delegateFactory;
IPreferenceStore store = Activator.getDefault().getPreferenceStore();
cachingData = store
.getBoolean(ThinClientPreferenceConstants.P_CACHE_WEATHER);
cacheDir = new File(
File cacheDir = new File(
store.getString(ThinClientPreferenceConstants.P_CACHE_DIR));
cache = new DataStoreCache(cacheDir);
store.addPropertyChangeListener(this);
}
@ -77,7 +80,7 @@ public class CachingDataStoreFactory implements IDataStoreFactory,
public IDataStore getDataStore(File file, boolean useLocking) {
IDataStore dataStore = delegateFactory.getDataStore(file, useLocking);
if (cachingData) {
dataStore = new CachingDataStore(dataStore, cacheDir);
dataStore = new CachingDataStore(dataStore, cache);
}
return dataStore;
}
@ -96,7 +99,8 @@ public class CachingDataStoreFactory implements IDataStoreFactory,
cachingData = Boolean.valueOf(String.valueOf(event.getNewValue()));
} else if (ThinClientPreferenceConstants.P_CACHE_DIR.equals(event
.getProperty())) {
cacheDir = new File(String.valueOf(event.getNewValue()));
File cacheDir = new File(String.valueOf(event.getNewValue()));
cache = new DataStoreCache(cacheDir);
}
}

View file

@ -0,0 +1,555 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.uf.viz.thinclient.cave.cache;
import java.awt.Point;
import java.awt.Rectangle;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.lang.ref.Reference;
import java.lang.ref.SoftReference;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.FloatBuffer;
import java.nio.IntBuffer;
import java.nio.ShortBuffer;
import java.security.MessageDigest;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import com.raytheon.uf.common.datastorage.DataStoreFactory;
import com.raytheon.uf.common.datastorage.IDataStore;
import com.raytheon.uf.common.datastorage.Request;
import com.raytheon.uf.common.datastorage.Request.Type;
import com.raytheon.uf.common.datastorage.records.ByteDataRecord;
import com.raytheon.uf.common.datastorage.records.FloatDataRecord;
import com.raytheon.uf.common.datastorage.records.IDataRecord;
import com.raytheon.uf.common.datastorage.records.IntegerDataRecord;
import com.raytheon.uf.common.datastorage.records.ShortDataRecord;
import com.raytheon.uf.common.serialization.DynamicSerializationManager;
import com.raytheon.uf.common.serialization.DynamicSerializationManager.SerializationType;
import com.raytheon.uf.common.status.IUFStatusHandler;
import com.raytheon.uf.common.status.UFStatus;
import com.raytheon.uf.common.status.UFStatus.Priority;
import com.raytheon.uf.common.util.cache.LRUCacheFS;
import com.raytheon.uf.viz.core.data.BufferSlicer;
/**
 * Caching layer used by {@link CachingDataStore}. Caches two things:
 * <ul>
 * <li>
 * For a group all dataset names are cached.</li>
 * <li>
 * For a dataset a IDataRecord is cached for each request that has been
 * performed.</li>
 * </ul>
 *
 * These two pieces of information together can fulfill any {@link IDataStore}
 * request if the data has been requested before. This class maintains a short
 * lived memory cache using {@link SoftReference} and also a persistent disk
 * cache.
 *
 * In addition to simply caching java objects this class also compares new
 * {@link Request} objects to what is already in the cache and attempts to reuse
 * cached {@link IDataRecord} objects whenever possible.
 *
 * <pre>
 *
 * SOFTWARE HISTORY
 *
 * Date          Ticket#  Engineer    Description
 * ------------- -------- ----------- --------------------------
 * Sep 18, 2013  2309     bsteffen    Initial creation
 *
 * </pre>
 *
 * @author bsteffen
 * @version 1.0
 * @see IDataStore
 */
public class DataStoreCache {

    private static final transient IUFStatusHandler statusHandler = UFStatus
            .getHandler(DataStoreCache.class, "ThinClient");

    // quick byte string to hex conversion
    private static final String HEXES = "0123456789ABCDEF";

    /** Directory where persistent cache files are written. */
    private final File cacheDir;

    /** Soft memory cache of group name -> dataset names. */
    private final Map<String, Reference<String[]>> namesMemoryCache;

    /**
     * Soft memory cache of dataset/group path -> (request -> record) for every
     * request that has been cached for that dataset.
     */
    private final Map<String, Reference<Map<Request, IDataRecord>>> dataMemoryCache;

    /**
     * Construct a DataStoreCache that will store cache files in the given
     * cacheDir.
     *
     * @param cacheDir
     *            directory for storing files. If the directory does not exist
     *            it will be created. If the directory cannot be created or
     *            written to then disk caching will not work and lots of
     *            warnings will be sent to UFStatus.
     */
    public DataStoreCache(File cacheDir) {
        this.cacheDir = cacheDir;
        this.namesMemoryCache = Collections
                .synchronizedMap(new HashMap<String, Reference<String[]>>());
        this.dataMemoryCache = Collections
                .synchronizedMap(new HashMap<String, Reference<Map<Request, IDataRecord>>>());
    }

    /**
     * Gets the dataset names for a group from this cache.
     *
     * @param group
     *            the name of a group, must not be null
     * @return an array of dataset names or null if they are not in this cache
     * @see IDataStore#getDatasets(String)
     * @see #cacheDatasetNames(String, String[])
     */
    public String[] getDatasetNames(String group) {
        String[] names = getNamesFromMemory(group);
        if (names == null) {
            names = getNamesFromFile(group);
            if (names != null) {
                // promote disk hit into the memory cache for faster reuse
                cacheNamesInMemory(group, names);
            }
        }
        return names;
    }

    /**
     * Stores dataset names for a group in this cache. The names can be
     * retrieved from the cache using {@link #getDatasetNames(String)}
     *
     * @param group
     *            the name of a group, must not be null
     * @param datasetNames
     *            the names of all datasets in the group, must not be null
     * @see #getDatasetNames(String)
     */
    public void cacheDatasetNames(String group, String[] datasetNames) {
        cacheNamesInMemory(group, datasetNames);
        cacheNamesInFile(group, datasetNames);
    }

    private String[] getNamesFromMemory(String group) {
        Reference<String[]> ref = namesMemoryCache.get(group);
        if (ref != null) {
            return ref.get();
        }
        return null;
    }

    private String[] getNamesFromFile(String group) {
        File cacheFile = getCacheFile(group);
        if (cacheFile != null && cacheFile.exists()) {
            return retrieveFromCache(cacheFile, String[].class);
        }
        return null;
    }

    private void cacheNamesInMemory(String group, String[] names) {
        namesMemoryCache.put(group, new SoftReference<String[]>(names));
    }

    private void cacheNamesInFile(String group, String[] names) {
        storeToCache(getCacheFile(group), names);
    }

    /**
     * Gets an {@link IDataRecord} for a specific dataset/group path and request
     * from this cache.
     *
     * @param datasetGroupPath
     *            the group and dataset concatenated together with a
     *            {@link DataStoreFactory#DEF_SEPARATOR}, must not be null.
     * @param request
     *            the request defining the portion of the dataset that is
     *            needed, must not be null.
     * @return an IDataRecord or null if it is not in the cache.
     * @see IDataStore#retrieveDatasets(String[], Request)
     * @see #cacheDataset(String, IDataRecord, Request)
     */
    public IDataRecord getDataset(String datasetGroupPath, Request request) {
        Map<Request, IDataRecord> data = getData(datasetGroupPath);
        if (data == null) {
            return null;
        } else {
            synchronized (data) {
                if (data.containsKey(request)) {
                    return data.get(request);
                } else {
                    /*
                     * No exact match; attempt to derive the answer by slicing
                     * a cached record whose request covers this one.
                     */
                    for (Entry<Request, IDataRecord> entry : data.entrySet()) {
                        Request cachedRequest = entry.getKey();
                        if (contains(cachedRequest, request)) {
                            IDataRecord newRecord = slice(request,
                                    cachedRequest, entry.getValue());
                            if (newRecord != null) {
                                return newRecord;
                            }
                        }
                    }
                }
                return null;
            }
        }
    }

    /**
     * Stores a portion of a dataset corresponding to request in this cache. The
     * record can be retrieved from the cache using
     * {@link #getDataset(String, Request)}
     *
     * @param datasetGroupPath
     *            the group and dataset concatenated together with a
     *            {@link DataStoreFactory#DEF_SEPARATOR}, must not be null.
     * @param record
     *            the data record containing data corresponding to the request,
     *            must not be null.
     * @param request
     *            the request defining the portion of the dataset that is
     *            needed, must not be null.
     * @see #getDataset(String, Request)
     */
    public void cacheDataset(String datasetGroupPath, IDataRecord record,
            Request request) {
        Map<Request, IDataRecord> data = getData(datasetGroupPath);
        if (data != null) {
            synchronized (data) {
                /*
                 * Minimize the size of the cache by ensuring any requests that
                 * are a subset of another request are not cached.
                 */
                Iterator<Request> it = data.keySet().iterator();
                while (it.hasNext()) {
                    Request cachedRequest = it.next();
                    if (contains(cachedRequest, request)) {
                        // already covered by a cached request, nothing to add
                        return;
                    } else if (contains(request, cachedRequest)) {
                        // new request supersedes this cached one, drop it
                        it.remove();
                    }
                }
                data.put(request, record);
                cacheDataInFile(datasetGroupPath, data);
            }
        } else {
            data = new HashMap<Request, IDataRecord>();
            data.put(request, record);
            cacheData(datasetGroupPath, data);
        }
    }

    private Map<Request, IDataRecord> getData(String datasetGroupPath) {
        Map<Request, IDataRecord> data = getDataFromMemory(datasetGroupPath);
        if (data == null) {
            data = getDataFromFile(datasetGroupPath);
            if (data != null) {
                // promote disk hit into the memory cache for faster reuse
                cacheDataInMemory(datasetGroupPath, data);
            }
        }
        return data;
    }

    private Map<Request, IDataRecord> getDataFromMemory(String datasetGroupPath) {
        Reference<Map<Request, IDataRecord>> ref = dataMemoryCache
                .get(datasetGroupPath);
        if (ref != null) {
            return ref.get();
        }
        return null;
    }

    @SuppressWarnings("unchecked")
    private Map<Request, IDataRecord> getDataFromFile(String datasetGroupPath) {
        File cacheFile = getCacheFile(datasetGroupPath);
        if (cacheFile != null && cacheFile.exists()) {
            return retrieveFromCache(cacheFile, Map.class);
        }
        return null;
    }

    private void cacheData(String datasetGroupPath,
            Map<Request, IDataRecord> data) {
        cacheDataInMemory(datasetGroupPath, data);
        cacheDataInFile(datasetGroupPath, data);
    }

    private void cacheDataInMemory(String datasetGroupPath,
            Map<Request, IDataRecord> data) {
        dataMemoryCache.put(datasetGroupPath,
                new SoftReference<Map<Request, IDataRecord>>(data));
    }

    private void cacheDataInFile(String datasetGroupPath,
            Map<Request, IDataRecord> data) {
        storeToCache(getCacheFile(datasetGroupPath), data);
    }

    /**
     * Determine if the data returned by outer contains enough of the data to
     * fulfill inner.
     *
     * @return true if outer has enough data for inner, false if not.
     */
    private static boolean contains(Request outer, Request inner) {
        Type outerType = outer.getType();
        /*
         * Must use inner's own type here. Reading outer.getType() twice (the
         * previous behavior) misclassified inner ALL requests as SLAB/POINT
         * and caused NPEs on the missing slab indices/points.
         */
        Type innerType = inner.getType();
        if (outerType == Type.ALL) {
            return true;
        } else if (outerType == Type.SLAB) {
            if (innerType == Type.SLAB) {
                return getRectangle(outer).contains(getRectangle(inner));
            }
            if (innerType == Type.POINT) {
                Rectangle rect = getRectangle(outer);
                for (Point p : inner.getPoints()) {
                    if (!rect.contains(p)) {
                        return false;
                    }
                }
                return true;
            }
        } else if (outerType == Type.POINT) {
            if (innerType == Type.POINT) {
                Set<Point> outerSet = new HashSet<Point>(Arrays.asList(outer
                        .getPoints()));
                for (Point p : inner.getPoints()) {
                    if (!outerSet.contains(p)) {
                        return false;
                    }
                }
                return true;
            }
        }
        return false;
    }

    /**
     * Create a new {@link IDataRecord} with a subset of the data in
     * cachedRecord. The new record will be the data needed to fulfill
     * newRequest. This method should only be used if cacheRequest contains
     * newRequest.
     */
    private static IDataRecord slice(Request newRequest, Request cacheRequest,
            IDataRecord cachedRecord) {
        Type cacheType = cacheRequest.getType();
        Type newType = newRequest.getType();
        if (cacheType == Type.ALL) {
            if (newType == Type.SLAB) {
                return sliceAllToSlab(newRequest, cacheRequest, cachedRecord);
            }
        } else if (cacheType == Type.SLAB) {
            if (newType == Type.SLAB) {
                return sliceSlabToSlab(newRequest, cacheRequest, cachedRecord);
            }
        }
        return null;
    }

    /**
     * Specialized slice handling for getting a slab record from an ALL request.
     */
    private static IDataRecord sliceAllToSlab(Request newRequest,
            Request cacheRequest, IDataRecord cachedRecord) {
        Buffer buffer = toBuffer(cachedRecord);
        if (buffer == null) {
            return null;
        }
        Rectangle newRect = getRectangle(newRequest);
        // an ALL request covers the record's full dimensions
        long[] sizes = cachedRecord.getSizes();
        Rectangle cacheRect = new Rectangle((int) sizes[0], (int) sizes[1]);
        buffer = BufferSlicer.slice(buffer, newRect, cacheRect);
        IDataRecord newRecord = toDataRecord(buffer, newRect, cachedRecord);
        return newRecord;
    }

    /**
     * Specialized slice handling for getting a slab record from a larger slab
     * record.
     */
    private static IDataRecord sliceSlabToSlab(Request newRequest,
            Request cacheRequest, IDataRecord cachedRecord) {
        Buffer buffer = toBuffer(cachedRecord);
        if (buffer == null) {
            return null;
        }
        Rectangle newRect = getRectangle(newRequest);
        Rectangle cacheRect = getRectangle(cacheRequest);
        buffer = BufferSlicer.slice(buffer, newRect, cacheRect);
        IDataRecord newRecord = toDataRecord(buffer, newRect, cachedRecord);
        return newRecord;
    }

    /**
     * Extract the raw numeric data from a {@link IDataRecord} in {@link Buffer}
     * form. Returns null for unsupported record types.
     */
    private static Buffer toBuffer(IDataRecord record) {
        if (record instanceof FloatDataRecord) {
            return FloatBuffer.wrap(((FloatDataRecord) record).getFloatData());
        } else if (record instanceof IntegerDataRecord) {
            return IntBuffer.wrap(((IntegerDataRecord) record).getIntData());
        } else if (record instanceof ShortDataRecord) {
            return ShortBuffer.wrap(((ShortDataRecord) record).getShortData());
        } else if (record instanceof ByteDataRecord) {
            return ByteBuffer.wrap(((ByteDataRecord) record).getByteData());
        }
        return null;
    }

    /**
     * Convert a {@link Buffer} into an {@link IDataRecord}, the record will
     * have dataset attributes from copy but with new sizes.
     */
    private static IDataRecord toDataRecord(Buffer buffer, Rectangle sizes,
            IDataRecord copy) {
        String name = copy.getName();
        String group = copy.getGroup();
        IDataRecord record = null;
        if (buffer instanceof FloatBuffer) {
            record = new FloatDataRecord(name, group,
                    ((FloatBuffer) buffer).array());
        } else if (buffer instanceof IntBuffer) {
            record = new IntegerDataRecord(name, group,
                    ((IntBuffer) buffer).array());
        } else if (buffer instanceof ShortBuffer) {
            record = new ShortDataRecord(name, group,
                    ((ShortBuffer) buffer).array());
        } else if (buffer instanceof ByteBuffer) {
            record = new ByteDataRecord(name, group,
                    ((ByteBuffer) buffer).array());
        } else {
            return null;
        }
        record.setSizes(new long[] { sizes.width, sizes.height });
        record.setDataAttributes(copy.getDataAttributes());
        record.setDimension(copy.getDimension());
        record.setFillValue(copy.getFillValue());
        record.setProperties(copy.getProperties());
        return record;
    }

    /**
     * Convert a slab {@link Request} into a {@link Rectangle}.
     */
    private static Rectangle getRectangle(Request request) {
        int[] max = request.getMaxIndexForSlab();
        int[] min = request.getMinIndexForSlab();
        return new Rectangle(min[0], min[1], max[0] - min[0], max[1] - min[1]);
    }

    /**
     * Get a unique File to store a cached object.
     *
     * @param obj
     *            the cache key (group name or dataset/group path); its MD5
     *            digest becomes the file name.
     * @return the cache file, or null if the digest could not be computed.
     */
    private File getCacheFile(String obj) {
        try {
            MessageDigest md = MessageDigest.getInstance("MD5");
            md.update(obj.getBytes());
            String md5sum = toHex(md.digest());
            return new File(cacheDir, md5sum);
        } catch (Exception e) {
            statusHandler.handle(Priority.WARN, "Error getting cache file", e);
        }
        return null;
    }

    /**
     * Load a java object from a cache file.
     *
     * @param file
     *            the file containing the object
     * @param clazz
     *            the type of the object we are expecting
     * @return an object of type clazz loaded from file, or null on any failure
     *         or type mismatch.
     */
    private static <T> T retrieveFromCache(File file, Class<T> clazz) {
        T rval = null;
        try {
            FileInputStream fin = new FileInputStream(file);
            Object fromFile = DynamicSerializationManager.getManager(
                    SerializationType.Thrift).deserialize(fin);
            fin.close();
            if (clazz.isInstance(fromFile)) {
                rval = clazz.cast(fromFile);
            }
        } catch (Throwable e) {
            statusHandler.handle(Priority.WARN,
                    "Error retreiving object from cache file", e);
        }
        if (rval != null) {
            // touch the file so the LRU eviction sees it as recently used
            LRUCacheFS.poll(file);
        }
        return rval;
    }

    /**
     * Store a java object into a file. Failures are logged as warnings; the
     * cache silently degrades rather than propagating errors.
     */
    private static void storeToCache(File file, Object result) {
        if (result != null) {
            try {
                if (!file.getParentFile().exists()) {
                    file.getParentFile().mkdirs();
                }
                FileOutputStream fout = new FileOutputStream(file);
                DynamicSerializationManager
                        .getManager(SerializationType.Thrift).serialize(result,
                                fout);
                fout.close();
                LRUCacheFS.poll(file);
                file.setReadable(true, false);
                file.setWritable(true, false);
            } catch (Exception e) {
                statusHandler.handle(Priority.WARN,
                        "Error storing object to file", e);
            }
        }
    }

    /**
     * Convert some bytes to hex, used to generate unique file names.
     */
    private static String toHex(byte[] raw) {
        if (raw == null) {
            return null;
        }
        final StringBuilder hex = new StringBuilder(2 * raw.length);
        for (final byte b : raw) {
            hex.append(HEXES.charAt((b & 0xF0) >> 4)).append(
                    HEXES.charAt((b & 0x0F)));
        }
        return hex.toString();
    }
}

View file

@ -32,13 +32,16 @@ import com.raytheon.uf.common.serialization.ISerializableObject;
*
* <pre>
* SOFTWARE HISTORY
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Feb 9, 2007 chammack Initial Creation.
* Apr 1, 2008 chammack Added delete API
* Aug 3, 2009 chammack Modified to support Request
* Sep 27, 2010 5091 njensen Added deleteFiles(String)
* Feb 12, 2013 #1608 randerso Added explicit methods for deleting groups and datasets
* Date Ticket# Engineer Description
* ------------- -------- ----------- --------------------------
* Feb 09, 2007 chammack Initial Creation.
* Apr 01, 2008 chammack Added delete API
* Aug 03, 2009 chammack Modified to support Request
* Sep 27, 2010 5091 njensen Added deleteFiles(String)
* Feb 12, 2013 1608 randerso Added explicit methods for deleting
* groups and datasets
* Sep 19, 2013 2309 bsteffen Deprecate retrieve(String, boolean)
*
*
* </pre>
*
@ -155,6 +158,7 @@ public interface IDataStore extends ISerializableObject {
* @throws StorageException
* @throws FileNotFoundException
*/
@Deprecated
public abstract IDataRecord[] retrieve(String group,
boolean includeInterpolated) throws StorageException,
FileNotFoundException;

View file

@ -28,6 +28,8 @@
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 07/21/10 njensen Initial Creation.
# 09/19/13 2309 bsteffen Fix group name in returned
# records.
#
#
#
@ -57,7 +59,7 @@ def createStorageRecord(rawData, ds):
parentName = '/'
slashIndex = name.rfind('/')
if slashIndex > -1:
parentName = name[0:slashIndex]
parentName = name[0:slashIndex].replace('::','/')
name = name[slashIndex+1:]
inst.setName(name)
inst.setGroup(parentName)