Issue #1949: Normalize the GFE Database, incorporate peer review comments, add upgrade script.

Updated GFE DataURI to individual fields to fix D2D autoUpdate.

Change-Id: I9cf7a2192ef2e4c0956aa0836d69767c1ede0624

Former-commit-id: 9df464410f7d87df9c6ce52958431d0718ed7d5a
This commit is contained in:
Richard Peter 2013-04-24 16:48:27 -05:00
parent da98c61c36
commit 8124a4c557
50 changed files with 2872 additions and 2027 deletions

View file

@ -67,6 +67,7 @@ import com.vividsolutions.jts.geom.Coordinate;
* 03/13/2008 879 rbell Legacy conversion.
* 06/10/2009 2159 rjpeter Updated isValid to call gridSlice.isValid
* 02/19/2013 1637 randerso Added throws declarations to translateDataFrom
* 04/23/2013 1949 rjpeter Removed validation on copy, source is verified on store.
* </pre>
*
* @author chammack
@ -305,14 +306,6 @@ public abstract class AbstractGridData implements IGridData {
public boolean copyGridValues(final IGridData sourceGrid) {
populate();
// ensure valid data in the source before attempting a copy
if (!sourceGrid.isValid()) {
statusHandler.handle(Priority.PROBLEM,
"Attempt to copyGridValues from invalid grid on "
+ getParm().getParmID());
return false;
}
// validate data type
if (sourceGrid.getParm().getGridInfo().getGridType() != getParm()
.getGridInfo().getGridType()) {
@ -324,7 +317,6 @@ public abstract class AbstractGridData implements IGridData {
}
// validate units same or can be converted
if (!getParm()
.getGridInfo()
.getUnitObject()
@ -411,16 +403,16 @@ public abstract class AbstractGridData implements IGridData {
ArrayList<GridDataHistory> thisGDHA = new ArrayList<GridDataHistory>();
// add one by one to eliminate any duplicates
for (int i = 0; i < history.length; i++) {
for (GridDataHistory element : history) {
boolean found = false;
for (GridDataHistory thisGDH : thisGDHA) {
if (history[i].equals(thisGDH)) {
if (element.equals(thisGDH)) {
found = true;
break;
}
}
if (!found) {
thisGDHA.add(history[i]);
thisGDHA.add(element);
}
}
this.gridSlice.setHistory(thisGDHA
@ -565,8 +557,8 @@ public abstract class AbstractGridData implements IGridData {
// If we've left the area, we're all done
int x = (int) (pos.x + 0.5); // round off
int y = (int) (pos.y + 0.5); // round off
if (x >= area.getXdim() || y >= area.getYdim()
|| area.get(x, y) != 1) {
if ((x >= area.getXdim()) || (y >= area.getYdim())
|| (area.get(x, y) != 1)) {
// We're either off the grid or out of the area
edge.x = x;
edge.y = y;
@ -652,7 +644,7 @@ public abstract class AbstractGridData implements IGridData {
for (int i = ll.x; i <= ur.x; i++) {
for (int j = ll.y; j <= ur.y; j++) {
if (points.get(i, j) > 0) {
if (i == 0 || i == xMax || j == 0 || j == yMax) {
if ((i == 0) || (i == xMax) || (j == 0) || (j == yMax)) {
edge.set(i, j);
} else {
for (int k = i - 1; k <= i + 1; k++) {
@ -727,7 +719,8 @@ public abstract class AbstractGridData implements IGridData {
Point p = new Point((int) thisCoord.x, (int) thisCoord.y);
// if point is in the grid
if (p.x >= 0 && p.x < gridSize.x && p.y >= 0 && p.y < gridSize.y) {
if ((p.x >= 0) && (p.x < gridSize.x) && (p.y >= 0)
&& (p.y < gridSize.y)) {
gridCoords.add(p);
}
}
@ -888,8 +881,8 @@ public abstract class AbstractGridData implements IGridData {
public List<String> getHistorySites() {
GridDataHistory[] h = this.getHistory();
List<String> sites = new ArrayList<String>();
for (int i = 0; i < h.length; i++) {
String site = h[i].getOriginParm().getDbId().getSiteId();
for (GridDataHistory element : h) {
String site = element.getOriginParm().getDbId().getSiteId();
if (!sites.contains(site)) {
sites.add(site);
}

View file

@ -82,7 +82,7 @@ import com.raytheon.viz.gfe.core.griddata.IGridData;
* 01/21/12 #1504 randerso Cleaned up old debug logging to improve performance
* 02/12/13 #1597 randerso Made save threshold a configurable value. Added detailed
* logging for save performance
*
* 04/23/13 #1949 rjpeter Added logging of number of records.
* </pre>
*
* @author chammack
@ -120,7 +120,8 @@ public class DbParm extends Parm {
}
}
if (this.dataManager != null && this.dataManager.getClient() != null) {
if ((this.dataManager != null)
&& (this.dataManager.getClient() != null)) {
this.lockTable = this.dataManager.getClient().getLockTable(
this.getParmID());
}
@ -222,7 +223,7 @@ public class DbParm extends Parm {
.getGridHistory(getParmID(), gridTimes);
histories = (Map<TimeRange, List<GridDataHistory>>) sr
.getPayload();
if (!sr.isOkay() || histories.size() != gridTimes.size()) {
if (!sr.isOkay() || (histories.size() != gridTimes.size())) {
statusHandler.handle(Priority.PROBLEM,
"Unable to retrieve gridded data [history] for "
+ getParmID() + sr);
@ -456,13 +457,13 @@ public class DbParm extends Parm {
IGridData[] grids = this.getGridInventory(tr);
// if only a single unmodified grid exactly matches the time range
if (grids.length == 1 && !this.isLocked(tr)
if ((grids.length == 1) && !this.isLocked(tr)
&& grids[0].getGridTime().equals(tr)) {
List<GridDataHistory> newHist = histories.get(tr);
GridDataHistory[] currentHist = grids[0].getHistory();
// if current history exists and has a matching update time
if (currentHist != null
if ((currentHist != null)
&& currentHist[0].getUpdateTime().equals(
newHist.get(0).getUpdateTime())) {
// update last sent time
@ -495,7 +496,7 @@ public class DbParm extends Parm {
}
timer.stop();
perfLog.logDuration("Server lock change for " + this.getParmID() + " "
+ lreq.size() + " time rangess", timer.getElapsedTime());
+ lreq.size() + " time ranges", timer.getElapsedTime());
timer.reset();
timer.start();
@ -565,7 +566,9 @@ public class DbParm extends Parm {
int gridCount = 0;
int totalGrids = 0;
long totalSize = 0;
int totalRecords = 0;
long size = 0;
int recordCount = 0;
for (int i = 0; i < trs.size(); i++) {
// ensure we have a lock for the time period
TimeRange lockTime = new TimeRange();
@ -629,6 +632,7 @@ public class DbParm extends Parm {
}
totalGrids += gridCount;
totalRecords += records.size();
totalSize += size;
pendingUnlocks.clear();
@ -641,9 +645,10 @@ public class DbParm extends Parm {
}
// if any grids or any time not saved
if (size > 0 || saveTime.getDuration() > 0) {
if ((size > 0) || (saveTime.getDuration() > 0)) {
sgr.add(new SaveGridRequest(getParmID(), saveTime, records,
dataManager.clientISCSendStatus()));
recordCount = records.size();
}
// if we haven't had a failure yet add to pending locks
@ -666,6 +671,7 @@ public class DbParm extends Parm {
totalSize += size;
totalGrids += gridCount;
totalRecords += recordCount;
pendingUnlocks.clear();
}
@ -685,8 +691,8 @@ public class DbParm extends Parm {
timer.stop();
perfLog.logDuration("Save Grids " + getParmID().getParmName() + ": "
+ totalGrids + " grids (" + totalSize + " bytes) ",
timer.getElapsedTime());
+ totalRecords + " records, " + totalGrids + " grids ("
+ totalSize + " bytes) ", timer.getElapsedTime());
return success;
}

View file

@ -143,7 +143,7 @@ public class VParm extends Parm {
@Override
public void looseLocks() {
// simply replace the lock table with one without any locks
this.lockTable.setLocks(new ArrayList<Lock>());
this.lockTable.setLocks(new ArrayList<Lock>(0));
}
}

View file

@ -42,12 +42,10 @@
</win>
</launcher>
<vm>
<windows include="true">jdk1.6.0</windows>
</vm>
<plugins>
</plugins>

View file

@ -0,0 +1,84 @@
-- called by normalizeGfe.sh to create new GFE tables
-- Creates the normalized GFE schema (Issue #1949): gfe_dbid and gfe_parmid
-- replace the denormalized dbId/parmId string columns on the gfe table, and
-- gfe_locks replaces gfelocktable.  Drops any prior versions first so the
-- upgrade script can be re-run safely; CASCADE also removes dependent
-- constraints referencing these tables.
DROP TABLE IF EXISTS gfe_locks CASCADE;
DROP TABLE IF EXISTS gfe_parmid CASCADE;
DROP TABLE IF EXISTS gfe_dbid CASCADE;
DROP SEQUENCE IF EXISTS gfe_lock_seq;
DROP SEQUENCE IF EXISTS gfe_parmid_seq;
DROP SEQUENCE IF EXISTS gfe_dbid_seq;
DROP SEQUENCE IF EXISTS gfe_history_seq;
-- One row per GFE DatabaseID (site/model/time/type).  Natural key enforced
-- by the UNIQUE constraint; surrogate integer id is referenced by gfe_parmid.
CREATE TABLE gfe_dbid
(
  id integer NOT NULL,
  dbtype character varying(15),
  format character varying(255) NOT NULL,
  modelname character varying(64) NOT NULL,
  modeltime character varying(13) NOT NULL,
  siteid character varying(4) NOT NULL,
  CONSTRAINT gfe_dbid_pkey PRIMARY KEY (id),
  CONSTRAINT gfe_dbid_siteid_modelname_modeltime_dbtype_key UNIQUE (siteid, modelname, modeltime, dbtype)
)
WITH (
  OIDS=FALSE
);
ALTER TABLE gfe_dbid
  OWNER TO awips;
-- One row per GFE ParmID within a database; deleting a gfe_dbid row cascades
-- to its parms.
CREATE TABLE gfe_parmid
(
  id integer NOT NULL,
  parmlevel character varying(8),
  parmname character varying(100),
  dbid_id integer NOT NULL,
  CONSTRAINT gfe_parmid_pkey PRIMARY KEY (id),
  CONSTRAINT fkbec2950012156549 FOREIGN KEY (dbid_id)
      REFERENCES gfe_dbid (id) MATCH SIMPLE
      ON UPDATE NO ACTION ON DELETE CASCADE,
  CONSTRAINT gfe_parmid_dbid_id_parmname_parmlevel_key UNIQUE (dbid_id, parmname, parmlevel)
)
WITH (
  OIDS=FALSE
);
ALTER TABLE gfe_parmid
  OWNER TO awips;
-- Grid locks, normalized against gfe_parmid (replaces gfelocktable).  The
-- UNIQUE constraint prevents duplicate locks for the same parm/time range.
CREATE TABLE gfe_locks
(
  id integer NOT NULL,
  endtime timestamp without time zone NOT NULL,
  starttime timestamp without time zone NOT NULL,
  wsid character varying(255) NOT NULL,
  parmid_id integer NOT NULL,
  CONSTRAINT gfe_locks_pkey PRIMARY KEY (id),
  CONSTRAINT fk92582e8f7bab05cc FOREIGN KEY (parmid_id)
      REFERENCES gfe_parmid (id) MATCH SIMPLE
      ON UPDATE NO ACTION ON DELETE CASCADE,
  CONSTRAINT gfe_locks_parmid_id_starttime_endtime_key UNIQUE (parmid_id, starttime, endtime)
)
WITH (
  OIDS=FALSE
);
ALTER TABLE gfe_locks
  OWNER TO awips;
-- Id sequences for the new tables (Hibernate-managed generators).
CREATE SEQUENCE gfe_lock_seq
  INCREMENT 1
  MINVALUE 1
  MAXVALUE 9223372036854775807
  START 1
  CACHE 1;
ALTER TABLE gfe_lock_seq
  OWNER TO awips;
CREATE SEQUENCE gfe_history_seq
  INCREMENT 1
  MINVALUE 1
  MAXVALUE 9223372036854775807
  START 1
  CACHE 1;
ALTER TABLE gfe_history_seq
  OWNER TO awips;
-- New FK column on gfe; populated later by gfeToParmIdUpdates.sql and
-- constrained by updateGfeConstraintsAndIndexes.sql.
ALTER TABLE gfe ADD COLUMN parmId_id integer;

View file

@ -0,0 +1,104 @@
#!/bin/bash
# Main script for updating GFE database structure
# (Issue #1949 GFE normalization).  Runs, in order:
#   1. createNewGfeTables.sql            - create gfe_dbid/gfe_parmid/gfe_locks
#   2. query distinct parmIds from gfe   - dumped to parmIdList.txt
#   3. parseParmIds.py                   - generate insert/update SQL from the dump
#   4. dbIdInserts.sql / parmIdInserts.sql / gfeToParmIdUpdates.sql
#   5. updateGfeConstraintsAndIndexes.sql
#   6. rewrite gfe dataURIs to the new format, then VACUUM FULL
# Each step is checked; any failure aborts with a non-zero exit status.

PSQL="/awips2/psql/bin/psql"
PYTHON="/awips2/python/bin/python"

SQL_SCRIPT="createNewGfeTables.sql"

# ensure that the sql script is present
if [ ! -f ${SQL_SCRIPT} ]; then
    echo "ERROR: the required sql script - ${SQL_SCRIPT} was not found."
    echo "FATAL: the update has failed!"
    exit 1
fi

echo "Creating new GFE tables"
${PSQL} -U awips -d metadata -f ${SQL_SCRIPT}
if [ $? -ne 0 ]; then
    echo "FATAL: the update has failed!"
    exit 1
fi

echo
echo "Querying GFE parmIds"
RETRIEVE_PARMIDS_SQL="SELECT distinct parmId FROM gfe order by parmID"
_parmid_list_txt=parmIdList.txt
# -t suppresses headers/footers so the file contains one parmId per line
${PSQL} -U awips -d metadata -c "${RETRIEVE_PARMIDS_SQL}" -t -o ${_parmid_list_txt}
if [ $? -ne 0 ]; then
    echo "ERROR: Failed to retrieve the list of parm ids."
    echo "FATAL: The update has failed."
    exit 1
fi

echo
echo "Parsing parmIds for insertion into new tables"
PYTHON_PARSE_SCRIPT="parseParmIds.py"
if [ ! -f ${PYTHON_PARSE_SCRIPT} ]; then
    echo "ERROR: the required python script - ${PYTHON_PARSE_SCRIPT} was not found."
    echo "FATAL: the update has failed!"
    exit 1
fi
${PYTHON} ${PYTHON_PARSE_SCRIPT} ${_parmid_list_txt}
if [ $? -ne 0 ]; then
    echo "ERROR: Failed to parse parm ids."
    echo "FATAL: The update has failed."
    exit 1
fi

echo
echo "Inserting db ids"
# dbIdInserts.sql generated from parseParmIds.py
${PSQL} -U awips -d metadata -q -f dbIdInserts.sql
if [ $? -ne 0 ]; then
    echo "ERROR: Failed to insert database ids."
    echo "FATAL: The update has failed."
    exit 1
fi

echo
echo "Inserting parm ids"
# parmIdInserts.sql generated from parseParmIds.py
${PSQL} -U awips -d metadata -q -f parmIdInserts.sql
if [ $? -ne 0 ]; then
    echo "ERROR: Failed to insert parm ids."
    echo "FATAL: The update has failed."
    exit 1
fi

echo
echo "Add gfe record reference to parm id table"
# gfeToParmIdUpdates.sql generated from parseParmIds.py
${PSQL} -U awips -d metadata -q -f gfeToParmIdUpdates.sql
if [ $? -ne 0 ]; then
    echo "ERROR: Failed to add gfe to parm id mapping."
    echo "FATAL: The update has failed."
    exit 1
fi

echo
echo "Updating constraints and indexes on gfe"
SQL_SCRIPT="updateGfeConstraintsAndIndexes.sql"
${PSQL} -U awips -d metadata -f ${SQL_SCRIPT}
if [ $? -ne 0 ]; then
    echo "ERROR: Failed to update constraints and indexes."
    echo "FATAL: The update has failed."
    exit 1
fi

echo
echo "Updating dataURIs for gfe"
# Rewrites old-format dataURIs (/gfe/<time>/<parm>_<level>:<site>_GRID_...)
# into the new field ordering used by the normalized records.
UPDATE_DATAURIS_SQL="UPDATE gfe SET dataURI =regexp_replace(dataURI, '(/gfe/[^/]+)/([^_]+)_([^:]+):([^_]+)_GRID_([^_]*)_([^_]+)_(\\d{8}_\\d{4})/[^/]+', '\\1/\\4/\\6/\\7/\\5/\\2/\\3') where dataURI ~ '/gfe/[^/]+/[^/]+/[^/]+';"
${PSQL} -U awips -d metadata -c "${UPDATE_DATAURIS_SQL}"
if [ $? -ne 0 ]; then
    # BUGFIX: message previously said "Failed to retrieve the list of parm
    # ids." (copy-paste from the query step above); report the actual step.
    echo "ERROR: Failed to update the gfe dataURIs."
    echo "FATAL: The update has failed."
    exit 1
fi

echo
echo "Running full vacuum for gfe"
# Best effort; reclaims space freed by the dropped columns.  Not error-checked
# because the migration itself is already complete at this point.
${PSQL} -U awips -d metadata -c "VACUUM FULL VERBOSE ANALYZE gfe"

View file

@ -0,0 +1,53 @@
# Called by normalizeGfe.sh to parse the distinct parmIds into table insert
# statements.  Reads the psql dump of "SELECT distinct parmId FROM gfe"
# (one parmId string per line, path given as argv[1]) and generates:
#   dbIdInserts.sql        - INSERTs for the gfe_dbid table
#   parmIdInserts.sql      - INSERTs for the gfe_parmid table
#   gfeToParmIdUpdates.sql - UPDATEs linking existing gfe rows to parmId ids
# Each insert file also gets a CREATE SEQUENCE statement seeded past the ids
# consumed here (Hibernate allocates ids in blocks of 50).
import sys

from dynamicserialize.dstypes.com.raytheon.uf.common.dataplugin.gfe.db.objects import ParmID
# DatabaseID import retained for parity with ParmID's serialized dependencies.
from dynamicserialize.dstypes.com.raytheon.uf.common.dataplugin.gfe.db.objects import DatabaseID

fileName = sys.argv[1]
f = open(fileName, 'r')
dbIdInsertFile = open('dbIdInserts.sql', 'w')
parmIdInsertFile = open('parmIdInserts.sql', 'w')
recordUpdateFile = open('gfeToParmIdUpdates.sql', 'w')

dbIds = {}    # modelId string -> gfe_dbid.id
parmIds = {}  # parmId string -> gfe_parmid.id
dbIdCounter = 1
parmIdCounter = 1

for parmIdString in f:
    # Strip new line (and surrounding whitespace) once up front
    parmIdString = parmIdString.strip()
    # skip last line of file that's empty
    if len(parmIdString) > 0:
        # dict membership test works on both Python 2 and 3
        # (dict.has_key was removed in Python 3)
        if parmIdString not in parmIds:
            parmIds[parmIdString] = parmIdCounter
            parmId = ParmID(parmIdString)
            dbId = parmId.getDbId()
            dbIdString = dbId.getModelId()
            if dbIdString not in dbIds:
                dbIds[dbIdString] = dbIdCounter
                dbIdInsertFile.write("INSERT INTO gfe_dbid (id, dbtype, format, modelname, modeltime, siteid) VALUES (" +
                    str(dbIdCounter) + ", '" + dbId.getDbType() + "', '" + dbId.getFormat() + "', '" +
                    dbId.getModelName() + "', '" + dbId.getModelTime() + "', '" + dbId.getSiteId() + "');\n")
                dbIdCounter += 1
            dbIdVal = dbIds[dbIdString]
            parmIdInsertFile.write("INSERT INTO gfe_parmid (id, parmlevel, parmname, dbid_id) VALUES (" +
                str(parmIdCounter) + ", '" + parmId.getParmLevel() + "', '" +
                parmId.getParmName() + "', " + str(dbIdVal) + ");\n")
            recordUpdateFile.write("UPDATE gfe set parmId_id = " + str(parmIdCounter) +
                " WHERE parmId = '" + parmIdString + "';\n")
            parmIdCounter += 1
        else:
            # should never happen if query feeding this is using distinct
            print("Received duplicate parmId: " + parmIdString)

# Seed the sequences past the block already consumed: Hibernate's sequence
# generator multiplies by the allocation size (50), so the next free block is
# (counter // 50) + 1.  Floor division (//) keeps this an int on Python 3 too.
dbIdInsertFile.write("CREATE SEQUENCE gfe_dbid_seq INCREMENT 1 MINVALUE 1 MAXVALUE 9223372036854775807 START "
    + str((dbIdCounter // 50) + 1) + " CACHE 1;\nALTER TABLE gfe_dbid_seq OWNER TO awips;")
parmIdInsertFile.write("CREATE SEQUENCE gfe_parmid_seq INCREMENT 1 MINVALUE 1 MAXVALUE 9223372036854775807 START "
    + str((parmIdCounter // 50) + 1) + " CACHE 1;\nALTER TABLE gfe_parmid_seq OWNER TO awips;")

f.close()
dbIdInsertFile.close()
parmIdInsertFile.close()
recordUpdateFile.close()

View file

@ -0,0 +1,36 @@
-- Called by normalizeGfe.sh to drop and add constraints
-- Run after the parmid/dbid data has been migrated: removes the old
-- denormalized columns/indexes from gfe and wires the table to the new
-- gfe_parmid table via parmid_id.
ALTER TABLE gfe DROP CONSTRAINT gfe_datauri_key;
DROP INDEX IF EXISTS gfeParmTime_idx;
DROP INDEX IF EXISTS gfedatauri_idx;
DROP INDEX IF EXISTS gfefcsttimeindex;
-- The string-based identity columns are now represented by gfe_dbid/gfe_parmid
ALTER TABLE gfe DROP COLUMN IF EXISTS parmname;
ALTER TABLE gfe DROP COLUMN IF EXISTS parmlevel;
ALTER TABLE gfe DROP COLUMN IF EXISTS dbid;
ALTER TABLE gfe DROP COLUMN IF EXISTS parmid;
ALTER TABLE gfe ADD CONSTRAINT fk18f667bab05cc FOREIGN KEY (parmid_id)
      REFERENCES gfe_parmid (id) MATCH SIMPLE
      ON UPDATE NO ACTION ON DELETE CASCADE;
-- Replaces the old dataURI uniqueness: one record per parm and time window
ALTER TABLE gfe ADD CONSTRAINT gfe_parmid_id_rangestart_rangeend_reftime_forecasttime_key
      UNIQUE (parmid_id, rangestart, rangeend, reftime, forecasttime);
-- Re-point the history table's parent FK at the renamed columns
ALTER TABLE gfe_gridhistory DROP CONSTRAINT fk66434335e416514f;
ALTER TABLE gfe_gridhistory RENAME COLUMN key TO id;
ALTER TABLE gfe_gridhistory RENAME COLUMN parent to parent_id;
ALTER TABLE gfe_gridhistory ADD CONSTRAINT fk664343359ad1f975 FOREIGN KEY (parent_id)
      REFERENCES gfe (id) MATCH SIMPLE
      ON UPDATE NO ACTION ON DELETE CASCADE;
-- Superseded by the normalized gfe_locks table
DROP TABLE IF EXISTS gfelocktable;

View file

@ -50,6 +50,7 @@ import com.raytheon.uf.edex.database.query.DatabaseQuery;
* ------------ ---------- ----------- --------------------------
* 14Nov2008 1709 MW Fegan Initial creation.
* 14Apr2011 5163 cjeanbap NWRWAVES Setting AFOS text triggers in AWIPS II
* 04/24/13 1949 rjpeter Removed @Override on delete.
* </pre>
*
* @author mfegan
@ -90,8 +91,7 @@ public class SubscriptionDAO extends CoreDao {
super(config);
}
@Override
public void delete(PersistableDataObject obj) {
public void delete(PersistableDataObject<?> obj) {
super.delete(obj);
sendSubscriptionNotifyMessage(String.valueOf(obj.getIdentifier()));
}
@ -145,7 +145,7 @@ public class SubscriptionDAO extends CoreDao {
*/
@SuppressWarnings("unchecked")
public List<SubscriptionRecord> getSubscriptions() {
if (cachedRecords == null || dirtyRecords) {
if ((cachedRecords == null) || dirtyRecords) {
List<?> retVal = getHibernateTemplate().loadAll(this.daoClass);
if (retVal == null) {
logger.info("Unable to perform query, 'null' result returned");
@ -182,7 +182,7 @@ public class SubscriptionDAO extends CoreDao {
synchronized (recordsMap) {
rval = recordsMap.get(key);
}
if (rval == null || rval.isEmpty() || rval.size() == 0) {
if ((rval == null) || rval.isEmpty()) {
List<?> retVal = null;
List<QueryParam> params = new ArrayList<QueryParam>();
for (Property prop : props) {

View file

@ -36,7 +36,7 @@
factory-method="getInstance" depends-on="commonTimeRegistered">
</bean>
<bean factory-bean="siteAwareRegistry" factory-method="register">
<bean id="gfeSitesActive" factory-bean="siteAwareRegistry" factory-method="register">
<constructor-arg ref="gfeSiteActivation" />
</bean>

View file

@ -465,9 +465,8 @@
<property name="threads" value="1"/>
<property name="runningTimeOutMillis" value="300000"/>
<property name="threadSleepInterval" value="5000"/>
<property name="initialDelay" value="120000"/>
</bean>
<bean depends-on="gfeDbRegistered" id="sendIscSrv" class="com.raytheon.edex.plugin.gfe.isc.SendIscSrv">
<bean depends-on="gfeDbRegistered, gfeSitesActive" id="sendIscSrv" class="com.raytheon.edex.plugin.gfe.isc.SendIscSrv">
<constructor-arg ref="iscSendSrvCfg" />
</bean>
<!-- End ISC Send Beans -->

View file

@ -22,10 +22,9 @@
<property name="pendingInitMinTimeMillis" value="180000"/>
<property name="runningInitTimeOutMillis" value="300000"/>
<property name="threadSleepInterval" value="60000"/>
<property name="initialDelay" value="320000"/>
</bean>
<bean depends-on="gfeDbRegistered" id="smartInitSrv" class="com.raytheon.edex.plugin.gfe.smartinit.SmartInitSrv">
<bean depends-on="gfeDbRegistered, gfeSitesActive" id="smartInitSrv" class="com.raytheon.edex.plugin.gfe.smartinit.SmartInitSrv">
<constructor-arg ref="smartInitSrvCfg" />
</bean>

View file

@ -27,19 +27,20 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.hibernate.LockOptions;
import org.hibernate.Query;
import org.hibernate.Session;
import org.hibernate.StatelessSession;
import org.hibernate.Transaction;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.ParmID;
import com.raytheon.uf.common.dataplugin.gfe.server.lock.Lock;
import com.raytheon.uf.common.dataplugin.gfe.server.lock.LockTable;
import com.raytheon.uf.common.dataquery.db.QueryParam.QueryOperand;
import com.raytheon.uf.common.message.WsId;
import com.raytheon.uf.common.time.TimeRange;
import com.raytheon.uf.common.util.CollectionUtil;
import com.raytheon.uf.edex.database.DataAccessLayerException;
import com.raytheon.uf.edex.database.dao.CoreDao;
import com.raytheon.uf.edex.database.dao.DaoConfig;
import com.raytheon.uf.edex.database.query.DatabaseQuery;
/**
* Data access object for manipulating locks
@ -49,6 +50,7 @@ import com.raytheon.uf.edex.database.query.DatabaseQuery;
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* 06/17/08 #940 bphillip Initial Creation
* 04/19/13 rjpeter Normalized GFE Database.
* </pre>
*
* @author bphillip
@ -60,89 +62,12 @@ public class GFELockDao extends CoreDao {
super(DaoConfig.forClass(Lock.class));
}
/**
* Gets all locks held by a specified user
*
* @param wsId
* The workstation ID of the user
* @return All locks held by a specified user
* @throws DataAccessLayerException
* If database errors occur
*/
@SuppressWarnings("unchecked")
public List<Lock> getLocksByOwner(String wsId)
throws DataAccessLayerException {
DatabaseQuery query = new DatabaseQuery(daoClass.getName());
query.addQueryParam("wsId", wsId);
List<Lock> locks = (List<Lock>) queryByCriteria(query);
return locks;
}
/**
* Gets all locks in the specified time range
*
* @param parmId
* The parmId of the locks
* @param timeRange
* The time range to examine
* @return All locks in the specified time range
* @throws DataAccessLayerException
* If database errors occur
*/
@SuppressWarnings("unchecked")
public List<Lock> getLocksInRange(ParmID parmId, TimeRange timeRange)
throws DataAccessLayerException {
DatabaseQuery query = new DatabaseQuery(daoClass.getName());
query.addQueryParam("startTime", timeRange.getStart().getTime(),
QueryOperand.GREATERTHANEQUALS);
query.addQueryParam("endTime", timeRange.getEnd().getTime(),
QueryOperand.LESSTHANEQUALS);
query.addQueryParam("parmId", parmId);
List<Lock> locks = (List<Lock>) queryByCriteria(query);
return locks;
}
/**
* Gets a specific lock
*
* @param parmId
* The parmId of the lock
* @param timeRange
* The time range of the lock
* @param wsId
* The workstation ID of the lock holder
* @return A specific lock
* @throws DataAccessLayerException
* If database errors occur
*/
@SuppressWarnings("unchecked")
public Lock getLock(ParmID parmId, TimeRange timeRange, WsId wsId)
throws DataAccessLayerException {
DatabaseQuery query = new DatabaseQuery(daoClass.getName());
query.addQueryParam("startTime", timeRange.getStart().getTime());
query.addQueryParam("endTime", timeRange.getEnd().getTime());
query.addQueryParam("parmId", parmId);
List<Lock> locks = (List<Lock>) queryByCriteria(query);
if (locks.isEmpty()) {
logger.info("No locks returned for -- ParmID: " + parmId
+ " TimeRange: " + timeRange + " wsId: " + wsId);
return null;
} else if (locks.size() > 1) {
logger.info("Duplicate locks detected for -- ParmID: " + parmId
+ " TimeRange: " + timeRange + " wsId: " + wsId);
return locks.get(0);
} else {
return locks.get(0);
}
}
/**
* Gets locks for the provided list of ParmIDs. The locks are retrieved,
* lock tables are constructed and assigned the provided workstation ID
*
* @param parmIds
* The ParmIDs to get the lock tables for
* The database ParmIDs to get the lock tables for
* @param wsId
* The workstation ID to assign to the lock tables
* @return A map of the ParmID and its associated lock table
@ -150,77 +75,114 @@ public class GFELockDao extends CoreDao {
* If errors occur during database interaction
*/
@SuppressWarnings("unchecked")
public Map<ParmID, LockTable> getLocks(List<ParmID> parmIds, WsId wsId)
public Map<ParmID, LockTable> getLocks(final List<ParmID> parmIds, WsId wsId)
throws DataAccessLayerException {
// The return variable
Map<ParmID, LockTable> lockMap = new HashMap<ParmID, LockTable>();
// Variable to hold the results of the lock table query
List<Lock> queryResult = null;
// Return if no parmIDs are provided
if (parmIds.isEmpty()) {
return Collections.emptyMap();
}
DatabaseQuery query = new DatabaseQuery(daoClass.getName());
query.addQueryParam("parmId", parmIds, QueryOperand.IN);
queryResult = (List<Lock>) queryByCriteria(query);
// The return variable
Map<ParmID, LockTable> lockMap = new HashMap<ParmID, LockTable>(
parmIds.size(), 1);
ParmID lockParmID = null;
for (Lock lock : queryResult) {
lockParmID = lock.getParmId();
LockTable lockTable = lockMap.get(lockParmID);
if (lockTable == null) {
lockTable = new LockTable(lockParmID, new ArrayList<Lock>(),
wsId);
lockMap.put(lockParmID, lockTable);
}
lockTable.addLock(lock);
// create a blank lock table for each parmId ensuring all parms are
// covered
for (ParmID requiredParmId : parmIds) {
lockMap.put(requiredParmId, new LockTable(requiredParmId,
new ArrayList<Lock>(), wsId));
}
/*
* Do a check to make sure all required lock tables are present in the
* map
*/
if (parmIds != null) {
Session sess = null;
Transaction tx = null;
try {
sess = getHibernateTemplate().getSessionFactory().openSession();
tx = sess.beginTransaction();
// reattach object so any parmIds found don't requery
for (ParmID requiredParmId : parmIds) {
if (!lockMap.containsKey(requiredParmId)) {
lockMap.put(requiredParmId, new LockTable(requiredParmId,
new ArrayList<Lock>(0), wsId));
sess.buildLockRequest(LockOptions.NONE).lock(requiredParmId);
}
Query query = sess
.createQuery("FROM Lock WHERE parmId IN (:parmIds)");
query.setParameterList("parmIds", parmIds);
List<Lock> locks = query.list();
tx.commit();
// populate Lock table
for (Lock lock : locks) {
lockMap.get(lock.getParmId()).addLock(lock);
}
return lockMap;
} catch (Exception e) {
if (tx != null) {
try {
tx.rollback();
} catch (Exception e1) {
logger.error("Error occurred rolling back transaction", e1);
}
}
throw new DataAccessLayerException(
"Unable to look up locks for parmIds " + parmIds, e);
} finally {
if (sess != null) {
try {
sess.close();
} catch (Exception e) {
statusHandler.error(
"Error occurred closing database session", e);
}
}
}
return lockMap;
}
/**
* Updates additions and deletions to the lock table in a single transaction
* Adds and removes the passed locks.
*
* @param locksToDelete
* The locks to delete
* @param locksToAdd
* The locks to add
*/
public void updateCombinedLocks(Collection<Lock> locksToDelete,
Collection<Lock> locksToAdd) throws DataAccessLayerException {
if (!locksToDelete.isEmpty() || !locksToAdd.isEmpty()) {
Session s = this.getHibernateTemplate().getSessionFactory()
.openSession();
Transaction tx = s.beginTransaction();
try {
public void addRemoveLocks(final Collection<Lock> locksToAdd,
final Collection<Integer> locksToDelete)
throws DataAccessLayerException {
StatelessSession s = null;
Transaction tx = null;
try {
s = this.getHibernateTemplate().getSessionFactory()
.openStatelessSession();
tx = s.beginTransaction();
if (!CollectionUtil.isNullOrEmpty(locksToDelete)) {
Query q = s
.createQuery("DELETE FROM Lock WHERE id IN (:locksToDelete)");
q.setParameterList("locksToDelete", locksToDelete);
q.executeUpdate();
}
if (!CollectionUtil.isNullOrEmpty(locksToAdd)) {
for (Lock lock : locksToAdd) {
s.save(lock);
s.insert(lock);
}
for (Lock lock : locksToDelete) {
s.delete(lock);
}
tx.commit();
} catch (Throwable e) {
tx.rollback();
throw new DataAccessLayerException("Error combining locks", e);
} finally {
if (s != null) {
}
tx.commit();
} catch (Throwable e) {
tx.rollback();
throw new DataAccessLayerException("Error combining locks", e);
} finally {
if (s != null) {
try {
s.close();
} catch (Exception e) {
statusHandler.error(
"Error occurred closing database session", e);
}
}
}

View file

@ -40,7 +40,6 @@ import com.raytheon.edex.plugin.gfe.server.GridParmManager;
import com.raytheon.edex.plugin.gfe.util.SendNotifications;
import com.raytheon.uf.common.dataplugin.PluginException;
import com.raytheon.uf.common.dataplugin.gfe.GridDataHistory;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.GFERecord;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.ParmID;
import com.raytheon.uf.common.dataplugin.gfe.server.message.ServerResponse;
import com.raytheon.uf.common.dataplugin.gfe.server.notify.GridHistoryUpdateNotification;
@ -63,7 +62,8 @@ import com.raytheon.uf.edex.database.plugin.PluginFactory;
* 07/06/09 1995 bphillip Initial release
* 04/06/12 #457 dgilling Move call to delete records
* from queue into run().
*
* 04/23/13 #1949 rjpeter Move setting of lastSentTime to dao
* and removed initial delay.
* </pre>
*
* @author bphillip
@ -78,14 +78,12 @@ public class IscSendJob implements Runnable {
private static final SimpleDateFormat ISC_EXTRACT_DATE = new SimpleDateFormat(
"yyyyMMdd_HHmm");
private Map<String, IscSendScript> scripts;
private final Map<String, IscSendScript> scripts;
private int runningTimeOutMillis;
private int threadSleepInterval;
private int initialDelay;
/**
* Constructs a new IscSendJob
*
@ -98,14 +96,11 @@ public class IscSendJob implements Runnable {
scripts = new HashMap<String, IscSendScript>();
runningTimeOutMillis = 300000;
threadSleepInterval = 30000;
initialDelay = 120000;
}
@Override
public void run() {
long curTime = System.currentTimeMillis();
while ((!EDEXUtil.isRunning())
|| ((System.currentTimeMillis() - curTime) < initialDelay)) {
while (!EDEXUtil.isRunning()) {
try {
Thread.sleep(threadSleepInterval);
} catch (Throwable t) {
@ -205,6 +200,7 @@ public class IscSendJob implements Runnable {
}
try {
// TODO: Interact with IFPGridDatabase
GFEDao dao = (GFEDao) PluginFactory.getInstance().getPluginDao(
"gfe");
@ -216,31 +212,14 @@ public class IscSendJob implements Runnable {
}
WsId wsId = new WsId(InetAddress.getLocalHost(), "ISC", "ISC");
List<TimeRange> inventory = sr.getPayload();
List<TimeRange> overlapTimes = new ArrayList<TimeRange>();
for (TimeRange range : inventory) {
if (tr.contains(range)) {
overlapTimes.add(range);
}
}
List<GridHistoryUpdateNotification> notifications = new ArrayList<GridHistoryUpdateNotification>();
List<GFERecord> records = dao.getRecords(id, overlapTimes);
for (GFERecord record : records) {
List<GridDataHistory> history = record.getGridHistory();
Map<TimeRange, List<GridDataHistory>> historyMap = new HashMap<TimeRange, List<GridDataHistory>>();
Date now = new Date();
for (GridDataHistory hist : history) {
hist.setLastSentTime(now);
}
historyMap.put(record.getTimeRange(), history);
dao.saveOrUpdate(record);
notifications.add(new GridHistoryUpdateNotification(id,
historyMap, wsId, siteId));
}
List<GridHistoryUpdateNotification> notifications = new ArrayList<GridHistoryUpdateNotification>(
1);
Map<TimeRange, List<GridDataHistory>> histories = dao
.updateSentTime(id, tr, new Date());
notifications.add(new GridHistoryUpdateNotification(id,
histories, wsId, siteId));
SendNotifications.send(notifications);
} catch (PluginException e) {
statusHandler.error("Error creating GFE dao!", e);
} catch (Exception e) {
@ -268,12 +247,4 @@ public class IscSendJob implements Runnable {
public void setThreadSleepInterval(int threadSleepInterval) {
this.threadSleepInterval = threadSleepInterval;
}
public int getInitialDelay() {
return initialDelay;
}
public void setInitialDelay(int initialDelay) {
this.initialDelay = initialDelay;
}
}

View file

@ -78,6 +78,7 @@ public class IscSendRecord implements IPersistableDataObject, Serializable,
@GeneratedValue()
private int key;
// TODO: Normalize with parmId table
@DynamicSerializeElement
@Column(nullable = false)
@Type(type = "com.raytheon.uf.common.dataplugin.gfe.db.type.ParmIdType")
@ -143,7 +144,7 @@ public class IscSendRecord implements IPersistableDataObject, Serializable,
*/
@Override
public IscSendRecord clone() throws CloneNotSupportedException {
IscSendRecord rval = new IscSendRecord(this.parmID.clone(),
IscSendRecord rval = new IscSendRecord(this.parmID,
this.timeRange.clone(), this.xmlDest, this.state);
rval.setInsertTime((Date) this.insertTime.clone());
return rval;

View file

@ -52,7 +52,6 @@ public class SendIscSrv {
IscSendJob thread = new IscSendJob();
thread.setRunningTimeOutMillis(cfg.getRunningTimeOutMillis());
thread.setThreadSleepInterval(cfg.getThreadSleepInterval());
thread.setInitialDelay(cfg.getInitialDelay());
executor.execute(thread);
}
}

View file

@ -30,8 +30,8 @@ import java.util.concurrent.Executor;
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Oct 20, 2011 dgilling Initial creation
*
* Oct 20, 2011 dgilling Initial creation
* Apr 30, 2013 1949 rjpeter Removed initial delay.
* </pre>
*
* @author dgilling
@ -48,8 +48,6 @@ public class SendIscSrvConfig {
protected int threadSleepInterval;
protected int initialDelay;
public int getThreads() {
return threads;
}
@ -81,12 +79,4 @@ public class SendIscSrvConfig {
public void setThreadSleepInterval(int threadSleepInterval) {
this.threadSleepInterval = threadSleepInterval;
}
public int getInitialDelay() {
return initialDelay;
}
public void setInitialDelay(int initialDelay) {
this.initialDelay = initialDelay;
}
}

View file

@ -31,12 +31,12 @@ import com.raytheon.edex.plugin.gfe.db.dao.GFEDao;
import com.raytheon.edex.plugin.gfe.server.database.D2DGridDatabase;
import com.raytheon.edex.plugin.gfe.server.database.GridDatabase;
import com.raytheon.edex.plugin.gfe.server.lock.LockManager;
import com.raytheon.uf.common.dataplugin.PluginException;
import com.raytheon.uf.common.dataplugin.gfe.GridDataHistory;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.GFERecord;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.GridParmInfo;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.ParmID;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.TimeConstraints;
import com.raytheon.uf.common.dataplugin.gfe.server.lock.Lock;
import com.raytheon.uf.common.dataplugin.gfe.server.lock.LockTable;
import com.raytheon.uf.common.dataplugin.gfe.server.lock.LockTable.LockMode;
import com.raytheon.uf.common.dataplugin.gfe.server.message.ServerResponse;
@ -50,7 +50,14 @@ import com.raytheon.uf.common.dataplugin.gfe.slice.DiscreteGridSlice;
import com.raytheon.uf.common.dataplugin.gfe.slice.IGridSlice;
import com.raytheon.uf.common.dataplugin.gfe.slice.WeatherGridSlice;
import com.raytheon.uf.common.message.WsId;
import com.raytheon.uf.common.status.IPerformanceStatusHandler;
import com.raytheon.uf.common.status.IUFStatusHandler;
import com.raytheon.uf.common.status.PerformanceStatus;
import com.raytheon.uf.common.status.UFStatus;
import com.raytheon.uf.common.time.TimeRange;
import com.raytheon.uf.common.time.util.ITimer;
import com.raytheon.uf.common.time.util.TimeUtil;
import com.raytheon.uf.common.util.CollectionUtil;
import com.raytheon.uf.edex.database.plugin.PluginFactory;
/**
@ -64,9 +71,10 @@ import com.raytheon.uf.edex.database.plugin.PluginFactory;
* ------------ ---------- ----------- --------------------------
* 04/08/08 #875 bphillip Initial Creation
* 06/17/08 #940 bphillip Implemented GFE Locking
* 02/10/13     #1603     randerso    Returned number of records purged from timePurge
* 03/15/13     #1795     njensen     Added updatePublishTime()
* 04/23/13     #1949     rjpeter     Removed excess validation on retrieval, added
*                                    inventory for a given time range.
* </pre>
*
* @author bphillip
@ -74,6 +82,13 @@ import com.raytheon.uf.edex.database.plugin.PluginFactory;
*/
public class GridParm {
// separate logger for GFE performance logging
private final IPerformanceStatusHandler perfLog = PerformanceStatus
.getHandler("GFE:");
private static final transient IUFStatusHandler statusHandler = UFStatus
.getHandler(GridParm.class);
/** The parm ID associated with this GridParm */
private ParmID id;
@ -120,6 +135,16 @@ public class GridParm {
return db.getGridInventory(id);
}
/**
 * Returns the grid inventory for this parameter that overlaps the given
 * timeRange. Delegates to the backing GridDatabase.
 *
 * @param tr
 *            the time range the returned inventory must overlap
 * @return The server response containing the grid inventory
 */
public ServerResponse<List<TimeRange>> getGridInventory(TimeRange tr) {
    return db.getGridInventory(id, tr);
}
/**
* Returns the grid history for this parameter and specified grids through
* history. Returns the status
@ -133,12 +158,6 @@ public class GridParm {
return db.getGridHistory(id, trs);
}
/**
 * Updates the grid history for this parameter by delegating to the
 * backing GridDatabase.
 *
 * @param history
 *            map of time range to the histories to store for that range
 * @return the server response from the database layer
 * @deprecated NOTE(review): the intended replacement is not visible in
 *             this chunk - possibly the publish-time update path; confirm
 *             before removing callers.
 */
@Deprecated
public ServerResponse<?> updateGridHistory(
        Map<TimeRange, List<GridDataHistory>> history) {
    return db.updateGridHistory(id, history);
}
/**
* Updates the publish times in the database of all provided
* GridDataHistories. Does not alter the publish times in memory.
@ -228,8 +247,13 @@ public class GridParm {
}
// validate the data
ITimer timer = TimeUtil.getTimer();
timer.start();
sr.addMessages(recordsOkay(saveRequest.getGridSlices(),
new ArrayList<TimeRange>()));
new ArrayList<TimeRange>(0)));
timer.stop();
perfLog.logDuration("Validating " + saveRequest.getGridSlices().size()
+ " grids for saving", timer.getElapsedTime());
if (!sr.isOkay()) {
return sr;
}
@ -286,45 +310,20 @@ public class GridParm {
// Get current inventory
List<TimeRange> reqTimes = getRequest.getTimes();
// TODO do we really need this time range check? it's not worth much
// njensen made it only work on non-D2D databases since
// it was slowing down smart init
if (!id.getDbId().getDbType().equals("D2D")) {
List<TimeRange> trs = null;
ServerResponse<List<TimeRange>> ssr = getGridInventory();
trs = ssr.getPayload();
sr.addMessages(ssr);
if (!CollectionUtil.isNullOrEmpty(reqTimes)) {
// Get the data
if (getRequest.isConvertUnit() && (db instanceof D2DGridDatabase)) {
sr = ((D2DGridDatabase) db).getGridData(id, reqTimes,
getRequest.isConvertUnit());
} else {
sr = db.getGridData(id, reqTimes);
}
if (!sr.isOkay()) {
sr.addMessage("Cannot get grid data with the get inventory failure");
sr.addMessage("Failure in retrieving grid data from GridDatabase");
return sr;
}
// Ensure that all requested time ranges are in the inventory
if (!trs.containsAll(reqTimes)) {
sr.addMessage("Some of the requested time ranges are not in the inventory."
+ " Inv: "
+ trs
+ " requestTimes: "
+ getRequest.getTimes());
return sr;
}
}
// Get the data
if (getRequest.isConvertUnit() && (db instanceof D2DGridDatabase)) {
sr = ((D2DGridDatabase) db).getGridData(id, reqTimes,
getRequest.isConvertUnit());
} else {
sr = db.getGridData(id, reqTimes);
}
if (!sr.isOkay()) {
sr.addMessage("Failure in retrieving grid data from GridDatabase");
return sr;
}
// Validate the data
sr.addMessages(dataOkay(sr.getPayload(), badDataTimes));
if (!sr.isOkay()) {
sr.addMessage("Cannot get grid data - data is not valid");
sr.setPayload(new ArrayList<IGridSlice>(0));
}
return sr;
@ -374,52 +373,49 @@ public class GridParm {
// Get the lock table
WsId wsId = new WsId(null, "timePurge", "EDEX");
List<LockTable> lts = new ArrayList<LockTable>();
List<LockTable> lts = new ArrayList<LockTable>(0);
LockTableRequest lockreq = new LockTableRequest(this.id);
ServerResponse<List<LockTable>> ssr2 = LockManager.getInstance()
.getLockTables(lockreq, wsId, siteID);
sr.addMessages(ssr2);
lts = ssr2.getPayload();
if (!sr.isOkay() || lts.size() != 1) {
if (!sr.isOkay() || (lts.size() != 1)) {
sr.addMessage("Cannot timePurge since getting lock table failed");
}
List<TimeRange> breakList = new ArrayList<TimeRange>();
List<TimeRange> noBreak = new ArrayList<TimeRange>();
for (int i = 0; i < lts.get(0).getLocks().size(); i++) {
if (lts.get(0).getLocks().get(i).getTimeRange().getEnd()
.before(purgeTime)
|| lts.get(0).getLocks().get(i).getTimeRange().getEnd()
.equals(purgeTime)) {
breakList.add(lts.get(0).getLocks().get(i).getTimeRange());
LockTable myLockTable = lts.get(0);
for (Lock lock : myLockTable.getLocks()) {
if (lock.getEndTime() < purgeTime.getTime()) {
breakList.add(lock.getTimeRange());
} else {
noBreak.add(lts.get(0).getLocks().get(i).getTimeRange());
noBreak.add(lock.getTimeRange());
}
}
List<TimeRange> purge = new ArrayList<TimeRange>();
for (int i = 0; i < trs.size(); i++) {
if (trs.get(i).getEnd().before(purgeTime)
|| trs.get(i).getEnd().equals(purgeTime)) {
for (TimeRange tr : trs) {
if (tr.getEnd().getTime() <= purgeTime.getTime()) {
boolean found = false;
for (int j = 0; j < noBreak.size(); j++) {
if (noBreak.get(j).contains(trs.get(i))) {
for (TimeRange noBreakTr : noBreak) {
if (noBreakTr.contains(tr)) {
found = true;
break;
}
}
if (!found) {
purge.add(trs.get(i));
purge.add(tr);
}
}
}
List<LockRequest> lreqs = new ArrayList<LockRequest>();
List<LockTable> ltChanged = new ArrayList<LockTable>();
for (int i = 0; i < breakList.size(); i++) {
lreqs.add(new LockRequest(id, breakList.get(i), LockMode.BREAK_LOCK));
for (TimeRange tr : breakList) {
lreqs.add(new LockRequest(id, tr, LockMode.BREAK_LOCK));
}
ServerResponse<List<LockTable>> lockResponse = LockManager
@ -434,25 +430,24 @@ public class GridParm {
for (int i = 0; i < ltChanged.size(); i++) {
lockNotifications
.add(new LockNotification(ltChanged.get(i), siteID));
// gridNotifications.add(new GridUpdateNotification(id, breakList
// .get(i), Arrays.asList, ""));
}
GFEDao dao = null;
try {
dao = (GFEDao) PluginFactory.getInstance().getPluginDao("gfe");
} catch (PluginException e) {
sr.addMessage("Unable to get gfe dao");
}
dao.deleteRecords(id, purge);
for (int i = 0; i < purge.size(); i++) {
// assemble the GridUpdateNotification
dao.deleteRecords(id, purge);
Map<TimeRange, List<GridDataHistory>> histories = new HashMap<TimeRange, List<GridDataHistory>>(
0);
gridNotifications.add(new GridUpdateNotification(id, purge.get(i),
histories, wsId, siteID));
for (TimeRange tr : purge) {
// assemble the GridUpdateNotification
gridNotifications.add(new GridUpdateNotification(id, tr,
histories, wsId, siteID));
}
sr.setPayload(new Integer(purge.size()));
} catch (Exception e) {
sr.addMessage("Failed to delete records for timePurge");
statusHandler.error("Failed to delete records for timePurge", e);
}
sr.setPayload(new Integer(purge.size()));
return sr;
}
@ -469,20 +464,6 @@ public class GridParm {
return "ParmID: " + id;
}
/**
 * Validates each grid slice via gridSliceOkay, accumulating all
 * validation messages. The valid time of every slice that fails
 * validation is appended to badDataTimes.
 *
 * @param gridSlices
 *            the slices to validate
 * @param badDataTimes
 *            output list receiving the valid time of each bad slice
 * @return the accumulated server response for all slices
 */
private ServerResponse<?> dataOkay(List<IGridSlice> gridSlices,
        List<TimeRange> badDataTimes) {
    ServerResponse<?> response = new ServerResponse<String>();
    for (IGridSlice gridSlice : gridSlices) {
        ServerResponse<?> sliceResponse = gridSliceOkay(gridSlice);
        response.addMessages(sliceResponse);
        if (!sliceResponse.isOkay()) {
            badDataTimes.add(gridSlice.getValidTime());
        }
    }
    return response;
}
/**
* Checks the data to ensure that it is valid. If there is a bad data slice,
* then place the valid time of that grid in the badDataTimes entry.
@ -582,7 +563,7 @@ public class GridParm {
.getLockTables(req, requestor, siteID);
lockTables = ssr.getPayload();
sr.addMessages(ssr);
if (!sr.isOkay() || lockTables.size() != 1) {
if (!sr.isOkay() || (lockTables.size() != 1)) {
sr.addMessage("Cannot verify locks due to problem with Lock Manager");
return sr;

View file

@ -69,6 +69,7 @@ import com.raytheon.uf.common.status.UFStatus.Priority;
import com.raytheon.uf.common.time.TimeRange;
import com.raytheon.uf.common.time.util.ITimer;
import com.raytheon.uf.common.time.util.TimeUtil;
import com.raytheon.uf.edex.database.DataAccessLayerException;
import com.raytheon.uf.edex.database.plugin.PluginFactory;
import com.raytheon.uf.edex.database.purge.PurgeLogger;
@ -89,11 +90,11 @@ import com.raytheon.uf.edex.database.purge.PurgeLogger;
* fixed a purge inefficiency,
* fixed error which caused D2D purging to remove
* smartInit hdf5 data
* 03/07/13     #1773     njensen     Logged commitGrid() times
* 03/15/13     #1795     njensen     Sped up commitGrid()
* 03/20/2013   #1774     randerso    Removed dead method, changed to use new
*                                    D2DGridDatabase constructor
* 04/23/2013   #1949     rjpeter     Added inventory retrieval for a given time range.
* </pre>
*
* @author bphillip
@ -174,6 +175,39 @@ public class GridParmManager {
return sr;
}
/**
 * Returns the grid inventory overlapping timeRange for the given parmId.
 * Looks the parameter up via gridParm(); if it is unknown or the lookup
 * throws, an error message is added to the response instead.
 *
 * @param parmId
 *            The parmID to get the inventory for
 * @param timeRange
 *            The timeRange to get the inventory for
 * @return The server response containing the overlapping inventory
 */
public static ServerResponse<List<TimeRange>> getGridInventory(
        ParmID parmId, TimeRange timeRange) {
    ServerResponse<List<TimeRange>> response = new ServerResponse<List<TimeRange>>();
    try {
        GridParm parm = gridParm(parmId);
        if (!parm.isValid()) {
            response.addMessage("Unknown Parm: " + parmId
                    + " in getGridInventory()");
        } else {
            response = parm.getGridInventory(timeRange);
        }
    } catch (Exception e) {
        response.addMessage("Unknown Parm: " + parmId
                + " in getGridInventory()");
        logger.error("Unknown Parm: " + parmId + " in getGridInventory()",
                e);
    }
    return response;
}
/**
* Returns the grid history through "history" for the parmId and specified
* grids. Returns the status.
@ -485,6 +519,9 @@ public class GridParmManager {
continue;
}
// TODO: No need to get inventory and then compare for history
// times, just request the history times directly
// get the source data inventory
inventoryTimer.start();
ServerResponse<List<TimeRange>> invSr = sourceGP.getGridInventory();
@ -543,14 +580,14 @@ public class GridParmManager {
// if update time is less than publish time, grid has not
// changed since last published, therefore only update
// history, do not publish
if (gdh.getPublishTime() == null
if ((gdh.getPublishTime() == null)
|| (gdh.getUpdateTime().getTime() > gdh
.getPublishTime().getTime())
// in service backup, times on srcHistory could
// appear as not needing a publish, even though
// dest data does not exist
|| currentDestHistory.get(tr) == null
|| currentDestHistory.get(tr).size() == 0) {
|| (currentDestHistory.get(tr) == null)
|| (currentDestHistory.get(tr).size() == 0)) {
doPublish = true;
}
}
@ -778,11 +815,18 @@ public class GridParmManager {
public static ServerResponse<List<DatabaseID>> getDbInventory(String siteID) {
ServerResponse<List<DatabaseID>> sr = new ServerResponse<List<DatabaseID>>();
List<DatabaseID> databases = new ArrayList<DatabaseID>();
List<DatabaseID> gfeDbs = gfeDao.getDatabaseInventory();
List<DatabaseID> gfeDbs = null;
List<DatabaseID> singletons = null;
List<DatabaseID> d2dDbs = null;
try {
gfeDbs = gfeDao.getDatabaseInventory(siteID);
} catch (DataAccessLayerException e) {
sr.addMessage("Unable to get IFP databases for site: " + siteID);
logger.error("Unable to get IFP databases for site: " + siteID, e);
return sr;
}
d2dDbs = D2DParmIdCache.getInstance().getDatabaseIDs();
try {
@ -793,6 +837,7 @@ public class GridParmManager {
logger.error("Unable to get singleton databases", e);
return sr;
}
if (singletons != null) {
for (DatabaseID singleton : singletons) {
if (singleton.getSiteId().equals(siteID)) {
@ -800,11 +845,13 @@ public class GridParmManager {
}
}
}
for (DatabaseID dbId : gfeDbs) {
if (!databases.contains(dbId) && dbId.getSiteId().equals(siteID)) {
if (!databases.contains(dbId)) {
databases.add(dbId);
}
}
if (d2dDbs != null) {
for (DatabaseID d2d : d2dDbs) {
if (d2d.getSiteId().equals(siteID)) {
@ -815,9 +862,7 @@ public class GridParmManager {
DatabaseID topoDbId = TopoDatabaseManager.getTopoDbId(siteID);
databases.add(topoDbId);
databases.addAll(NetCDFDatabaseManager.getDatabaseIds(siteID));
sr.setPayload(databases);
return sr;
}
@ -994,7 +1039,7 @@ public class GridParmManager {
// process the id and determine whether it should be purged
count++;
if (count > desiredVersions
if ((count > desiredVersions)
&& !dbId.getModelTime().equals(DatabaseID.NO_MODEL_TIME)) {
deallocateDb(dbId, true);
PurgeLogger.logInfo("Purging " + dbId, "gfe");
@ -1114,7 +1159,7 @@ public class GridParmManager {
* Validate the database ID. Throws an exception if the database ID is
* invalid
*/
if (!dbId.isValid() || dbId.getFormat() != DatabaseID.DataType.GRID) {
if (!dbId.isValid() || (dbId.getFormat() != DatabaseID.DataType.GRID)) {
throw new GfeException(
"Database id "
+ dbId
@ -1192,15 +1237,11 @@ public class GridParmManager {
}
/**
 * Removes all cached databases belonging to the given site from the
 * database map.
 *
 * Bug fix: as shown, removeDbFromMap(dbId) was invoked inside the
 * key-set iteration AND again over the collected list, removing each
 * matching database twice and risking a ConcurrentModificationException
 * by mutating the map while iterating its key set. Collect first, then
 * remove, so each database is removed exactly once.
 *
 * @param siteID
 *            the site whose cached databases should be purged
 */
public static void purgeDbCache(String siteID) {
    List<DatabaseID> toRemove = new ArrayList<DatabaseID>();
    for (DatabaseID dbId : dbMap.keySet()) {
        if (dbId.getSiteId().equals(siteID)) {
            toRemove.add(dbId);
        }
    }
    for (DatabaseID dbId : toRemove) {
        removeDbFromMap(dbId);
    }
}
private static ServerResponse<GridDatabase> getOfficialDB(

View file

@ -25,6 +25,7 @@ import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import com.raytheon.uf.common.dataplugin.gfe.GridDataHistory;
@ -72,8 +73,9 @@ import com.raytheon.uf.common.time.TimeRange;
* in the gfeBaseDataDir.
* 02/10/13 #1603 randerso Moved removeFromDb, removeFromHDF5 and deleteModelHDF5
* methods down to IFPGridDatabase
* 03/15/13     #1795     njensen     Added updatePublishTime()
* 04/23/13     #1949     rjpeter     Added default implementations of history by time range
*                                    and cachedParmId
* </pre>
*
* @author bphillip
@ -348,6 +350,29 @@ public abstract class GridDatabase {
*/
public abstract ServerResponse<List<TimeRange>> getGridInventory(ParmID id);
/**
 * Gets the inventory of time ranges currently for the specified ParmID
 * that overlap the given time range. This default implementation fetches
 * the full inventory and then strips every entry that does not overlap
 * the requested range.
 *
 * @param id
 *            The parmID to get the inventory for
 * @param tr
 *            the time range the returned inventory must overlap
 * @return The server response whose payload is the filtered inventory
 */
public ServerResponse<List<TimeRange>> getGridInventory(ParmID id,
        TimeRange tr) {
    ServerResponse<List<TimeRange>> response = getGridInventory(id);
    List<TimeRange> inventory = response.getPayload();
    // walk backwards so removals do not disturb unvisited positions
    ListIterator<TimeRange> it = inventory.listIterator(inventory.size());
    while (it.hasPrevious()) {
        if (!it.previous().overlaps(tr)) {
            it.remove();
        }
    }
    return response;
}
/**
* Retrieves a sequence gridSlices from the database based on the specified
* parameters and stores them in the data parameter. TimeRanges of the grids
@ -438,12 +463,6 @@ public abstract class GridDatabase {
+ this.getClass().getName());
}
public ServerResponse<?> updateGridHistory(ParmID parmId,
Map<TimeRange, List<GridDataHistory>> history) {
throw new UnsupportedOperationException("Not implemented for class "
+ this.getClass().getName());
}
/**
* Updates the publish times in the database of all provided
* GridDataHistories. Does not alter the publish times in memory.
@ -469,4 +488,16 @@ public abstract class GridDatabase {
}
public abstract void updateDbs();
/**
 * Return the internally cached ParmID for this database implementation.
 * This base implementation performs no caching and simply returns the
 * ParmID that was passed in; subclasses may override to return a shared
 * instance.
 *
 * @param parmID
 *            the ParmID to resolve
 * @return the cached ParmID instance for this database
 * @throws GfeException
 *             If the parm does not exist for this database.
 */
public ParmID getCachedParmID(ParmID parmID) throws GfeException {
    return parmID;
}
}

View file

@ -61,7 +61,7 @@ import com.raytheon.uf.edex.database.DataAccessLayerException;
* Jun 19, 2008 #1160 randerso Initial creation
* Jul 10, 2009 #2590 njensen Support for multiple sites.
* May 04, 2012 #574 dgilling Re-port to better match AWIPS1.
*
* Apr 23, 2013 #1949 rjpeter Removed unused method.
* </pre>
*
* @author randerso
@ -70,13 +70,13 @@ import com.raytheon.uf.edex.database.DataAccessLayerException;
public class TopoDatabase extends VGridDatabase {
private static final TimeRange TR = TimeRange.allTimes();
private TopoDatabaseManager topoMgr;
private final TopoDatabaseManager topoMgr;
private GridLocation gloc;
private final GridLocation gloc;
private ParmID pid;
private final ParmID pid;
private GridParmInfo gpi;
private final GridParmInfo gpi;
public TopoDatabase(final IFPServerConfig config,
TopoDatabaseManager topoMgr) {
@ -195,7 +195,7 @@ public class TopoDatabase extends VGridDatabase {
if (!this.pid.equals(id)) {
sr.addMessage("Unknown ParmID: " + id);
} else if (timeRanges.size() != 1 || !timeRanges.get(0).equals(TR)) {
} else if ((timeRanges.size() != 1) || !timeRanges.get(0).equals(TR)) {
sr.addMessage("Invalid time requested");
} else {
@ -278,21 +278,6 @@ public class TopoDatabase extends VGridDatabase {
// no-op
}
/**
 * Grid history updates are not supported for the topo database; the
 * returned response always carries an error message.
 *
 * @see com.raytheon.edex.plugin.gfe.server.database.GridDatabase#updateGridHistory
 */
@Override
public ServerResponse<?> updateGridHistory(ParmID parmId,
        Map<TimeRange, List<GridDataHistory>> history) {
    ServerResponse<?> response = new ServerResponse<Object>();
    response.addMessage("Can't update Grid History on TopoDatabase");
    return response;
}
/*
* (non-Javadoc)
*

View file

@ -1,54 +0,0 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.edex.plugin.gfe.server.handler;
import java.util.Date;
import com.raytheon.edex.plugin.gfe.db.dao.GFEDao;
import com.raytheon.uf.common.dataplugin.gfe.request.GetLatestDbTimeRequest;
import com.raytheon.uf.common.serialization.comm.IRequestHandler;
/**
* Handler for getting the latest insert time for a given database ID
*
* <pre>
*
* SOFTWARE HISTORY
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* 8/16/2010 6349 bphillip Initial creation
*
* </pre>
*
* @author bphillip
* @version 1.0
*/
public class GetLatestDbTimeHandler implements
        IRequestHandler<GetLatestDbTimeRequest> {

    /**
     * Looks up the latest insert time recorded for the DatabaseID carried
     * by the request.
     *
     * @param request
     *            the request naming the database to query
     * @return the latest insert time for that database
     */
    @Override
    public Date handleRequest(GetLatestDbTimeRequest request)
            throws Exception {
        GFEDao dao = new GFEDao();
        return dao.getLatestDbIdInsertTime(request.getDbId());
    }
}

View file

@ -1,61 +0,0 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.edex.plugin.gfe.server.handler;
import com.raytheon.edex.plugin.gfe.db.dao.GFEDao;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.DatabaseID;
import com.raytheon.uf.common.dataplugin.gfe.request.GetLatestModelDbIdRequest;
import com.raytheon.uf.common.serialization.comm.IRequestHandler;
/**
* Handler for getting the latest DatabaseID for a given model name and site ID.
*
* <pre>
*
* SOFTWARE HISTORY
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Aug 17, 2010 dgilling Initial creation
*
* </pre>
*
* @author dgilling
* @version 1.0
*/
public class GetLatestModelDbIdHandler implements
        IRequestHandler<GetLatestModelDbIdRequest> {

    /**
     * Looks up the most recent DatabaseID for the model name and site ID
     * carried by the request.
     *
     * @param request
     *            the request naming the site and model to query
     * @return the latest DatabaseID for that site/model
     */
    @Override
    public DatabaseID handleRequest(GetLatestModelDbIdRequest request)
            throws Exception {
        GFEDao dao = new GFEDao();
        return dao.getLatestModelDbId(request.getSiteId(),
                request.getModelName());
    }
}

View file

@ -43,6 +43,7 @@ import com.raytheon.uf.common.serialization.comm.IRequestHandler;
* 04/08/08 #875 bphillip Initial Creation
* 06/17/08 #940 bphillip Implemented GFE Locking
* 09/22/09 3058 rjpeter Converted to IRequestHandler
* 04/24/13 1949 rjpeter Added list sizing
* </pre>
*
* @author bphillip
@ -59,8 +60,11 @@ public class LockChangeHandler implements IRequestHandler<LockChangeRequest> {
if (sr.isOkay()) {
try {
List<GfeNotification> notes = new ArrayList<GfeNotification>();
for (LockTable table : sr.getPayload()) {
List<LockTable> lockTables = sr.getPayload();
List<GfeNotification> notes = new ArrayList<GfeNotification>(
lockTables.size());
for (LockTable table : lockTables) {
notes.add(new LockNotification(table, siteID));
}
ServerResponse<?> notifyResponse = SendNotifications
@ -75,7 +79,6 @@ public class LockChangeHandler implements IRequestHandler<LockChangeRequest> {
+ e.getMessage());
}
}
return sr;
}
}

View file

@ -59,7 +59,7 @@ import com.raytheon.uf.common.status.UFStatus;
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Apr 21, 2011 dgilling Initial creation
*
* Apr 23, 2013 1949 rjpeter Removed extra lock table look up
* </pre>
*
* @author dgilling
@ -149,18 +149,6 @@ public class SaveASCIIGridsHandler implements
// make a LockTableRequest
LockTableRequest ltr = new LockTableRequest(pid);
// get the lock tables
ServerResponse<List<LockTable>> srLockTables = LockManager
.getInstance().getLockTables(ltr,
request.getWorkstationID(), siteId);
if (!srLockTables.isOkay()) {
msg = "Skipping grid storage [" + (i + 1) + " of " + ngrids
+ "]. Unable to obtain lock table for "
+ pid.toString() + ": " + srLockTables.message();
sr.addMessage(msg);
continue;
}
// make the Lock Request object to lock
LockRequest lrl = new LockRequest(pid, agrid.getGridSlices().get(i)
.getValidTime(), LockMode.LOCK);
@ -185,8 +173,7 @@ public class SaveASCIIGridsHandler implements
grid.setMessageData(agrid.getGridSlices().get(i));
grid.setGridHistory(agrid.getGridSlices().get(i).getHistory());
records.add(grid);
final List<SaveGridRequest> sgrs = new ArrayList<SaveGridRequest>();
sgrs.clear();
final List<SaveGridRequest> sgrs = new ArrayList<SaveGridRequest>(1);
SaveGridRequest sgr = new SaveGridRequest(pid, agrid
.getGridSlices().get(i).getValidTime(), records);
sgrs.add(sgr);

View file

@ -21,9 +21,12 @@
package com.raytheon.edex.plugin.gfe.server.lock;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -60,7 +63,8 @@ import com.raytheon.uf.edex.database.DataAccessLayerException;
* ------------ ---------- ----------- --------------------------
* 04/08/08 #875 bphillip Initial Creation
* 06/17/08 #940 bphillip Implemented GFE Locking
*
* 04/23/13 #1949 rjpeter Updated to work with Normalized Database,
* fixed inefficiencies in querying/merging
* </pre>
*
* @author bphillip
@ -69,22 +73,21 @@ import com.raytheon.uf.edex.database.DataAccessLayerException;
public class LockManager {
/** The logger */
private Log logger = LogFactory.getLog(getClass());
private final Log logger = LogFactory.getLog(getClass());
private LockComparator startTimeComparator = new LockComparator();
private final LockComparator startTimeComparator = new LockComparator();
private final GFELockDao dao = new GFELockDao();
/** The singleton instance of the LockManager */
private static LockManager instance;
private static LockManager instance = new LockManager();
/**
* Gets the singleton instance of the LockManager
*
* @return The singleton instance of the LockManager
*/
public synchronized static LockManager getInstance() {
if (instance == null) {
instance = new LockManager();
}
public static LockManager getInstance() {
return instance;
}
@ -117,14 +120,15 @@ public class LockManager {
// extract the ParmIds from the request list
List<ParmID> parmIds = new ArrayList<ParmID>();
sr.addMessages(extractParmIds(request, parmIds, siteID));
try {
sr.setPayload(new ArrayList<LockTable>(new GFELockDao().getLocks(
parmIds, requestor).values()));
} catch (DataAccessLayerException e) {
sr.addMessages(extractParmIds(request, parmIds, siteID));
sr.setPayload(new ArrayList<LockTable>(dao.getLocks(parmIds,
requestor).values()));
} catch (Exception e) {
logger.error("Error getting lock tables for " + parmIds, e);
sr.addMessage("Error getting lock tables for " + parmIds);
sr.setPayload(new ArrayList<LockTable>());
sr.setPayload(new ArrayList<LockTable>(0));
}
return sr;
@ -143,9 +147,7 @@ public class LockManager {
*/
/**
 * Gets the lock tables for a single lock table request by delegating to
 * the list-based overload.
 *
 * Fix: the span as shown contained two return statements (the old
 * list-building implementation interleaved with the new delegation),
 * leaving unreachable code; only the single delegation remains.
 *
 * @param request
 *            the lock table request
 * @param wsId
 *            the requesting workstation id
 * @param siteID
 *            the site identifier
 * @return the server response containing the lock tables
 */
public ServerResponse<List<LockTable>> getLockTables(
        LockTableRequest request, WsId wsId, String siteID) {
    return getLockTables(Arrays.asList(request), wsId, siteID);
}
public ServerResponse<List<LockTable>> requestLockChange(
@ -167,9 +169,8 @@ public class LockManager {
/**
 * Requests a single lock change by delegating to the list-based overload.
 *
 * Fix: the span as shown contained two return statements (the old
 * list-building implementation interleaved with the new delegation),
 * leaving unreachable code; only the single delegation remains.
 *
 * @param request
 *            the lock request to apply
 * @param requestor
 *            the requesting workstation id
 * @param siteID
 *            the site identifier
 * @param combineLocks
 *            whether adjacent locks should be combined
 * @return the server response containing the affected lock tables
 * @throws GfeLockException
 *             if the lock change fails
 */
public ServerResponse<List<LockTable>> requestLockChange(
        LockRequest request, WsId requestor, String siteID,
        boolean combineLocks) throws GfeLockException {
    return requestLockChange(Arrays.asList(request), requestor, siteID,
            combineLocks);
}
public ServerResponse<List<LockTable>> requestLockChange(
@ -191,8 +192,8 @@ public class LockManager {
List<LockRequest> requests, WsId requestor, String siteID,
boolean combineLocks) {
List<LockTable> lockTablesAffected = new ArrayList<LockTable>();
List<GridUpdateNotification> gridUpdatesAffected = new ArrayList<GridUpdateNotification>();
List<LockTable> lockTablesAffected = new LinkedList<LockTable>();
List<GridUpdateNotification> gridUpdatesAffected = new LinkedList<GridUpdateNotification>();
ServerResponse<List<LockTable>> sr = new ServerResponse<List<LockTable>>();
sr.setPayload(lockTablesAffected);
@ -205,7 +206,7 @@ public class LockManager {
// expand the request as appropriate to the time boundary requirements
// and convert to all parm-type requests
List<LockRequest> req = new ArrayList<LockRequest>();
List<LockRequest> req = new ArrayList<LockRequest>(requests.size());
sr.addMessages(adjustLockToTimeBoundaries(requests, req));
if (!sr.isOkay()) {
@ -213,85 +214,87 @@ public class LockManager {
return sr;
}
// extract the ParmIds from the requests
List<ParmID> parmIds = new ArrayList<ParmID>();
sr.addMessages(extractParmIdsFromLockReq(req, parmIds, siteID));
// get the lock tables specific to the extracted parmIds
List<ParmID> parmIds = new LinkedList<ParmID>();
Map<ParmID, LockTable> lockTableMap;
try {
lockTableMap = new GFELockDao().getLocks(parmIds, requestor);
} catch (DataAccessLayerException e) {
// extract the ParmIds from the requests
sr.addMessages(extractParmIdsFromLockReq(req, parmIds, siteID));
// get the lock tables specific to the extracted parmIds
lockTableMap = dao.getLocks(parmIds, requestor);
} catch (Exception e) {
logger.error("Error getting lock tables for " + parmIds, e);
sr.addMessage("Error getting lock tables for " + parmIds);
return sr;
}
// process each modified lock request, these are all parm-type requests
ParmID currentParmId = null;
TimeRange currentTimeRange = null;
for (LockRequest currentRequest : req) {
currentParmId = currentRequest.getParmId();
currentTimeRange = currentRequest.getTimeRange();
// get table from sequence
LockTable lt = lockTableMap.get(currentParmId);
LockTable prevLT = lt.clone();
try {
// Change Lock
if (!changeLock(lt, currentRequest.getTimeRange(), requestor,
if (!changeLock(lt, currentTimeRange, requestor,
currentRequest.getMode(), combineLocks)) {
sr.addMessage("Requested change lock failed - Lock is owned by another user - "
+ currentRequest + " LockTable=" + lt);
lockTablesAffected.clear();
gridUpdatesAffected.clear();
return sr;
continue;
}
} catch (Exception e) {
logger.error("Error changing lock", e);
sr.addMessage("Requested change lock failed - Exception thrown - "
+ currentRequest
+ " LockTable="
+ lt
+ " Exception: "
+ e.getLocalizedMessage());
lockTablesAffected.clear();
gridUpdatesAffected.clear();
return sr;
continue;
}
// the change lock worked, but resulted in the same lock situation
if (prevLT.equals(lt)) {
// TODO: Equals not implemented, this is dead code due to clone
continue;
}
// add the lock table to the lockTablesAffected" if it already
// doesn't exist -- if it does exist, then replace it but don't add
// it if it really didn't change
// add the lock table to the lockTablesAffected if it already
// doesn't exist
LockTable tableToRemove = null;
for (int j = 0; j < lockTablesAffected.size(); j++) {
if (lockTablesAffected.get(j).getParmId().equals(currentParmId)) {
tableToRemove = lockTablesAffected.get(j);
boolean addTable = true;
for (LockTable ltAffected : lockTablesAffected) {
if (ltAffected.getParmId().equals(currentParmId)) {
addTable = false;
break;
}
}
if (tableToRemove != null) {
lockTablesAffected.remove(tableToRemove);
if (addTable) {
lockTablesAffected.add(lt);
}
lockTablesAffected.add(lt);
// assemble a grid update notification since the lock table has
// changed - IF this is BREAK LOCK request
if (currentRequest.getMode().equals(LockTable.LockMode.BREAK_LOCK)) {
// TODO: Should be able to do in a single look up that retrieves
// the histories that intersect the time ranges instead of the
// current two stage query
List<TimeRange> trs = new ArrayList<TimeRange>();
ServerResponse<List<TimeRange>> ssr = GridParmManager
.getGridInventory(currentParmId);
.getGridInventory(currentParmId, currentTimeRange);
sr.addMessages(ssr);
trs = ssr.getPayload();
if (!sr.isOkay()) {
lockTablesAffected.clear();
gridUpdatesAffected.clear();
return sr;
// unable to get payload, can't reverse the break lock, add
// a new message to our current response and keep going
sr.addMessages(ssr);
continue;
}
List<TimeRange> updatedGridsTR = new ArrayList<TimeRange>();
for (int p = 0; p < trs.size(); p++) {
if (trs.get(p).overlaps(currentRequest.getTimeRange())) {
@ -312,14 +315,17 @@ public class LockManager {
}
}
// if we made it here, then all lock requests were successful
for (int k = 0; k < lockTablesAffected.size(); k++) {
lockTablesAffected.get(k).resetWsId(requestor);
// update the lockTables that were successful
for (LockTable lt : lockTablesAffected) {
lt.resetWsId(requestor);
}
// process the break lock notifications that were successful
for (GridUpdateNotification notify : gridUpdatesAffected) {
sr.addNotifications(notify);
}
return sr;
}
@ -348,143 +354,161 @@ public class LockManager {
} else if (ls.equals(LockTable.LockStatus.LOCKED_BY_ME)) {
return true;
} else if (ls.equals(LockTable.LockStatus.LOCKABLE)) {
GFELockDao dao = new GFELockDao();
List<Lock> existingLocks = new ArrayList<Lock>();
try {
existingLocks = dao.getLocksInRange(lt.getParmId(),
timeRange);
if (existingLocks != null && !existingLocks.isEmpty()) {
for (Lock lock : existingLocks) {
dao.delete(lock);
lt.removeLock(lock);
}
}
Lock newLock = new Lock(lt.getParmId(), timeRange,
requestorId);
replaceLocks(lt, newLock, combineLocks);
} catch (DataAccessLayerException e) {
logger.error("Error changing locks", e);
logger.error("Error adding lock", e);
throw new GfeLockException("Unable add new lock", e);
}
Lock newLock = new Lock(timeRange, requestorId);
newLock.setParmId(lt.getParmId());
dao.persist(newLock);
try {
newLock = dao.getLock(newLock.getParmId(),
newLock.getTimeRange(), newLock.getWsId());
} catch (DataAccessLayerException e) {
throw new GfeLockException("Unable to update new lock", e);
}
lt.addLock(newLock);
}
if (combineLocks) {
combineLocks(lt);
}
}
else if (lockMode.equals(LockTable.LockMode.UNLOCK)) {
} else if (lockMode.equals(LockTable.LockMode.UNLOCK)) {
if (ls.equals(LockTable.LockStatus.LOCKED_BY_ME)) {
try {
GFELockDao dao = new GFELockDao();
Lock newLock = dao.getLock(lt.getParmId(), timeRange,
requestorId);
if (newLock != null) {
dao.delete(newLock);
lt.removeLock(newLock);
}
deleteLocks(lt, timeRange);
} catch (DataAccessLayerException e) {
throw new GfeLockException(
"Unable to retrieve lock information for: "
+ lt.getParmId() + " TimeRange: "
+ timeRange + " WorkstationID: "
+ requestorId);
throw new GfeLockException("Unable to delete locks for: "
+ lt.getParmId() + " TimeRange: " + timeRange
+ " WorkstationID: " + requestorId);
}
} else if (ls.equals(LockTable.LockStatus.LOCKED_BY_OTHER)) {
logger.warn("Lock for time range: " + timeRange
+ " already owned");
} else {
// Record already unlocked
}
}
else if (lockMode.equals(LockTable.LockMode.BREAK_LOCK)) {
} else if (lockMode.equals(LockTable.LockMode.BREAK_LOCK)) {
try {
GFELockDao dao = new GFELockDao();
Lock newLock = dao.getLock(lt.getParmId(), timeRange,
requestorId);
if (newLock != null) {
dao.delete(newLock);
lt.removeLock(newLock);
}
deleteLocks(lt, timeRange);
} catch (DataAccessLayerException e) {
throw new GfeLockException(
"Unable to retrieve lock information for: "
+ lt.getParmId() + " TimeRange: " + timeRange
+ " WorkstationID: " + requestorId);
throw new GfeLockException("Unable to delete locks for: "
+ lt.getParmId() + " TimeRange: " + timeRange
+ " WorkstationID: " + requestorId);
}
}
return true;
}
/**
* Examines the locks contained in a given lock table and combines locks if
* possible.
* Replaces locks in the given time range with the passed lock.
*
* @param lt
* The lock table to examine
* @param newLock
* The lock to add
* @throws GfeLockException
* If errors occur when updating the locks in the database
*/
private void combineLocks(final LockTable lt) throws GfeLockException {
private void replaceLocks(final LockTable lt, final Lock newLock,
boolean combineLocks) throws DataAccessLayerException {
// update the locks in memory
Set<Lock> added = new HashSet<Lock>();
Set<Lock> deleted = new HashSet<Lock>();
List<Lock> locks = null;
Lock currentLock = null;
Lock nextLock = null;
boolean lockCombined = true;
while (lockCombined) {
lockCombined = false;
lt.addLocks(added);
lt.removeLocks(deleted);
Collections.sort(lt.getLocks(), startTimeComparator);
locks = lt.getLocks();
for (int i = 0; i < locks.size() - 1; i++) {
currentLock = locks.get(i);
nextLock = locks.get(i + 1);
if (currentLock.getEndTime() >= nextLock.getStartTime()
&& currentLock.getWsId().equals(nextLock.getWsId())) {
lockCombined = true;
deleted.add(currentLock);
deleted.add(nextLock);
Lock newLock = new Lock(new TimeRange(
currentLock.getStartTime(), nextLock.getEndTime()),
lt.getWsId());
newLock.setParmId(lt.getParmId());
added.add(newLock);
List<Integer> removed = new ArrayList<Integer>();
List<Lock> locks = lt.getLocks();
Collections.sort(locks, startTimeComparator);
long start = newLock.getStartTime();
long end = newLock.getEndTime();
Iterator<Lock> iter = locks.iterator();
while (iter.hasNext()) {
Lock lock = iter.next();
if (start <= lock.getStartTime()) {
if (end >= lock.getEndTime()) {
removed.add(lock.getId());
iter.remove();
} else {
// list was sorted by start time, not need to go any further
break;
}
}
}
try {
new GFELockDao().updateCombinedLocks(deleted, added);
} catch (DataAccessLayerException e) {
throw new GfeLockException("Error combining locks", e);
added.add(newLock);
locks.add(newLock);
// if combineLocks, do the combine before storing the initial change to
// the db
if (combineLocks) {
Lock prevLock = null;
Lock currentLock = null;
Collections.sort(locks, startTimeComparator);
prevLock = locks.get(0);
for (int i = 1; i < locks.size(); i++) {
currentLock = locks.get(i);
if ((prevLock.getEndTime() >= currentLock.getStartTime())
&& prevLock.getWsId().equals(currentLock.getWsId())) {
// remove previous lock, checking if it was a new lock first
if (!added.remove(prevLock)) {
removed.add(prevLock.getId());
}
// remove currentLock, checking if it was a new lock first
if (!added.remove(currentLock)) {
removed.add(currentLock.getId());
}
locks.remove(i);
// replace prevLock with new Lock
prevLock = new Lock(lt.getParmId(), new TimeRange(
prevLock.getStartTime(), Math.max(
prevLock.getEndTime(),
currentLock.getEndTime())), lt.getWsId());
added.add(prevLock);
locks.set(i - 1, prevLock);
// keep current position
i--;
} else {
prevLock = currentLock;
}
}
}
// update the database
if (!added.isEmpty() || !removed.isEmpty()) {
dao.addRemoveLocks(added, removed);
}
}
/**
* Notification that one or more databases were deleted. No lock change
* notifications are generated since all clients will already know that the
* databases have been removed.
* Deletes locks in the given time range
*
* Asks the LockDatabase for the list of LockTables. Eliminate entries in
* LockTables that no longer should exist based on the sequence of database
* identifiers that were deleted. Restore the LockTables by passing them to
* the LockDatabase.
*
* @param deletions
* @param lt
* The lock table to examine
* @param tr
* The TimeRange to delete
* @throws GfeLockException
* If errors occur when updating the locks in the database
*/
public void databaseDeleted(List<DatabaseID> deletions) {
// TODO: Implement database deletion
private void deleteLocks(final LockTable lt, final TimeRange tr)
throws DataAccessLayerException {
// update the locks in memory
List<Lock> locks = lt.getLocks();
Collections.sort(locks, startTimeComparator);
long start = tr.getStart().getTime();
long end = tr.getEnd().getTime();
Iterator<Lock> iter = locks.iterator();
List<Integer> locksToRemove = new LinkedList<Integer>();
while (iter.hasNext()) {
Lock lock = iter.next();
if (start <= lock.getStartTime()) {
if (end >= lock.getEndTime()) {
locksToRemove.add(lock.getId());
iter.remove();
} else {
// list was sorted by start time, not need to go any further
break;
}
}
}
// update the database
dao.addRemoveLocks(null, locksToRemove);
}
/**
@ -520,10 +544,7 @@ public class LockManager {
}
for (LockRequest req : requestsOut) {
List<LockRequest> reqList = new ArrayList<LockRequest>();
reqList.add(req);
sr.addMessages(expandRequestToBoundary(reqList));
req = reqList.get(0);
sr.addMessages(expandRequestToBoundary(req));
if (!sr.isOkay()) {
return sr;
}
@ -596,27 +617,27 @@ public class LockManager {
* The lock requst
* @return
*/
private ServerResponse<?> expandRequestToBoundary(List<LockRequest> req) {
private ServerResponse<?> expandRequestToBoundary(LockRequest req) {
ServerResponse<?> sr = new ServerResponse<String>();
if (!req.get(0).isParmRequest()) {
if (!req.isParmRequest()) {
logger.error("Expected parm-type request in expandRequestToBoundary");
}
// If this is a break-lock request, then do not expand to time constrts
if (req.get(0).getMode().equals(LockTable.LockMode.BREAK_LOCK)) {
if (req.getMode().equals(LockTable.LockMode.BREAK_LOCK)) {
return sr;
}
// Only expand for request lock and unlock
DatabaseID dbid = req.get(0).getParmId().getDbId();
DatabaseID dbid = req.getParmId().getDbId();
TimeRange tr = null;
switch (dbid.getFormat()) {
case GRID:
ServerResponse<GridParmInfo> ssr = GridParmManager
.getGridParmInfo(req.get(0).getParmId());
.getGridParmInfo(req.getParmId());
GridParmInfo gpi = ssr.getPayload();
sr.addMessages(ssr);
if (!sr.isOkay()) {
@ -624,8 +645,7 @@ public class LockManager {
return sr;
}
// calculate the expanded time
tr = gpi.getTimeConstraints().expandTRToQuantum(
req.get(0).getTimeRange());
tr = gpi.getTimeConstraints().expandTRToQuantum(req.getTimeRange());
if (!tr.isValid()) {
sr.addMessage("Request does not match TimeConstraints "
+ gpi.getTimeConstraints() + " ParmReq: " + req);
@ -638,9 +658,7 @@ public class LockManager {
// Update the lock request
if (tr.isValid()) {
req.add(new LockRequest(req.get(0).getParmId(), tr, req.get(0)
.getMode()));
req.remove(0);
req.setTimeRange(tr);
} else {
sr.addMessage("TimeRange not valid in LockRequest");
}
@ -649,10 +667,10 @@ public class LockManager {
}
/**
* * Overloaded utility routine to extract the ParmIds for a given
* Overloaded utility routine to extract the ParmIds for a given
* SeqOf<LockTableRequest>. If the request is a parm request, the ParmId
* will be appended to the list ParmIds to be returned. If the request is a
* database request, all the ParmIds for the given databaseId wil be
* database request, all the ParmIds for the given databaseId will be
* appended to the list of ParmIds. If the request is neither a parm nor a
* database request, all the parmId for all the databaseIds are appended to
* the list of ParmIds to be returned.
@ -662,22 +680,23 @@ public class LockManager {
* @return The ParmIDs contained in the requests
* @throws GfeException
*/
private ServerResponse<?> extractParmIds(List<LockTableRequest> ltr,
List<ParmID> parmIds, String siteID) {
private ServerResponse<?> extractParmIds(List<LockTableRequest> ltrList,
List<ParmID> parmIds, String siteID) throws GfeException {
ServerResponse<?> sr = new ServerResponse<String>();
// process each request
for (int i = 0; i < ltr.size(); i++) {
if (ltr.get(i).isParmRequest()) {
for (LockTableRequest ltr : ltrList) {
if (ltr.isParmRequest()) {
ParmID parmId = ltr.getParmId();
// append parm (if not already in the list)
if (!parmIds.contains(ltr.get(i).getParmId())) {
parmIds.add(ltr.get(i).getParmId());
if (!parmIds.contains(parmId)) {
parmIds.add(GridParmManager.getDb(parmId.getDbId())
.getCachedParmID(parmId));
}
} else if (ltr.get(i).isDatabaseRequest()) {
} else if (ltr.isDatabaseRequest()) {
// get all the parmIds for that databaseId
List<ParmID> pids = GridParmManager.getParmList(
ltr.get(i).getDbId()).getPayload();
List<ParmID> pids = GridParmManager.getParmList(ltr.getDbId())
.getPayload();
for (ParmID id : pids) {
if (!parmIds.contains(id)) {
parmIds.add(id);
@ -696,7 +715,6 @@ public class LockManager {
}
}
}
// this only needs to be done once
}
}
return sr;
@ -719,23 +737,24 @@ public class LockManager {
* @throws GfeException
* If errors occur
*/
private ServerResponse<?> extractParmIdsFromLockReq(List<LockRequest> lr,
List<ParmID> parmIds, String siteID) {
private ServerResponse<?> extractParmIdsFromLockReq(List<LockRequest> lrs,
List<ParmID> parmIds, String siteID) throws GfeException {
ServerResponse<?> sr = new ServerResponse<String>();
// process each request
for (int i = 0; i < lr.size(); i++) {
if (lr.get(i).isParmRequest()) {
for (LockRequest lr : lrs) {
if (lr.isParmRequest()) {
ParmID parmId = lr.getParmId();
// append parm (if not already in the list)
if (!parmIds.contains(lr.get(i).getParmId())) {
parmIds.add(lr.get(i).getParmId());
if (!parmIds.contains(parmId)) {
parmIds.add(GridParmManager.getDb(parmId.getDbId())
.getCachedParmID(parmId));
}
} else if (lr.get(i).isDatabaseRequest()) {
List<ParmID> pids = new ArrayList<ParmID>();
} else if (lr.isDatabaseRequest()) {
ServerResponse<List<ParmID>> ssr = GridParmManager
.getParmList(lr.get(i).getDbId());
.getParmList(lr.getDbId());
sr.addMessages(ssr);
pids = ssr.getPayload();
List<ParmID> pids = ssr.getPayload();
if (!sr.isOkay()) {
return sr;
}
@ -755,11 +774,10 @@ public class LockManager {
return sr;
}
for (int j = 0; j < dbids.size(); j++) {
List<ParmID> pids = new ArrayList<ParmID>();
ServerResponse<List<ParmID>> ssr1 = GridParmManager
.getParmList(dbids.get(j));
sr.addMessages(ssr1);
pids = ssr1.getPayload();
List<ParmID> pids = ssr1.getPayload();
if (!sr.isOkay()) {
return sr;
}
@ -769,7 +787,6 @@ public class LockManager {
}
}
}
// this only needs to be done once
}
}
@ -786,17 +803,16 @@ public class LockManager {
*/
private ServerResponse<?> officialDbLockCheck(final List<LockRequest> req,
String siteID) {
ServerResponse<?> sr = new ServerResponse<String>();
Set<DatabaseID> official = null;
List<DatabaseID> official = new ArrayList<DatabaseID>();
List<DatabaseID> officialDbs = null;
try {
officialDbs = IFPServerConfigManager.getServerConfig(siteID)
.getOfficialDatabases();
List<DatabaseID> officialDbs = IFPServerConfigManager
.getServerConfig(siteID).getOfficialDatabases();
official = new HashSet<DatabaseID>(officialDbs.size(), 1);
for (int j = 0; j < officialDbs.size(); j++) {
official.add(officialDbs.get(j).stripModelTime());
for (DatabaseID offDbId : officialDbs) {
official.add(offDbId.stripModelTime());
}
} catch (GfeException e) {
sr.addMessage("Unable to get official databases from IFPServer config");
@ -806,15 +822,15 @@ public class LockManager {
}
// process each request - extracting out the databse id w/o modeltime
for (int i = 0; i < req.size(); i++) {
for (LockRequest lr : req) {
DatabaseID requestDB = null;
if (req.get(i).isDatabaseRequest()) {
requestDB = req.get(i).getDbId();
} else if (req.get(i).isParmRequest()) {
requestDB = req.get(i).getParmId().getDbId();
if (lr.isDatabaseRequest()) {
requestDB = lr.getDbId();
} else if (lr.isParmRequest()) {
requestDB = lr.getParmId().getDbId();
} else {
sr.addMessage("Invalid Lock Request (not parm or databse type): "
+ req.get(i));
+ lr);
return sr;
}
requestDB = requestDB.stripModelTime();
@ -822,7 +838,7 @@ public class LockManager {
// look for a match
if (official.contains(requestDB)) {
sr.addMessage("Request locks on official database not permitted. Request="
+ req.get(i));
+ lr);
return sr;
}

View file

@ -54,7 +54,7 @@ import com.raytheon.uf.edex.core.props.PropertiesFactory;
* Jul 17, 2009 #2590 njensen Multiple site support
* Jul 28, 2010 #6725 jdynina Manual init support
* Aug 27, 2010 #3688 wkwock Find model class for a model
*
* Aug 24, 2013 #1949 rjpeter Updated start up logic
* </pre>
*
* @author njensen
@ -63,7 +63,7 @@ import com.raytheon.uf.edex.core.props.PropertiesFactory;
public class SmartInitSrv {
private Map<Long, SmartInitScript> cachedInterpreters = new HashMap<Long, SmartInitScript>();
private final Map<Long, SmartInitScript> cachedInterpreters = new HashMap<Long, SmartInitScript>();
private static boolean enabled = true;
@ -84,7 +84,6 @@ public class SmartInitSrv {
thread.pendingInitMinTimeMillis = cfg.getPendingInitMinTimeMillis();
thread.runningInitTimeOutMillis = cfg.getRunningInitTimeOutMillis();
thread.threadSleepInterval = cfg.getThreadSleepInterval();
thread.initialDelay = cfg.getInitialDelay();
executor.execute(thread);
}
}
@ -99,21 +98,18 @@ public class SmartInitSrv {
private final transient Log logger = LogFactory.getLog(getClass());
private int initialDelay = 120000;
@Override
public void run() {
long curTime = System.currentTimeMillis();
while (!EDEXUtil.isRunning()
|| System.currentTimeMillis() - curTime < initialDelay) {
try {
Thread.sleep(threadSleepInterval);
} catch (Throwable t) {
// ignore
}
}
try {
// Wait for server to come fully up due to route dependencies
while (!EDEXUtil.isRunning()) {
try {
Thread.sleep(threadSleepInterval);
} catch (InterruptedException e) {
// ignore
}
}
// run forever
while (true) {
SmartInitRecord record = SmartInitTransactions
@ -170,7 +166,7 @@ public class SmartInitSrv {
LocalizationLevel.BASE);
File file = pathMgr.getFile(ctx, "smartinit");
if (file != null && file.exists()) {
if ((file != null) && file.exists()) {
initScript.addSitePath(file.getPath(), pathMgr
.getFile(baseCtx, "smartinit")
.getPath());
@ -178,7 +174,7 @@ public class SmartInitSrv {
}
file = pathMgr.getFile(ctx,
FileUtil.join("config", "gfe"));
if (file != null && file.exists()) {
if ((file != null) && file.exists()) {
initScript.addSitePath(
file.getPath(),
pathMgr.getFile(baseCtx,

View file

@ -30,7 +30,7 @@ import java.util.concurrent.Executor;
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Sep 24, 2010 #7277 rjpeter Initial creation
*
* Apr 23, 2013 #1949 rjpeter Removed initial delay
* </pre>
*
* @author rjpeter
@ -48,8 +48,6 @@ public class SmartInitSrvConfig {
protected int threadSleepInterval;
protected int initialDelay;
public int getThreads() {
return threads;
}
@ -90,12 +88,4 @@ public class SmartInitSrvConfig {
this.threadSleepInterval = threadSleepInterval;
}
public int getInitialDelay() {
return initialDelay;
}
public void setInitialDelay(int initialDelay) {
this.initialDelay = initialDelay;
}
}

View file

@ -6,7 +6,6 @@ com.raytheon.uf.common.dataplugin.gfe.db.objects.ParmID
com.raytheon.uf.common.dataplugin.gfe.reference.ReferenceData
com.raytheon.uf.common.dataplugin.gfe.sample.SampleData
com.raytheon.uf.common.dataplugin.gfe.server.lock.Lock
com.raytheon.uf.common.dataplugin.gfe.server.lock.LockTable
com.raytheon.uf.common.dataplugin.gfe.server.message.ServerMsg
com.raytheon.uf.common.dataplugin.gfe.weatherelement.WEGroup
com.raytheon.uf.common.dataplugin.gfe.weatherelement.WEItem

View file

@ -0,0 +1,21 @@
CREATE OR REPLACE VIEW gfe_view AS
SELECT db.siteid, db.modelname, db.dbtype, db.modeltime, parm.parmname,
parm.parmlevel, rec.rangestart, rec.rangeend, rec.inserttime
FROM gfe rec, gfe_parmid parm, gfe_dbid db
WHERE rec.parmid_id = parm.id AND parm.dbid_id = db.id;
ALTER TABLE gfe_view
OWNER TO awips;
GRANT ALL ON TABLE gfe_view TO awips;
GRANT SELECT ON TABLE gfe_view TO public;
CREATE OR REPLACE VIEW gfe_locks_view AS
SELECT db.siteid, db.modelname, db.dbtype, db.modeltime, parm.parmname,
parm.parmlevel, lk.starttime, lk.endtime, lk.wsid
FROM gfe_locks lk, gfe_parmid parm, gfe_dbid db
WHERE lk.parmid_id = parm.id AND parm.dbid_id = db.id;
ALTER TABLE gfe_locks_view
OWNER TO awips;
GRANT ALL ON TABLE gfe_locks_view TO awips;
GRANT SELECT ON TABLE gfe_locks_view TO public;

View file

@ -1,24 +0,0 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
CREATE INDEX "gfeParmTime_idx"
ON gfe
USING btree
(parmid, rangestart, rangeend);

View file

@ -20,21 +20,30 @@
package com.raytheon.uf.common.dataplugin.gfe;
import java.io.Serializable;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Embedded;
import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
import javax.persistence.FetchType;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.PrimaryKeyJoinColumn;
import javax.persistence.SequenceGenerator;
import javax.persistence.Table;
import org.hibernate.annotations.Index;
import org.hibernate.annotations.Type;
import com.raytheon.edex.util.Util;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.GFERecord;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.ParmID;
import com.raytheon.uf.common.message.WsId;
import com.raytheon.uf.common.serialization.ISerializableObject;
import com.raytheon.uf.common.serialization.annotations.DynamicSerialize;
import com.raytheon.uf.common.serialization.annotations.DynamicSerializeElement;
import com.raytheon.uf.common.time.TimeRange;
@ -49,22 +58,26 @@ import com.raytheon.uf.common.time.TimeRange;
* randerso Initial creation
* 02/27/2008 879 rbell Added clone()
* 04/18/2008 #875 bphillip Changed date fields to use java.util.Calendar
*
* 03/28/2013 1949 rjpeter Normalized database structure.
* </pre>
*
* @author randerso
* @version 1.0
*/
@Entity(name = "gfe_gridhistory")
@Entity
@Table(name = "gfe_gridhistory")
@DynamicSerialize
public class GridDataHistory implements Cloneable, Serializable,
ISerializableObject {
public class GridDataHistory implements Cloneable {
private static final long serialVersionUID = 1L;
/**
* Auto-generated surrogate key
*/
@Id
@GeneratedValue()
private int key;
@SequenceGenerator(name = "GFE_HISTORY_GENERATOR", sequenceName = "gfe_history_seq", allocationSize = 1)
@GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "GFE_HISTORY_GENERATOR")
private int id;
public enum OriginType {
INITIALIZED("Populated"), TIME_INTERPOLATED("Interpolated"), SCRATCH(
@ -89,10 +102,13 @@ public class GridDataHistory implements Cloneable, Serializable,
};
@Column
@Type(type = "com.raytheon.uf.common.dataplugin.gfe.db.type.OriginHibType")
@Enumerated(value = EnumType.STRING)
@DynamicSerializeElement
private OriginType origin;
// DO NOT LINK TO PARMID TABLE. The ParmId table may be purged out
// independent of the history of a forecast grid. Need to keep the history
// of where the grid came from.
@Column
@Type(type = "com.raytheon.uf.common.dataplugin.gfe.db.type.ParmIdType")
@DynamicSerializeElement
@ -123,6 +139,16 @@ public class GridDataHistory implements Cloneable, Serializable,
@DynamicSerializeElement
private Date lastSentTime;
/**
* Used only for hibernate mappings to allow a look up of GridDataHistory by
* a given parmId/timeRange. Do not set cascade options.
*/
@ManyToOne(fetch = FetchType.LAZY, optional = false)
@PrimaryKeyJoinColumn
@JoinColumn(updatable = false)
@Index(name = "gfe_gridhistory_history_idx")
private GFERecord parent;
/**
* Default constructor (all fields initialized null)
*/
@ -452,16 +478,16 @@ public class GridDataHistory implements Cloneable, Serializable,
/**
* @return the key
*/
public int getKey() {
return key;
public int getId() {
return id;
}
/**
* @param key
* the key to set
*/
public void setKey(int key) {
this.key = key;
public void setId(int id) {
this.id = id;
}
public void replaceValues(GridDataHistory replacement) {
@ -573,4 +599,19 @@ public class GridDataHistory implements Cloneable, Serializable,
}
return true;
}
/**
* @return the parent
*/
public GFERecord getParent() {
return parent;
}
/**
* @param parent
* the parent to set
*/
public void setParent(GFERecord parent) {
this.parent = parent;
}
}

View file

@ -20,25 +20,37 @@
package com.raytheon.uf.common.dataplugin.gfe.db.objects;
import java.io.Serializable;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.Set;
import java.util.TimeZone;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
import javax.persistence.FetchType;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.OneToMany;
import javax.persistence.SequenceGenerator;
import javax.persistence.Table;
import javax.persistence.Transient;
import javax.persistence.UniqueConstraint;
import org.hibernate.annotations.OnDelete;
import org.hibernate.annotations.OnDeleteAction;
import com.raytheon.uf.common.dataplugin.annotations.DataURI;
import com.raytheon.uf.common.dataplugin.gfe.serialize.DatabaseIDAdapter;
import com.raytheon.uf.common.serialization.ISerializableObject;
import com.raytheon.uf.common.serialization.annotations.DynamicSerialize;
import com.raytheon.uf.common.serialization.annotations.DynamicSerializeTypeAdapter;
/**
*
* Object used to identify an hdf5 grid "database".<br>
* This is a port from original DatabaseID found in AWIPS I
*
@ -47,23 +59,22 @@ import com.raytheon.uf.common.serialization.annotations.DynamicSerializeTypeAdap
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* 3/6/08 875 bphillip Initial Creation
* 8/19/09 2899 njensen Rewrote equals() for performance
* 5/08/12 #600 dgilling Implement clone().
* 6/25/12 #766 dgilling Fix isValid().
* 01/18/13 #1504 randerso Removed setters since class should be immutable
*
* 8/19/09 2899 njensen Rewrote equals() for performance
* 5/08/12 600 dgilling Implement clone().
* 6/25/12 766 dgilling Fix isValid().
* 01/18/13 1504 randerso Removed setters since class should be immutable
* 03/28/13 1949 rjpeter Normalized database structure.
* </pre>
*
* @author bphillip
* @version 1.0
*/
@XmlRootElement
@XmlAccessorType(XmlAccessType.NONE)
@XmlJavaTypeAdapter(value = DatabaseIDAdapter.class)
@Entity
@Table(name = "gfe_dbid", uniqueConstraints = { @UniqueConstraint(columnNames = {
"siteId", "modelName", "modelTime", "dbType" }) })
@DynamicSerialize
@DynamicSerializeTypeAdapter(factory = DatabaseIDAdapter.class)
public class DatabaseID implements Serializable, Comparable<DatabaseID>,
ISerializableObject, Cloneable {
public class DatabaseID implements Comparable<DatabaseID> {
private static final long serialVersionUID = 5792890762609478694L;
@ -92,27 +103,59 @@ public class DatabaseID implements Serializable, Comparable<DatabaseID>,
NONE, GRID
};
/**
* Auto-generated surrogate key
*/
@Id
@SequenceGenerator(name = "GFE_DBID_GENERATOR", sequenceName = "gfe_dbid_seq")
@GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "GFE_DBID_GENERATOR")
private int id;
/** The site identifier */
@Column(length = 4, nullable = false)
@DataURI(position = 0)
private String siteId;
/** The database format */
@Column(nullable = false)
@Enumerated(EnumType.STRING)
// will this ever not be GRID for any persisted database?
private DataType format;
/** Optional database type */
@Column(length = 15)
@DataURI(position = 3)
private String dbType;
/** The model name */
@Column(length = 64, nullable = false)
@DataURI(position = 1)
private String modelName;
// TODO: Use actual time for db column
/** Model Time yyyymmdd_hhmm */
private String modelTime;
@Column(length = 13, nullable = false)
@DataURI(position = 2)
private String modelTime = NO_MODEL_TIME;
/** The model identifier */
@Transient
private String modelId;
/** The short model identifier */
@Transient
private String shortModelId;
/**
* Used only for hibernate mappings to allow a cascade delete to all child
* parmIds when the databaseId is deleted. These should not be loaded by or
* referenced normally from code from this object.
*/
@OneToMany(fetch = FetchType.LAZY, mappedBy = "dbId", cascade = { CascadeType.REMOVE })
@OnDelete(action = OnDeleteAction.CASCADE)
@SuppressWarnings("unused")
private Set<ParmID> parmIds;
/**
* Creates a new DatabaseID
*/
@ -126,9 +169,7 @@ public class DatabaseID implements Serializable, Comparable<DatabaseID>,
* @param dbIdentifier
*/
public DatabaseID(String dbIdentifier) {
if (decodeIdentifier(dbIdentifier)) {
encodeIdentifier();
} else {
if (!decodeIdentifier(dbIdentifier)) {
// set to default values
format = DataType.NONE;
dbType = "";
@ -181,7 +222,6 @@ public class DatabaseID implements Serializable, Comparable<DatabaseID>,
this.dbType = dbType;
this.modelName = modelName;
this.modelTime = modelTime;
encodeIdentifier();
}
/**
@ -202,6 +242,15 @@ public class DatabaseID implements Serializable, Comparable<DatabaseID>,
this(siteId, format, dbType, modelName, NO_MODEL_TIME);
}
/**
* Returns the id field, auto-generated surrogate key.
*
* @return
*/
public int getId() {
return id;
}
/**
* The less than operator. Ordering is by site, format, type, model, and
* modeltime. <br>
@ -269,7 +318,7 @@ public class DatabaseID implements Serializable, Comparable<DatabaseID>,
*/
public DatabaseID stripType() {
if (modelTime.equals(DatabaseID.NO_MODEL_TIME)) {
if (NO_MODEL_TIME.equals(modelTime)) {
return new DatabaseID(siteId, format, "", modelName);
} else {
return new DatabaseID(siteId, format, "", modelName, modelTime);
@ -366,7 +415,7 @@ public class DatabaseID implements Serializable, Comparable<DatabaseID>,
shortModelId += "_" + dbType;
}
if (!modelTime.equals(NO_MODEL_TIME)) {
if (!NO_MODEL_TIME.equals(modelTime)) {
modelId += "_" + modelTime;
shortModelId += "_" + modelTime.substring(6, 8)
+ modelTime.substring(9, 11);
@ -379,7 +428,7 @@ public class DatabaseID implements Serializable, Comparable<DatabaseID>,
@Override
public String toString() {
return modelId;
return getModelId();
}
/**
@ -426,6 +475,10 @@ public class DatabaseID implements Serializable, Comparable<DatabaseID>,
*/
public String getModelId() {
if (modelId == null) {
encodeIdentifier();
}
return modelId;
}
@ -434,6 +487,10 @@ public class DatabaseID implements Serializable, Comparable<DatabaseID>,
*/
public String getShortModelId() {
if (shortModelId == null) {
encodeIdentifier();
}
return shortModelId;
}
@ -448,7 +505,9 @@ public class DatabaseID implements Serializable, Comparable<DatabaseID>,
int result = 1;
result = prime * result + (dbType == null ? 0 : dbType.hashCode());
result = prime * result + (format == null ? 0 : format.hashCode());
result = prime * result + (modelId == null ? 0 : modelId.hashCode());
String localModelId = getModelId();
result = prime * result
+ (localModelId == null ? 0 : localModelId.hashCode());
result = prime * result
+ (modelTime == null ? 0 : modelTime.hashCode());
result = prime * result + (siteId == null ? 0 : siteId.hashCode());
@ -488,6 +547,7 @@ public class DatabaseID implements Serializable, Comparable<DatabaseID>,
return date;
}
// TODO: DELETE THIS METHOD
public Date getModelTimeAsDate() {
if (this.modelTime.equals(NO_MODEL_TIME)) {
return new Date(0);
@ -538,16 +598,4 @@ public class DatabaseID implements Serializable, Comparable<DatabaseID>,
int time = -this.getModelTimeAsDate().compareTo(o.getModelTimeAsDate());
return time;
}
/*
* (non-Javadoc)
*
* @see java.lang.Object#clone()
*/
@Override
protected DatabaseID clone() throws CloneNotSupportedException {
return new DatabaseID(this.siteId, this.format, this.dbType,
this.modelName, this.modelTime);
}
}

View file

@ -28,40 +28,35 @@ import java.util.Calendar;
import java.util.List;
import java.util.TimeZone;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.OneToMany;
import javax.persistence.OrderBy;
import javax.persistence.PrimaryKeyJoinColumn;
import javax.persistence.SequenceGenerator;
import javax.persistence.Table;
import javax.persistence.Transient;
import javax.persistence.UniqueConstraint;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import org.hibernate.annotations.Cache;
import org.hibernate.annotations.CacheConcurrencyStrategy;
import org.hibernate.annotations.BatchSize;
import org.hibernate.annotations.Index;
import org.hibernate.annotations.Type;
import org.hibernate.annotations.OnDelete;
import org.hibernate.annotations.OnDeleteAction;
import com.raytheon.uf.common.dataplugin.IDecoderGettable;
import com.raytheon.uf.common.dataplugin.PluginDataObject;
import com.raytheon.uf.common.dataplugin.annotations.DataURI;
import com.raytheon.uf.common.dataplugin.gfe.GridDataHistory;
import com.raytheon.uf.common.dataplugin.gfe.util.GfeUtil;
import com.raytheon.uf.common.serialization.annotations.DynamicSerialize;
import com.raytheon.uf.common.serialization.annotations.DynamicSerializeElement;
import com.raytheon.uf.common.time.DataTime;
import com.raytheon.uf.common.time.TimeRange;
/**
* Record implementation for GFE plugin.
* Record implementation for GFE plugin. Record is essentially read only and
* should never need to be updated. GridDataHistory referenced by record may
* update.
*
* <pre>
*
@ -70,37 +65,30 @@ import com.raytheon.uf.common.time.TimeRange;
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* -------- --- randerso Initial creation
* 20070914 379 jkorman Added populateDataStore() and
* 20070914 379 jkorman Added populateDataStore() and
* getPersistenceTime() from new IPersistable
* 20071129 472 jkorman Added IDecoderGettable interface.
* 06/17/08 #940 bphillip Implemented GFE Locking
* Apr 4, 2013 1846 bkowal Added an index on refTime and forecastTime
* Apr 12, 2013 1857 bgonzale Added SequenceGenerator annotation.
* 20071129 472 jkorman Added IDecoderGettable interface.
* 06/17/08 940 bphillip Implemented GFE Locking
* Apr 4, 2013 1846 bkowal Added an index on refTime and forecastTime
* Apr 12, 2013 1857 bgonzale Added SequenceGenerator annotation.
* Apr 23, 2013 1949 rjpeter Normalized database structure.
* </pre>
*
* @author randerso
* @version 1
*/
/**
*
*/
@Entity
@SequenceGenerator(initialValue = 1, name = PluginDataObject.ID_GEN, sequenceName = "gfeseq")
@Table(name = "gfe", uniqueConstraints = { @UniqueConstraint(columnNames = { "dataURI" }) })
@Table(name = "gfe", uniqueConstraints = { @UniqueConstraint(columnNames = {
"parmId_id", "rangestart", "rangeend", "refTime", "forecasttime" }) })
/*
* Both refTime and forecastTime are included in the refTimeIndex since
* forecastTime is unlikely to be used.
*/
@org.hibernate.annotations.Table(
appliesTo = "gfe",
indexes = {
@Index(name = "gfe_refTimeIndex", columnNames = { "refTime", "forecastTime" } )
}
)
@Cache(usage = CacheConcurrencyStrategy.TRANSACTIONAL)
@XmlRootElement
@XmlAccessorType(XmlAccessType.NONE)
@org.hibernate.annotations.Table(appliesTo = "gfe", indexes = { @Index(name = "gfe_refTimeIndex", columnNames = {
"refTime", "forecastTime" }) })
@DynamicSerialize
@BatchSize(size = 500)
public class GFERecord extends PluginDataObject {
private static final long serialVersionUID = 1L;
@ -110,59 +98,41 @@ public class GFERecord extends PluginDataObject {
NONE, SCALAR, VECTOR, WEATHER, DISCRETE
};
/** The name of the parm parameter */
@Column(length = 100)
@XmlAttribute
@DynamicSerializeElement
private String parmName;
/** The name of the parm level */
@Column(length = 8)
@XmlAttribute
@DynamicSerializeElement
private String parmLevel;
/**
* The parmID of the associated parm.<br>
* This field is constructed when the getter is called.<br>
* It is constructed from the parmName, parmLevel, and the databaseID
*/
@DataURI(position = 1)
@Column
@Type(type = "com.raytheon.uf.common.dataplugin.gfe.db.type.ParmIdType")
@XmlElement
@DataURI(position = 1, embedded = true)
@ManyToOne(fetch = FetchType.EAGER, optional = false)
@PrimaryKeyJoinColumn
@DynamicSerializeElement
private ParmID parmId;
/**
* The database associated with this record
*/
@DataURI(position = 2)
@Column
@Type(type = "com.raytheon.uf.common.dataplugin.gfe.db.type.DatabaseIdType")
@XmlElement
@DynamicSerializeElement
private DatabaseID dbId;
/** The grid parm information associated with this parameter */
@Transient
private GridParmInfo gridInfo;
@OneToMany(cascade = CascadeType.ALL, fetch = FetchType.EAGER, orphanRemoval = true)
@JoinColumn(name = "parent", nullable = false)
@Index(name = "gfe_gridhistory_history_idx")
@OrderBy("key")
@XmlElement
/**
* GridHistories for this record. Only cascade on remove. Insert/Update
* managed independently.
*/
@OneToMany(fetch = FetchType.EAGER, mappedBy = "parent")
@BatchSize(size = 500)
@OnDelete(action = OnDeleteAction.CASCADE)
@OrderBy("id")
@DynamicSerializeElement
private List<GridDataHistory> gridHistory = new ArrayList<GridDataHistory>(
0);
private List<GridDataHistory> gridHistory = null;
/**
* Histories to remove when updated
*/
@Transient
private List<GridDataHistory> oldHistory = null;
/**
* Creates a new empty GFERecord. Must use setters to fill in private fields
* or the object is invalid.
*/
public GFERecord() {
}
/**
@ -180,13 +150,14 @@ public class GFERecord extends PluginDataObject {
cal.setTime(timeRange.getStart());
this.dataTime = new DataTime(cal, timeRange);
this.parmId = parmId;
this.parmName = parmId.getParmName();
this.parmLevel = parmId.getParmLevel();
this.dbId = parmId.getDbId();
}
public void addHistoryEntry(GridDataHistory historyEntry) {
    // lazily create the history list on first add
    if (gridHistory == null) {
        gridHistory = new ArrayList<GridDataHistory>(1);
    }
    gridHistory.add(historyEntry);
    // keep the bidirectional association in sync: the history side owns the
    // relationship per the OneToMany(mappedBy = "parent") mapping on
    // gridHistory
    historyEntry.setParent(this);
}
/**
@ -226,27 +197,15 @@ public class GFERecord extends PluginDataObject {
}
public DatabaseID getDbId() {
return dbId;
}
public void setDbId(DatabaseID dbId) {
this.dbId = dbId;
return parmId.getDbId();
}
public String getParmName() {
return parmName;
}
public void setParmName(String parmName) {
this.parmName = parmName;
return parmId.getParmName();
}
public String getParmLevel() {
return parmLevel;
}
public void setParmLevel(String parmLevel) {
this.parmLevel = parmLevel;
return parmId.getParmLevel();
}
public GridParmInfo getGridInfo() {
@ -261,14 +220,14 @@ public class GFERecord extends PluginDataObject {
return this.dataTime.getValidPeriod();
}
public String getGridHistoryStrings() {
return GfeUtil.getHistoryStrings(this.gridHistory);
}
/**
 * Returns the grid history entries for this record, creating an empty list
 * on first access so callers never observe a null history.
 *
 * @return the (possibly empty) list of grid data histories
 */
public List<GridDataHistory> getGridHistory() {
    List<GridDataHistory> histories = gridHistory;
    if (histories == null) {
        histories = new ArrayList<GridDataHistory>(0);
        gridHistory = histories;
    }
    return histories;
}
@ -278,6 +237,41 @@ public class GFERecord extends PluginDataObject {
*/
public void setGridHistory(List<GridDataHistory> gridHistory) {
this.gridHistory = gridHistory;
if (gridHistory != null) {
for (GridDataHistory hist : gridHistory) {
hist.setParent(this);
}
}
}
/**
* @return the oldHistory
*/
public List<GridDataHistory> getOldHistory() {
return oldHistory;
}
/**
 * Reconciles this record's history entries with newHistory in place: entries
 * at matching indexes are updated via replaceValues, extra entries from
 * newHistory are appended (with their parent set to this record), and any
 * surplus entries beyond newHistory's length are removed. The first batch of
 * removed entries is remembered in oldHistory so the caller can delete them
 * from the database.
 *
 * @param newHistory
 *            the authoritative history list to consolidate against
 */
public void consolidateHistory(List<GridDataHistory> newHistory) {
    for (int i = 0; i < newHistory.size(); i++) {
        if (i < gridHistory.size()) {
            // update the existing entry in place
            gridHistory.get(i).replaceValues(newHistory.get(i));
        } else {
            GridDataHistory hist = newHistory.get(i);
            hist.setParent(this);
            gridHistory.add(hist);
        }
    }

    if (gridHistory.size() > newHistory.size()) {
        // view of the surplus entries past the end of newHistory
        List<GridDataHistory> excess = gridHistory.subList(
                newHistory.size(), gridHistory.size());
        if (oldHistory == null) {
            // snapshot before removal so the entries can be purged later
            oldHistory = new ArrayList<GridDataHistory>(excess);
        }
        // BUG FIX: the previous ascending-index remove loop
        // (remove(i) while i grows and the list shrinks) skipped every
        // other surplus entry; clearing the subList view removes them all.
        excess.clear();
    }
}
}

View file

@ -20,19 +20,33 @@
package com.raytheon.uf.common.dataplugin.gfe.db.objects;
import java.io.Serializable;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Set;
import java.util.TimeZone;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.ManyToOne;
import javax.persistence.OneToMany;
import javax.persistence.PrimaryKeyJoinColumn;
import javax.persistence.SequenceGenerator;
import javax.persistence.Table;
import javax.persistence.Transient;
import javax.persistence.UniqueConstraint;
import org.hibernate.annotations.OnDelete;
import org.hibernate.annotations.OnDeleteAction;
import com.raytheon.edex.util.Util;
import com.raytheon.uf.common.dataplugin.annotations.DataURI;
import com.raytheon.uf.common.dataplugin.gfe.serialize.ParmIDAdapter;
import com.raytheon.uf.common.serialization.ISerializableObject;
import com.raytheon.uf.common.dataplugin.gfe.server.lock.Lock;
import com.raytheon.uf.common.serialization.annotations.DynamicSerialize;
import com.raytheon.uf.common.serialization.annotations.DynamicSerializeTypeAdapter;
@ -47,66 +61,92 @@ import com.raytheon.uf.common.serialization.annotations.DynamicSerializeTypeAdap
* ------------ ---------- ----------- --------------------------
* 3/6/08 875 bphillip Initial Creation
* 5/8/12 #600 dgilling Implement clone().
* 01/18/13 #1504 randerso Removed setters since class should be immutable
*
* 01/18/13 1504 randerso Removed setters since class should be immutable
* 03/28/13 1949 rjpeter Normalized database structure.
* </pre>
*
* @author bphillip
* @version 1.0
*/
@XmlRootElement
@XmlAccessorType(XmlAccessType.NONE)
@XmlJavaTypeAdapter(value = ParmIDAdapter.class)
@Entity
@Table(name = "gfe_parmid", uniqueConstraints = { @UniqueConstraint(columnNames = {
"dbId_id", "parmName", "parmLevel" }) })
@DynamicSerialize
@DynamicSerializeTypeAdapter(factory = ParmIDAdapter.class)
public class ParmID implements Comparable<ParmID>, Serializable,
ISerializableObject, Cloneable {
public class ParmID implements Comparable<ParmID> {
private static final long serialVersionUID = 6801523496768037356L;
private static final String DEFAULT_LEVEL = "SFC";
private static final SimpleDateFormat MODEL_TIME_FORMAT;
static {
MODEL_TIME_FORMAT = new SimpleDateFormat("MMMddHH");
MODEL_TIME_FORMAT.setTimeZone(TimeZone.getTimeZone("GMT"));
}
/**
* Auto-generated surrogate key
*/
@Id
@SequenceGenerator(name = "GFE_PARMID_GENERATOR", sequenceName = "gfe_parmid_seq")
@GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "GFE_PARMID_GENERATOR")
private int id;
/** The name of the parm (i.e. T for Temperature) */
@Column(length = 100)
@DataURI(position = 1)
private String parmName;
/** The level at which this parm applies */
@Column(length = 8)
@DataURI(position = 2)
private String parmLevel;
/** The database that this parm ID is associated with */
@ManyToOne(fetch = FetchType.EAGER, optional = false)
@PrimaryKeyJoinColumn
@DataURI(position = 0, embedded = true)
private DatabaseID dbId;
/**
* The parameter name/level information <br>
* Example: T_SFC (Temperature parameter and surface level)
*/
@Transient
private String compositeName;
/** The parmID including the parameter, level and database ID */
@Transient
private String shortParmId;
/** A more extended version of the parameter ID */
@Transient
private String parmId;
/**
* Used only for hibernate mappings to allow a cascade delete to all records
* when the databaseId is deleted
*/
@OneToMany(fetch = FetchType.LAZY, mappedBy = "parmId", cascade = { CascadeType.REMOVE })
@OnDelete(action = OnDeleteAction.CASCADE)
@SuppressWarnings("unused")
private Set<GFERecord> records;
/**
* Used only for hibernate mappings to allow a cascade delete to all locks
* when the databaseId is deleted
*/
@OneToMany(fetch = FetchType.LAZY, mappedBy = "parmId", cascade = { CascadeType.REMOVE })
@OnDelete(action = OnDeleteAction.CASCADE)
@SuppressWarnings("unused")
private Set<Lock> locks;
@Override
public String toString() {
return this.parmId;
}
/*
* (non-Javadoc)
*
* @see java.lang.Object#clone()
*/
@Override
public ParmID clone() throws CloneNotSupportedException {
return new ParmID(this.parmName, this.dbId.clone(), this.parmLevel);
return getParmId();
}
/**
@ -120,7 +160,6 @@ public class ParmID implements Comparable<ParmID>, Serializable,
}
public ParmID() {
}
/**
@ -132,11 +171,9 @@ public class ParmID implements Comparable<ParmID>, Serializable,
* The model name
*/
public ParmID(String parmName, String parmModel) {
this.parmName = parmName;
this.parmLevel = defaultLevel();
this.dbId = new DatabaseID(parmModel);
encodeIdentifier();
}
/**
@ -153,7 +190,6 @@ public class ParmID implements Comparable<ParmID>, Serializable,
this.parmName = parmName;
this.parmLevel = level;
this.dbId = new DatabaseID(parmModel);
encodeIdentifier();
}
/**
@ -164,7 +200,6 @@ public class ParmID implements Comparable<ParmID>, Serializable,
*/
public ParmID(String parmIdentifier) {
decodeIdentifier(parmIdentifier);
encodeIdentifier();
}
/**
@ -179,7 +214,6 @@ public class ParmID implements Comparable<ParmID>, Serializable,
this.parmName = parmName;
this.parmLevel = defaultLevel();
this.dbId = dbId;
encodeIdentifier();
}
/**
@ -196,7 +230,15 @@ public class ParmID implements Comparable<ParmID>, Serializable,
this.parmName = parmName;
this.parmLevel = level;
this.dbId = dbId;
encodeIdentifier();
}
/**
* Returns the id field, auto-generated surrogate key.
*
* @return
*/
public int getId() {
return id;
}
/**
@ -211,7 +253,7 @@ public class ParmID implements Comparable<ParmID>, Serializable,
if (DEFAULT_LEVEL.equals(parmLevel)) {
return parmName;
}
return this.compositeName;
return getCompositeName();
}
/**
@ -251,18 +293,20 @@ public class ParmID implements Comparable<ParmID>, Serializable,
if (DEFAULT_LEVEL.equals(this.parmLevel)) {
expressionName = parmName;
} else {
expressionName = compositeName;
expressionName = getCompositeName();
}
} else {
expressionName = compositeName + "_" + dbID.getSiteId() + "_"
expressionName = getCompositeName() + "_" + dbID.getSiteId() + "_"
+ dbID.getDbType() + "_" + dbID.getModelName();
if (includeTime) {
Date modelDate = dbID.getModelDate();
if (modelDate == null) {
expressionName += "_00000000_0000";
} else {
expressionName += "_"
+ MODEL_TIME_FORMAT.format(dbID.getModelDate());
synchronized (MODEL_TIME_FORMAT) {
expressionName += "_"
+ MODEL_TIME_FORMAT.format(dbID.getModelDate());
}
}
}
}
@ -279,11 +323,12 @@ public class ParmID implements Comparable<ParmID>, Serializable,
*/
public boolean isValid() {
if (parmName == null || parmLevel == null || dbId == null) {
if ((parmName == null) || (parmLevel == null) || (dbId == null)) {
return false;
}
if (parmName.length() < 1 || parmLevel.length() < 1 || !dbId.isValid()) {
if ((parmName.length() < 1) || (parmLevel.length() < 1)
|| !dbId.isValid()) {
return false;
}
@ -358,6 +403,10 @@ public class ParmID implements Comparable<ParmID>, Serializable,
*/
public String getCompositeName() {
if (compositeName == null) {
encodeIdentifier();
}
return compositeName;
}
@ -369,6 +418,7 @@ public class ParmID implements Comparable<ParmID>, Serializable,
if (shortParmId == null) {
encodeIdentifier();
}
return shortParmId;
}
@ -377,6 +427,10 @@ public class ParmID implements Comparable<ParmID>, Serializable,
*/
public String getParmId() {
if (parmId == null) {
encodeIdentifier();
}
return parmId;
}
@ -387,7 +441,7 @@ public class ParmID implements Comparable<ParmID>, Serializable,
*/
@Override
public int hashCode() {
return parmId.hashCode();
return getParmId().hashCode();
}
/*
@ -463,8 +517,8 @@ public class ParmID implements Comparable<ParmID>, Serializable,
* @param parmID
* @return
*/
public static String shortSerializer(ParmID parmID) {
return parmID.toString();
public static String shortSerializer(ParmID localParmID) {
return localParmID.toString();
}
/**

View file

@ -0,0 +1,95 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.uf.common.dataplugin.gfe.exception;
/**
* GFE Exception for requesting a parm that isn't known to the system.
*
* <pre>
* SOFTWARE HISTORY
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Apr 08, 2013 1949 rjpeter Initial Creation
* </pre>
*
* @author rjpeter
* @version 1.0
*/
public class UnknownParmIdException extends GfeException {

    private static final long serialVersionUID = 1L;

    /**
     * Constructs a new exception with <code>null</code> as its detail
     * message. The cause is not initialized, and may subsequently be
     * initialized by a call to {@link #initCause}.
     */
    public UnknownParmIdException() {
        super();
    }

    /**
     * Constructs a new exception with the specified detail message. The cause
     * is not initialized, and may subsequently be initialized by a call to
     * {@link #initCause}.
     *
     * @param aCause
     *            the detail message. The detail message is saved for later
     *            retrieval by the {@link #getMessage()} method.
     */
    public UnknownParmIdException(String aCause) {
        super(aCause);
    }

    /**
     * Constructs a new exception with a detail message and an existing
     * exception. Used for exception chaining to preserve state.
     *
     * @param aCause
     *            the detail message (which is saved for later retrieval by the
     *            {@link #getMessage()} method).
     * @param anException
     *            the cause (which is saved for later retrieval by the
     *            {@link #getCause()} method). (A <tt>null</tt> value is
     *            permitted, and indicates that the cause is nonexistent or
     *            unknown.)
     */
    public UnknownParmIdException(String aCause, Throwable anException) {
        super(aCause, anException);
    }

    /**
     * Constructs a new exception with the specified cause and a detail message
     * of <tt>(cause==null ? null : cause.toString())</tt> (which typically
     * contains the class and detail message of <tt>cause</tt>). This
     * constructor is useful for exceptions that are little more than wrappers
     * for other throwables.
     *
     * @param anException
     *            the cause (which is saved for later retrieval by the
     *            {@link #getCause()} method). (A <tt>null</tt> value is
     *            permitted, and indicates that the cause is nonexistent or
     *            unknown.)
     */
    public UnknownParmIdException(Throwable anException) {
        super(anException);
    }
}

View file

@ -1,79 +0,0 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.uf.common.dataplugin.gfe.request;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.DatabaseID;
/**
* Request object for getting the latest insert time for a given database ID
*
* <pre>
*
* SOFTWARE HISTORY
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* 8/16/2010 6349 bphillip Initial creation
*
* </pre>
*
* @author bphillip
* @version 1.0
*/
public class GetLatestDbTimeRequest extends AbstractGfeRequest {

    /** Database whose most recent grid insert time is being requested. */
    private DatabaseID dbId;

    /**
     * Creates a new GetLatestDbTimeRequest for the given database.
     *
     * @param dbId
     *            the database ID to get the latest insert time for
     */
    public GetLatestDbTimeRequest(DatabaseID dbId) {
        this.dbId = dbId;
    }

    /**
     * Creates a new GetLatestDbTimeRequest from the string encoding of a
     * database ID.
     *
     * @param dbId
     *            the database ID, in string form, to get the latest insert
     *            time for
     */
    public GetLatestDbTimeRequest(String dbId) {
        this(new DatabaseID(dbId));
    }

    /**
     * @return the database ID this request targets
     */
    public DatabaseID getDbId() {
        return dbId;
    }

    /**
     * @param dbId
     *            the database ID this request should target
     */
    public void setDbId(DatabaseID dbId) {
        this.dbId = dbId;
    }
}

View file

@ -1,95 +0,0 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.uf.common.dataplugin.gfe.request;
/**
* Request object for getting the latest database ID for a given model name and
* site ID.
*
* <pre>
*
* SOFTWARE HISTORY
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Aug 17, 2010 dgilling Initial creation
*
* </pre>
*
* @author dgilling
* @version 1.0
*/
public class GetLatestModelDbIdRequest extends AbstractGfeRequest {

    /** Site identifier the lookup is scoped to. */
    private String siteId;

    /** Model name whose latest database ID is wanted. */
    private String modelName;

    /**
     * Builds a request for the latest database ID of the given model at the
     * given site.
     *
     * @param siteId
     *            the site identifier to search for
     * @param modelName
     *            the name of the model to search for
     */
    public GetLatestModelDbIdRequest(String siteId, String modelName) {
        this.modelName = modelName;
        this.siteId = siteId;
    }

    /**
     * @return the site identifier
     */
    public String getSiteId() {
        return siteId;
    }

    /**
     * @param siteId
     *            the site identifier to set
     */
    public void setSiteId(String siteId) {
        this.siteId = siteId;
    }

    /**
     * @return the model name
     */
    public String getModelName() {
        return modelName;
    }

    /**
     * @param modelName
     *            the model name to set
     */
    public void setModelName(String modelName) {
        this.modelName = modelName;
    }
}

View file

@ -0,0 +1,125 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.uf.common.dataplugin.gfe.serialize;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.ParmID;
import com.raytheon.uf.common.dataplugin.gfe.server.lock.Lock;
import com.raytheon.uf.common.dataplugin.gfe.server.lock.LockTable;
import com.raytheon.uf.common.message.WsId;
import com.raytheon.uf.common.serialization.IDeserializationContext;
import com.raytheon.uf.common.serialization.ISerializationContext;
import com.raytheon.uf.common.serialization.ISerializationTypeAdapter;
import com.raytheon.uf.common.serialization.SerializationException;
/**
* SerializeAdapter for LockTable. Strips out parmId and common WsIds to reduce
* serialization size and reflection calls.
*
* <pre>
* SOFTWARE HISTORY
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Apr 02, 2013 1949 rjpeter Initial creation
* </pre>
*
* @author rjpeter
* @version 1.0
*/
public class LockTableAdapter implements ISerializationTypeAdapter<LockTable> {

    /**
     * Serializes a LockTable.
     *
     * Wire format: the ParmID object, the count of unique WsIds, each unique
     * WsId (the lock table owner's WsId is always written first, at index 0),
     * the lock count, then one (startTime, endTime, wsId index) triple per
     * lock. Writing each distinct WsId once and referencing it by index avoids
     * re-serializing the same WsId for every lock.
     */
    @Override
    public void serialize(ISerializationContext serializer, LockTable lockTable)
            throws SerializationException {
        // get all the unique WsIds
        int index = 0;

        // use linked hash so wsIds stay in order
        LinkedHashMap<WsId, Integer> wsIds = new LinkedHashMap<WsId, Integer>();
        // the table owner's WsId goes first; deserialize() depends on it
        // being at index 0
        wsIds.put(lockTable.getWsId(), index++);
        List<Lock> locks = lockTable.getLocks();
        WsId curWsId = null;
        for (Lock lock : locks) {
            curWsId = lock.getWsId();
            if (!wsIds.containsKey(curWsId)) {
                wsIds.put(curWsId, index++);
            }
        }

        // write the parm
        serializer.writeObject(lockTable.getParmId());

        // write the unique wsIds; index holds the number of entries
        serializer.writeI32(index);
        for (WsId id : wsIds.keySet()) {
            serializer.writeObject(id);
        }

        // write each lock as a (start, end, wsId index) triple
        serializer.writeI32(locks.size());
        for (Lock lock : locks) {
            serializer.writeI64(lock.getStartTime());
            serializer.writeI64(lock.getEndTime());
            serializer.writeI32(wsIds.get(lock.getWsId()));
        }
    }

    /**
     * Deserializes a LockTable written by
     * {@link #serialize(ISerializationContext, LockTable)}. Fields are read in
     * the exact order they were written; ids[0] is the table owner's WsId.
     */
    @Override
    public LockTable deserialize(IDeserializationContext deserializer)
            throws SerializationException {
        ParmID parmId = (ParmID) deserializer.readObject();
        int numWsIds = deserializer.readI32();
        WsId[] ids = new WsId[numWsIds];
        for (int i = 0; i < numWsIds; i++) {
            ids[i] = (WsId) deserializer.readObject();
        }

        int numLocks = deserializer.readI32();
        List<Lock> locks = new ArrayList<Lock>(numLocks);
        long startDate = 0;
        long endDate = 0;
        int wsIdIndex = 0;
        for (int i = 0; i < numLocks; i++) {
            // each lock was written as (start, end, wsId index)
            startDate = deserializer.readI64();
            endDate = deserializer.readI64();
            wsIdIndex = deserializer.readI32();
            locks.add(new Lock(parmId, ids[wsIdIndex], startDate, endDate));
        }

        // ids[0] is the owner WsId, written first by serialize()
        return new LockTable(parmId, locks, ids[0]);
    }
}

View file

@ -20,31 +20,30 @@
package com.raytheon.uf.common.dataplugin.gfe.server.lock;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.ManyToOne;
import javax.persistence.PrimaryKeyJoinColumn;
import javax.persistence.SequenceGenerator;
import javax.persistence.Table;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlElement;
import javax.persistence.Transient;
import javax.persistence.UniqueConstraint;
import org.hibernate.annotations.Cache;
import org.hibernate.annotations.CacheConcurrencyStrategy;
import org.hibernate.annotations.Index;
import org.hibernate.annotations.Type;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.ParmID;
import com.raytheon.uf.common.dataplugin.persist.PersistableDataObject;
import com.raytheon.uf.common.message.WsId;
import com.raytheon.uf.common.serialization.ISerializableObject;
import com.raytheon.uf.common.serialization.annotations.DynamicSerialize;
import com.raytheon.uf.common.serialization.annotations.DynamicSerializeElement;
import com.raytheon.uf.common.time.TimeRange;
/**
* Represents a lock on a record
* Represents a lock on a record. Class is Immutable and is only serializable as
* part of a LockTable.
*
* <pre>
* SOFTWARE HISTORY
@ -52,57 +51,56 @@ import com.raytheon.uf.common.time.TimeRange;
* ------------ ---------- ----------- --------------------------
* 04/08/08 #875 bphillip Initial Creation
* 06/17/08 #940 bphillip Implemented GFE Locking
*
* 03/28/13 1949 rjpeter Normalized database structure, made immutable.
* </pre>
*
* @author bphillip
* @version 1.0
*/
@Entity
@Table(name = "gfelocktable")
@Cache(usage = CacheConcurrencyStrategy.TRANSACTIONAL)
@XmlAccessorType(XmlAccessType.NONE)
@DynamicSerialize
public class Lock extends PersistableDataObject implements Cloneable,
ISerializableObject {
@Table(name = "gfe_locks", uniqueConstraints = { @UniqueConstraint(columnNames = {
"parmId_id", "startTime", "endTime" }) })
public class Lock {
private static final long serialVersionUID = -7839912817664285509L;
/** The key for the database */
/**
* Auto-generated surrogate key
*/
@Id
@GeneratedValue
private int key;
@SequenceGenerator(name = "GFE_DBID_GENERATOR", sequenceName = "gfe_lock_seq")
@GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "GFE_DBID_GENERATOR")
private int id;
/** The parmID of the lock */
@Column
@Type(type = "com.raytheon.uf.common.dataplugin.gfe.db.type.ParmIdType")
@XmlElement
@Index(name = "lock_parmId_idx")
@DynamicSerializeElement
/**
* The parmID of the lock.
*/
@ManyToOne(fetch = FetchType.EAGER, optional = false)
@PrimaryKeyJoinColumn
private ParmID parmId;
/** The workstationID of the lock holder */
@Column
@Column(nullable = false)
@Type(type = "com.raytheon.uf.common.dataplugin.gfe.db.type.WsIdType")
@XmlElement
@DynamicSerializeElement
private WsId wsId;
/** The start time of the lock */
@Column
@XmlAttribute
@DynamicSerializeElement
private long startTime;
/** The end time of the lock */
@Column
@XmlAttribute
@DynamicSerializeElement
private long endTime;
/**
* Used as the hibernate field so that Database has a human readable field.
*/
@Column(name = "startTime", nullable = false)
private Date startDate;
/**
* Creates a new Lock. Use of this constructor is discouraged. It is used by
* JiBX
* Used as the hibernate field so that Database has a human readable field.
*/
@Column(name = "endTime", nullable = false)
private Date endDate;
@Transient
private transient TimeRange tr;
/**
* Creates a new Lock. Use of this constructor is discouraged.
*/
public Lock() {
@ -111,72 +109,74 @@ public class Lock extends PersistableDataObject implements Cloneable,
/**
* Creates a new Lock
*
* @param parmId
* The parmID of the lock.
* @param timeRange
* The time range over which the lock applies
* @param wsId
* The workstation ID of the lock owner
*/
public Lock(TimeRange timeRange, WsId wsId) {
this.startTime = timeRange.getStart().getTime();
this.endTime = timeRange.getEnd().getTime();
public Lock(ParmID parmId, TimeRange timeRange, WsId wsId) {
this.parmId = parmId;
this.startDate = new Date(timeRange.getStart().getTime());
this.endDate = new Date(timeRange.getEnd().getTime());
this.wsId = wsId;
}
/**
 * Creates a new Lock
 *
 * @param parmId
 *            The parmID of the lock.
 * @param wsId
 *            The workstation ID of the lock owner
 * @param startTime
 *            The start time of the lock, in milliseconds since the epoch
 * @param endTime
 *            The end time of the lock, in milliseconds since the epoch
 */
public Lock(ParmID parmId, WsId wsId, long startTime, long endTime) {
    this.parmId = parmId;
    this.wsId = wsId;
    this.startDate = new Date(startTime);
    this.endDate = new Date(endTime);
}
/**
 * @return the workstation ID of the lock holder
 */
public WsId getWsId() {
return wsId;
}
/**
 * Sets the workstation ID of the lock holder.
 *
 * @param wsId
 *            the workstation ID to set
 */
public void setWsId(WsId wsId) {
this.wsId = wsId;
}
public TimeRange getTimeRange() {
return new TimeRange(startTime, endTime);
}
if (tr == null) {
tr = new TimeRange(startDate, endDate);
}
public void setTimeRange(TimeRange timeRange) {
this.startTime = timeRange.getStart().getTime();
this.endTime = timeRange.getEnd().getTime();
return tr;
}
/**
 * @return the parmID this lock applies to
 */
public ParmID getParmId() {
return parmId;
}
/**
 * Sets the parmID this lock applies to.
 *
 * @param parmId
 *            the parmID to set
 */
public void setParmId(ParmID parmId) {
this.parmId = parmId;
}
public int getKey() {
return key;
}
public void setKey(int key) {
this.key = key;
public int getId() {
return id;
}
public long getStartTime() {
return startTime;
}
public void setStartTime(long startTime) {
this.startTime = startTime;
return startDate.getTime();
}
public long getEndTime() {
return endTime;
return endDate.getTime();
}
public void setEndTime(long endTime) {
this.endTime = endTime;
/**
 * @return the start of the locked time range as a Date (the field persisted
 *         to the database)
 */
public Date getStartDate() {
return startDate;
}
/**
 * @return the end of the locked time range as a Date (the field persisted
 *         to the database)
 */
public Date getEndDate() {
return endDate;
}
@Override
public Lock clone() {
// NOTE(review): this relies on a Lock(TimeRange, WsId) constructor that
// does not carry the parmId; if that constructor has been removed in
// favor of Lock(ParmID, WsId, long, long), this clone drops/breaks the
// parmId -- confirm against the current Lock constructors before use.
Lock newLock = new Lock(this.getTimeRange(), wsId);
return newLock;
}
public String toString() {
StringBuffer buffer = new StringBuffer();
buffer.append("TR: ");
@ -185,5 +185,4 @@ public class Lock extends PersistableDataObject implements Cloneable,
buffer.append(this.wsId.toPrettyString());
return buffer.toString();
}
}

View file

@ -24,15 +24,12 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.ParmID;
import com.raytheon.uf.common.dataplugin.gfe.serialize.LockTableAdapter;
import com.raytheon.uf.common.message.WsId;
import com.raytheon.uf.common.serialization.ISerializableObject;
import com.raytheon.uf.common.serialization.annotations.DynamicSerialize;
import com.raytheon.uf.common.serialization.annotations.DynamicSerializeElement;
import com.raytheon.uf.common.serialization.annotations.DynamicSerializeTypeAdapter;
import com.raytheon.uf.common.time.TimeRange;
/**
@ -44,15 +41,15 @@ import com.raytheon.uf.common.time.TimeRange;
* ------------ ---------- ----------- --------------------------
* 04/08/08 #875 bphillip Initial Creation
* 06/17/08 #940 bphillip Implemented GFE Locking
*
* 04/23/13 #1949 rjpeter Added serialization adapter
* </pre>
*
* @author bphillip
* @version 1.0
*/
@XmlAccessorType(XmlAccessType.NONE)
@DynamicSerialize
@DynamicSerializeTypeAdapter(factory = LockTableAdapter.class)
public class LockTable implements Cloneable, ISerializableObject {
/** Enumeration denoting status of the lock */
@ -66,18 +63,12 @@ public class LockTable implements Cloneable, ISerializableObject {
};
/** List of locks contained in this lock table */
@XmlElement
@DynamicSerializeElement
private List<Lock> locks;
/** The workstation ID of the owner of this lock table */
@XmlElement
@DynamicSerializeElement
private WsId wsId;
/** The parm ID for which this lock table holds lock information for */
@XmlElement
@DynamicSerializeElement
private ParmID parmId;
/**
@ -215,35 +206,20 @@ public class LockTable implements Cloneable, ISerializableObject {
* @return The lock status of the specified time range
*/
public LockStatus checkLock(TimeRange timeRange, WsId requestorId) {
for (int i = 0; i < locks.size(); i++) {
if (timeRange.overlaps(locks.get(i).getTimeRange())) {
if (!requestorId.equals(locks.get(i).getWsId())) {
for (Lock lock : locks) {
TimeRange tr = lock.getTimeRange();
if (timeRange.overlaps(tr)) {
if (!requestorId.equals(lock.getWsId())) {
return LockStatus.LOCKED_BY_OTHER;
} else if (locks.get(i).getTimeRange()
.contains(timeRange.getStart())
&& (locks.get(i).getTimeRange().getEnd()
.after(timeRange.getEnd()) || locks.get(i)
.getTimeRange().getEnd()
.equals(timeRange.getEnd()))) {
} else if (tr.contains(timeRange.getStart())
&& (tr.getEnd().after(timeRange.getEnd()) || tr
.getEnd().equals(timeRange.getEnd()))) {
return LockStatus.LOCKED_BY_ME;
}
}
}
return LockStatus.LOCKABLE;
}
public void removeLock(Lock lockToRemove) {
int removeIndex = -1;
for (Lock lock : locks) {
if (lock.getKey() == lockToRemove.getKey()) {
removeIndex = locks.indexOf(lock);
}
}
if (removeIndex != -1) {
locks.remove(removeIndex);
}
return LockStatus.LOCKABLE;
}
public void removeLocks(Collection<Lock> locksToRemove) {
@ -294,13 +270,9 @@ public class LockTable implements Cloneable, ISerializableObject {
@Override
public LockTable clone() {
LockTable newTable = new LockTable();
newTable.setWsId(this.wsId);
newTable.setParmId(this.parmId);
for (Lock lock : locks) {
newTable.addLock(lock.clone());
}
return newTable;
// locks are immutable so this is safe
List<Lock> lockList = new ArrayList<Lock>(locks);
return new LockTable(this.parmId, lockList, this.wsId);
}
public static LockMode getLockMode(String modeName) {

View file

@ -27,6 +27,7 @@ import com.raytheon.uf.common.dataplugin.gfe.server.notify.GfeNotification;
import com.raytheon.uf.common.serialization.ISerializableObject;
import com.raytheon.uf.common.serialization.annotations.DynamicSerialize;
import com.raytheon.uf.common.serialization.annotations.DynamicSerializeElement;
import com.raytheon.uf.common.util.CollectionUtil;
/**
* Encapsulates messages sent from the server to the client.
@ -36,7 +37,7 @@ import com.raytheon.uf.common.serialization.annotations.DynamicSerializeElement;
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* 06/24/08 #875 bphillip Initial Creation
*
* 04/24/13 #1949 rjpeter Create lists on demand
* </pre>
*
* @author bphillip
@ -47,14 +48,14 @@ public class ServerResponse<T> implements ISerializableObject {
/** Messages indicating an error */
@DynamicSerializeElement
private ArrayList<ServerMsg> messages = new ArrayList<ServerMsg>();
private List<ServerMsg> messages = null;
/** List of return objects from uEngine tasks */
@DynamicSerializeElement
private T payload;
@DynamicSerializeElement
private List<GfeNotification> notifications = new ArrayList<GfeNotification>();
private List<GfeNotification> notifications = null;
/**
* Constructs and empty ServerResponse
@ -64,10 +65,14 @@ public class ServerResponse<T> implements ISerializableObject {
}
public boolean isOkay() {
return messages.isEmpty();
return (messages == null) || messages.isEmpty();
}
/**
 * Records an error/status message on this response, creating the backing
 * list on first use (the list is allocated lazily to keep empty responses
 * cheap).
 *
 * @param message
 *            the message text to record
 */
public void addMessage(String message) {
    List<ServerMsg> target = messages;
    if (target == null) {
        target = new ArrayList<ServerMsg>();
        messages = target;
    }
    target.add(new ServerMsg(message));
}
@ -78,11 +83,21 @@ public class ServerResponse<T> implements ISerializableObject {
* The ServerResponse to add
*/
public void addMessages(ServerResponse<?> ssr) {
for (ServerMsg message : ssr.getMessages()) {
messages.add(message);
List<ServerMsg> ssrMsgs = ssr.getMessages();
if (!CollectionUtil.isNullOrEmpty(ssrMsgs)) {
if (messages == null) {
messages = new ArrayList<ServerMsg>(ssrMsgs.size());
}
messages.addAll(ssrMsgs);
}
for (GfeNotification notify : ssr.getNotifications()) {
notifications.add(notify);
List<GfeNotification> ssrNotifs = ssr.getNotifications();
if (!CollectionUtil.isNullOrEmpty(ssrNotifs)) {
if (notifications == null) {
notifications = new ArrayList<GfeNotification>(ssrNotifs.size());
}
notifications.addAll(ssrNotifs);
}
}
@ -91,7 +106,11 @@ public class ServerResponse<T> implements ISerializableObject {
*
* @return The messages
*/
public ArrayList<ServerMsg> getMessages() {
public List<ServerMsg> getMessages() {
if (messages == null) {
messages = new ArrayList<ServerMsg>(0);
}
return messages;
}
@ -100,7 +119,7 @@ public class ServerResponse<T> implements ISerializableObject {
*
* @param messages
*/
public void setMessages(ArrayList<ServerMsg> messages) {
public void setMessages(List<ServerMsg> messages) {
this.messages = messages;
}
@ -112,7 +131,7 @@ public class ServerResponse<T> implements ISerializableObject {
public String message() {
if (!isOkay()) {
StringBuffer buf = new StringBuffer();
for (ServerMsg message : messages) {
for (ServerMsg message : getMessages()) {
buf.append(message);
buf.append("\n");
}
@ -122,6 +141,7 @@ public class ServerResponse<T> implements ISerializableObject {
}
}
@Override
public String toString() {
// delegates to message(), which concatenates all error messages
return message();
}
@ -135,6 +155,10 @@ public class ServerResponse<T> implements ISerializableObject {
}
/**
 * Returns the notifications attached to this response, lazily creating an
 * empty list so callers never observe null.
 *
 * @return the notification list, never null
 */
public List<GfeNotification> getNotifications() {
    List<GfeNotification> result = notifications;
    if (result == null) {
        result = new ArrayList<GfeNotification>(0);
        notifications = result;
    }
    return result;
}
@ -143,6 +167,10 @@ public class ServerResponse<T> implements ISerializableObject {
}
public void addNotifications(GfeNotification notify) {
this.notifications.add(notify);
if (notifications == null) {
notifications = new ArrayList<GfeNotification>();
}
notifications.add(notify);
}
}

View file

@ -54,6 +54,7 @@ import com.raytheon.uf.common.time.TimeRange;
* 01/30/2008 chammack Stubbed-out class based on AWIPS I
* 02/22/2008 879 rbell Legacy conversion, extended ScalarSlice
* 06/10/2009 2159 rjpeter Updated checkDims to check dirGrid for null
* 04/23/2013 1949 rjpeter Updated wind checks to keep float precision.
* </pre>
*
* @author chammack
@ -66,6 +67,8 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
private static final transient IUFStatusHandler statusHandler = UFStatus
.getHandler(VectorGridSlice.class);
private static final float DEG_IN_CIRCLE = 360.0f;
@DynamicSerializeElement
protected Grid2DFloat dirGrid;
@ -170,7 +173,7 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
* @return dirGrid
*/
public Grid2DFloat getDirGrid() {
if (useCache && dirCacheId != null) {
if (useCache && (dirCacheId != null)) {
try {
@SuppressWarnings("unchecked")
ICache<IGrid2D> diskCache = CacheFactory.getInstance()
@ -232,8 +235,8 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
Grid2DFloat rhsDirGrid = ((VectorGridSlice) rhs).getDirGrid();
if (rhsDirGrid != null) {
if (thisDirGrid.getXdim() != rhsDirGrid.getXdim()
|| thisDirGrid.getYdim() != rhsDirGrid.getYdim()) {
if ((thisDirGrid.getXdim() != rhsDirGrid.getXdim())
|| (thisDirGrid.getYdim() != rhsDirGrid.getYdim())) {
throw new IllegalArgumentException(
"Supplied grid is not of same dimension");
}
@ -276,7 +279,7 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
}
Grid2DFloat dGrid = getDirGrid();
if (dGrid == null || !dGrid.isValid()) {
if ((dGrid == null) || !dGrid.isValid()) {
return "Direction grid is invalid";
}
@ -357,15 +360,13 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
+ v.get(i, j) * v.get(i, j)));
float dir = (float) Math.toDegrees(Math.atan2(u.get(i, j),
v.get(i, j)));
if (dir < 0) {
dir += 360.0;
while (dir < 0.0f) {
dir += DEG_IN_CIRCLE;
}
if (dir >= 360.0) {
dir -= 360.0;
}
if (dir == 360.0) {
dir = 360.0f;
while (dir >= DEG_IN_CIRCLE) {
dir -= DEG_IN_CIRCLE;
}
dirGrid.set(i, j, dir);
}
}
@ -404,15 +405,13 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
+ v.get(i, j) * v.get(i, j)));
float dir = (float) Math.toDegrees(Math.atan2(u.get(i, j),
v.get(i, j)));
if (dir < 0) {
dir += 360.0;
while (dir < 0.0f) {
dir += DEG_IN_CIRCLE;
}
if (dir >= 360.0) {
dir -= 360.0;
}
if (dir == 360.0) {
dir = 360.0f;
while (dir >= DEG_IN_CIRCLE) {
dir -= DEG_IN_CIRCLE;
}
dirGrid.set(i, j, dir);
}
}
@ -475,8 +474,8 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
Grid2DFloat mGrid = getMagGrid();
Grid2DFloat dGrid = getDirGrid();
if (mGrid.getXdim() != dGrid.getXdim()
|| mGrid.getYdim() != dGrid.getYdim()) {
if ((mGrid.getXdim() != dGrid.getXdim())
|| (mGrid.getYdim() != dGrid.getYdim())) {
return "Magnitude and Direction grids have different dimensions";
}
@ -497,13 +496,10 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
for (int i = 0; i < size; i++) {
float thisDir = dir.get(i);
while (thisDir < 0.0f) {
thisDir += 360.0f;
thisDir += DEG_IN_CIRCLE;
}
while (thisDir > 360.0f) {
thisDir -= 360.0f;
}
if (thisDir == 360.0f) {
thisDir = 0.0f;
while (thisDir >= DEG_IN_CIRCLE) {
thisDir -= DEG_IN_CIRCLE;
}
dir.put(i, thisDir);
}
@ -518,15 +514,15 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
public ScalarGridSlice verticalMotion(VectorGridSlice gs, Grid2DBit editArea) {
Grid2DFloat mGrid = getMagGrid();
if (mGrid.getXdim() != editArea.getXdim()
|| mGrid.getYdim() != editArea.getYdim()) {
if ((mGrid.getXdim() != editArea.getXdim())
|| (mGrid.getYdim() != editArea.getYdim())) {
throw new IllegalArgumentException(
"This and editArea grids have different dimensions");
}
Grid2DFloat gsmGrid = gs.getMagGrid();
if (mGrid.getXdim() != gsmGrid.getXdim()
|| mGrid.getYdim() != gsmGrid.getYdim()) {
if ((mGrid.getXdim() != gsmGrid.getXdim())
|| (mGrid.getYdim() != gsmGrid.getYdim())) {
throw new IllegalArgumentException(
"This and supplied grids have different dimensions");
}
@ -766,8 +762,8 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
Grid2DFloat thisMagGrid = getMagGrid();
Grid2DFloat rhsMagGrid = rhs.getMagGrid();
if (thisMagGrid.getXdim() != rhsMagGrid.getXdim()
|| thisMagGrid.getYdim() != rhsMagGrid.getYdim()) {
if ((thisMagGrid.getXdim() != rhsMagGrid.getXdim())
|| (thisMagGrid.getYdim() != rhsMagGrid.getYdim())) {
throw new IllegalArgumentException(
"This and supplied GridSlice are different dimensions");
}
@ -815,8 +811,8 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
Grid2DFloat thisMagGrid = getMagGrid();
Grid2DFloat rhsMagGrid = rhs.getMagGrid();
if (thisMagGrid.getXdim() != rhsMagGrid.getXdim()
|| thisMagGrid.getYdim() != rhsMagGrid.getYdim()) {
if ((thisMagGrid.getXdim() != rhsMagGrid.getXdim())
|| (thisMagGrid.getYdim() != rhsMagGrid.getYdim())) {
throw new IllegalArgumentException(
"This and supplied GridSlice are different dimensions");
}
@ -864,7 +860,7 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
Grid2DBit magBits = new Grid2DBit(mGrid.getXdim(), mGrid.getYdim());
Grid2DBit dirBits = new Grid2DBit(mGrid.getXdim(), mGrid.getYdim());
if (mag != 0 || (dir == 0 && mag == 0)) { // Test Magnitude
if ((mag != 0) || ((dir == 0) && (mag == 0))) { // Test Magnitude
for (int i = 0; i < mGrid.getXdim(); i++) {
for (int j = 0; j < mGrid.getYdim(); j++) {
if (mGrid.get(i, j) == mag) {
@ -888,8 +884,8 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
if (cross360) {
for (int i = 0; i < mGrid.getXdim(); i++) {
for (int j = 0; j < mGrid.getYdim(); j++) {
if (dGrid.get(i, j) >= lower
|| dGrid.get(i, j) <= upper) {
if ((dGrid.get(i, j) >= lower)
|| (dGrid.get(i, j) <= upper)) {
dirBits.set(i, j);
}
}
@ -897,15 +893,15 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
} else {
for (int i = 0; i < mGrid.getXdim(); i++) {
for (int j = 0; j < mGrid.getYdim(); j++) {
if (dGrid.get(i, j) >= lower
&& dGrid.get(i, j) <= upper) {
if ((dGrid.get(i, j) >= lower)
&& (dGrid.get(i, j) <= upper)) {
dirBits.set(i, j);
}
}
}
}
}
if (mag != 0 && dir != 0) {
if ((mag != 0) && (dir != 0)) {
// "AND" magnitude and direction
bits = magBits.and(dirBits);
} else if (dir != 0) {
@ -935,7 +931,7 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
Grid2DBit magBits = new Grid2DBit(mGrid.getXdim(), mGrid.getYdim());
Grid2DBit dirBits = new Grid2DBit(mGrid.getXdim(), mGrid.getYdim());
if (mag != 0 || (dir == 0 && mag == 0)) { // Test Magnitude
if ((mag != 0) || ((dir == 0) && (mag == 0))) { // Test Magnitude
for (int i = 0; i < mGrid.getXdim(); i++) {
for (int j = 0; j < mGrid.getYdim(); j++) {
if (mGrid.get(i, j) > mag) {
@ -953,7 +949,7 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
}
}
}
if (mag != 0 && dir != 0) {
if ((mag != 0) && (dir != 0)) {
// "AND" magnitude and direction
bits = magBits.and(dirBits);
} else if (dir != 0) {
@ -977,7 +973,7 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
Grid2DBit magBits = new Grid2DBit(mGrid.getXdim(), mGrid.getYdim());
Grid2DBit dirBits = new Grid2DBit(mGrid.getXdim(), mGrid.getYdim());
if (mag != 0 || (dir == 0 && mag == 0)) { // Test Magnitude
if ((mag != 0) || ((dir == 0) && (mag == 0))) { // Test Magnitude
for (int i = 0; i < mGrid.getXdim(); i++) {
for (int j = 0; j < mGrid.getYdim(); j++) {
if (mGrid.get(i, j) >= mag) {
@ -995,7 +991,7 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
}
}
}
if (mag != 0 && dir != 0) {
if ((mag != 0) && (dir != 0)) {
// "AND" magnitude and direction
bits = magBits.and(dirBits);
} else if (dir != 0) {
@ -1019,7 +1015,7 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
Grid2DBit magBits = new Grid2DBit(mGrid.getXdim(), mGrid.getYdim());
Grid2DBit dirBits = new Grid2DBit(mGrid.getXdim(), mGrid.getYdim());
if (mag != 0 || (dir == 0 && mag == 0)) { // Test Magnitude
if ((mag != 0) || ((dir == 0) && (mag == 0))) { // Test Magnitude
for (int i = 0; i < mGrid.getXdim(); i++) {
for (int j = 0; j < mGrid.getYdim(); j++) {
if (mGrid.get(i, j) < mag) {
@ -1037,7 +1033,7 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
}
}
}
if (mag != 0 && dir != 0) {
if ((mag != 0) && (dir != 0)) {
// "AND" magnitude and direction
bits = magBits.and(dirBits);
} else if (dir != 0) {
@ -1061,7 +1057,7 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
Grid2DBit magBits = new Grid2DBit(mGrid.getXdim(), mGrid.getYdim());
Grid2DBit dirBits = new Grid2DBit(mGrid.getXdim(), mGrid.getYdim());
if (mag != 0 || (dir == 0 && mag == 0)) { // Test Magnitude
if ((mag != 0) || ((dir == 0) && (mag == 0))) { // Test Magnitude
for (int i = 0; i < mGrid.getXdim(); i++) {
for (int j = 0; j < mGrid.getYdim(); j++) {
if (mGrid.get(i, j) <= mag) {
@ -1079,7 +1075,7 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
}
}
}
if (mag != 0 && dir != 0) {
if ((mag != 0) && (dir != 0)) {
// "AND" magnitude and direction
bits = magBits.and(dirBits);
} else if (dir != 0) {
@ -1124,7 +1120,7 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
Grid2DBit magBits = new Grid2DBit(mGrid.getXdim(), mGrid.getYdim());
Grid2DBit dirBits = new Grid2DBit(mGrid.getXdim(), mGrid.getYdim());
if (mag != 0 || (dir == 0 && mag == 0)) { // Test Magnitude
if ((mag != 0) || ((dir == 0) && (mag == 0))) { // Test Magnitude
for (int i = 0; i < mGrid.getXdim(); i++) {
for (int j = 0; j < mGrid.getYdim(); j++) {
if (Math.abs(mGrid.get(i, j) - mag) <= fuzz) {
@ -1148,8 +1144,8 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
if (cross360) {
for (int i = 0; i < mGrid.getXdim(); i++) {
for (int j = 0; j < mGrid.getYdim(); j++) {
if (dGrid.get(i, j) >= lower
|| dGrid.get(i, j) <= upper) {
if ((dGrid.get(i, j) >= lower)
|| (dGrid.get(i, j) <= upper)) {
dirBits.set(i, j);
}
}
@ -1157,15 +1153,15 @@ public class VectorGridSlice extends ScalarGridSlice implements Cloneable,
} else {
for (int i = 0; i < mGrid.getXdim(); i++) {
for (int j = 0; j < mGrid.getYdim(); j++) {
if (dGrid.get(i, j) >= lower
&& dGrid.get(i, j) <= upper) {
if ((dGrid.get(i, j) >= lower)
&& (dGrid.get(i, j) <= upper)) {
dirBits.set(i, j);
}
}
}
}
}
if (mag != 0 && dir != 0) {
if ((mag != 0) && (dir != 0)) {
// "AND" magnitude and direction
bits = magBits.and(dirBits);
} else if (dir != 0) {

View file

@ -41,7 +41,6 @@ import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlElement;
import org.apache.commons.beanutils.PropertyUtils;
import org.hibernate.annotations.Index;
import com.raytheon.uf.common.dataplugin.annotations.DataURI;
import com.raytheon.uf.common.dataplugin.annotations.DataURIUtil;
@ -314,9 +313,9 @@ public abstract class PluginDataObject extends PersistableDataObject implements
Field currentField = null;
String currentUriToken = null;
for (int i = 0; i < dataURIFields.length; i++) {
for (Field dataURIField : dataURIFields) {
currentUriToken = uriTokens[uriIndex];
currentField = dataURIFields[i];
currentField = dataURIField;
if (currentField.getAnnotation(DataURI.class).embedded()) {
// The current dataURI token refers to a field in an embedded
@ -484,7 +483,6 @@ public abstract class PluginDataObject extends PersistableDataObject implements
return dataTime;
}
@Index(name = "dataURI_idx")
public String getDataURI() {
return this.dataURI;
}
@ -493,7 +491,6 @@ public abstract class PluginDataObject extends PersistableDataObject implements
return SerializationUtil.marshalToXml(this);
}
@Index(name = "insertTimeIndex")
public Calendar getInsertTime() {
return insertTime;
}

View file

@ -56,7 +56,7 @@ import com.raytheon.uf.common.time.util.TimeUtil;
* duration is 0, use TimeUtil constants.
* 04/04/2013 #1787 randerso Removed a bunch of isValid checks to the logic
* works as intended by the original A1 implementation.
*
* 04/24/2013 1949 rjpeter Updated clone to deep copy by millis.
* </pre>
*
* <B>Original Documentation:</B>
@ -260,10 +260,10 @@ public class TimeRange implements Serializable, Comparable<TimeRange>,
if (days > 0) {
sb.append(days + "d ");
}
if (hours > 0 || min > 0 || sec > 0) {
if ((hours > 0) || (min > 0) || (sec > 0)) {
sb.append(hours + "h ");
}
if (min > 0 || sec > 0) {
if ((min > 0) || (sec > 0)) {
sb.append(min + "m ");
}
if (sec > 0) {
@ -340,7 +340,7 @@ public class TimeRange implements Serializable, Comparable<TimeRange>,
*/
@Override
public boolean equals(Object obj) {
if (obj == null || !(obj instanceof TimeRange)) {
if ((obj == null) || !(obj instanceof TimeRange)) {
return false;
}
@ -368,8 +368,8 @@ public class TimeRange implements Serializable, Comparable<TimeRange>,
public boolean contains(Date time) {
if (getDuration() != 0) {
// the end time is not part of the time range (hence the < operator)
return time.getTime() >= start.getTime()
&& time.getTime() < end.getTime();
return (time.getTime() >= start.getTime())
&& (time.getTime() < end.getTime());
} else {
// Special case for zero duration time range
return time.equals(start);
@ -416,8 +416,8 @@ public class TimeRange implements Serializable, Comparable<TimeRange>,
} else if (timeRange.getDuration() == 0) {
return contains(timeRange.getStart());
} else {
return timeRange.start.compareTo(start) >= 0
&& timeRange.end.compareTo(end) <= 0;
return (timeRange.start.compareTo(start) >= 0)
&& (timeRange.end.compareTo(end) <= 0);
}
}
@ -545,7 +545,7 @@ public class TimeRange implements Serializable, Comparable<TimeRange>,
*/
@Override
public TimeRange clone() {
return new TimeRange(this.start, this.end);
return new TimeRange(this.start.getTime(), this.end.getTime());
}
@Override

View file

@ -259,7 +259,7 @@ public class CoreDao extends HibernateDaoSupport {
* @param obj
* The object to delete
*/
public <T> void delete(final PersistableDataObject<T> obj) {
public <T> void delete(final Object obj) {
txTemplate.execute(new TransactionCallbackWithoutResult() {
@Override
public void doInTransactionWithoutResult(TransactionStatus status) {
@ -311,10 +311,11 @@ public class CoreDao extends HibernateDaoSupport {
id.getDataURI()));
List<?> list = getHibernateTemplate().findByCriteria(
criteria);
if (list.size() > 0)
if (list.size() > 0) {
return (PluginDataObject) list.get(0);
else
} else {
return null;
}
}
});
return retVal;
@ -847,7 +848,7 @@ public class CoreDao extends HibernateDaoSupport {
logger.error("Unable to close JDBC statement!", e1);
}
if (exception == null && transactional) {
if ((exception == null) && transactional) {
trans.commit();
}
try {

View file

@ -0,0 +1,90 @@
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
#
# Adapter for com.raytheon.uf.common.dataplugin.gfe.server.lock.LockTable
#
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 04/22/13 rjpeter Initial Creation.
#
#
#
from dynamicserialize.dstypes.com.raytheon.uf.common.dataplugin.gfe.server.lock import LockTable
from dynamicserialize.dstypes.com.raytheon.uf.common.dataplugin.gfe.server.lock import Lock
ClassAdapter = 'com.raytheon.uf.common.dataplugin.gfe.server.lock.LockTable'
def serialize(context, lockTable):
    """Serialize a LockTable as: parmId, de-duplicated wsId table, locks.

    Each lock is written as (startTime, endTime, wsId-table-index) so a
    workstation ID shared by many locks is only serialized once.  Index 0
    of the wsId table is always the lock table owner's wsId, which is what
    deserialize() relies on to restore the owner.
    """
    # owner's wsId always occupies slot 0 of the lookup table
    index = 0
    wsIds = {lockTable.getWsId().toString(): index}
    index += 1

    locks = lockTable.getLocks()
    lockWsIdIndex = []
    for lock in locks:
        wsIdString = lock.getWsId().toString()
        # 'in' replaces the deprecated dict.has_key()
        if wsIdString in wsIds:
            lockWsIdIndex.append(wsIds[wsIdString])
        else:
            lockWsIdIndex.append(index)
            wsIds[wsIdString] = index
            index += 1

    context.writeObject(lockTable.getParmId())

    # write the wsId lookup table in index order
    context.writeI32(index)
    for wsId in sorted(wsIds, key=wsIds.get):
        context.writeObject(wsId)

    context.writeI32(len(locks))
    for lock, wsIndex in zip(locks, lockWsIdIndex):
        # bug fix: these three writes previously targeted an undefined name
        # 'serializer' (NameError at runtime); the stream object is 'context'
        context.writeI64(lock.getStartTime())
        context.writeI64(lock.getEndTime())
        context.writeI32(wsIndex)
def deserialize(context):
    """Rebuild a LockTable from the stream produced by serialize().

    Reads parmId, the wsId lookup table, then each lock's
    (startTime, endTime, wsId-index) triple.  The table owner's wsId is
    entry 0 of the lookup table.
    """
    parmId = context.readObject()

    # wsId lookup table, in the order it was written
    wsIds = [context.readObject() for _ in xrange(context.readI32())]

    locks = []
    for _ in xrange(context.readI32()):
        lock = Lock()
        lock.setParmId(parmId)
        lock.setStartTime(context.readI64())
        lock.setEndTime(context.readI64())
        lock.setWsId(wsIds[context.readI32()])
        locks.append(lock)

    lockTable = LockTable()
    lockTable.setParmId(parmId)
    lockTable.setWsId(wsIds[0])
    lockTable.setLocks(locks)
    return lockTable

View file

@ -29,7 +29,7 @@
# ------------ ---------- ----------- --------------------------
# 08/31/10 njensen Initial Creation.
# 03/20/13 #1774 randerso Added TimeConstraintsAdapter
#
# 04/22/13 #1949 rjpeter Added LockTableAdapter
#
#
@ -53,6 +53,7 @@ __all__ = [
'FloatBufferAdapter',
'ByteBufferAdapter',
'TimeConstraintsAdapter',
'LockTableAdapter'
# 'GridDataHistoryAdapter',
]