13.4.1-8 baseline
Former-commit-id: d130c847d4f9d0b491f9326c8b5140fb59174927
This commit is contained in:
parent
4c6ff79e71
commit
19866e7032
36 changed files with 554 additions and 563 deletions
|
@ -22,6 +22,7 @@ package com.raytheon.uf.viz.core.comm;
|
|||
|
||||
import java.io.UnsupportedEncodingException;
|
||||
import java.net.URLEncoder;
|
||||
|
||||
import javax.jms.ConnectionFactory;
|
||||
|
||||
import org.apache.qpid.client.AMQConnectionFactory;
|
||||
|
@ -41,6 +42,7 @@ import com.raytheon.uf.viz.core.VizApp;
|
|||
* Nov 2, 2009 #3067 chammack Send all jms connections through failover:// to properly reconnect
|
||||
* Nov 2, 2011 #7391 bkowal Ensure that the generated WsId is properly formatted to be
|
||||
* included in a url.
|
||||
* May 09, 2013 1814 rjpeter Updated prefetch to 10.
|
||||
* </pre>
|
||||
*
|
||||
* @author chammack
|
||||
|
@ -50,7 +52,7 @@ public class JMSConnection {
|
|||
|
||||
private static JMSConnection instance;
|
||||
|
||||
private String jndiProviderUrl;
|
||||
private final String jndiProviderUrl;
|
||||
|
||||
private AMQConnectionFactory factory;
|
||||
|
||||
|
@ -76,17 +78,18 @@ public class JMSConnection {
|
|||
// reconnect
|
||||
this.factory = new AMQConnectionFactory(
|
||||
"amqp://guest:guest@"
|
||||
+ URLEncoder.encode(VizApp.getWsId().toString(), "UTF-8")
|
||||
+ URLEncoder.encode(VizApp.getWsId().toString(),
|
||||
"UTF-8")
|
||||
+ "/edex?brokerlist='"
|
||||
+ this.jndiProviderUrl
|
||||
+ "?connecttimeout='5000'&heartbeat='0''&maxprefetch='0'&sync_publish='all'&failover='nofailover'");
|
||||
+ "?connecttimeout='5000'&heartbeat='0''&maxprefetch='10'&sync_publish='all'&failover='nofailover'");
|
||||
} catch (URLSyntaxException e) {
|
||||
// TODO Auto-generated catch block
|
||||
e.printStackTrace();
|
||||
} catch (UnsupportedEncodingException e1) {
|
||||
// TODO Auto-generated catch block
|
||||
e1.printStackTrace();
|
||||
}
|
||||
// TODO Auto-generated catch block
|
||||
e1.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -81,7 +81,7 @@ public abstract class FFMPTable extends Composite {
|
|||
|
||||
/** DR14406: For columns with more words */
|
||||
protected static final int EXTRA_COLUMN_WIDTH = 28;
|
||||
|
||||
|
||||
private static final String NAME = "Name";
|
||||
|
||||
protected String currentPfaf = null;
|
||||
|
@ -401,23 +401,24 @@ public abstract class FFMPTable extends Composite {
|
|||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the sorted column is a column that will contain a filter.
|
||||
// Check the gui config to see if colorCell is true. If false then do
|
||||
// not apply filter
|
||||
|
||||
// Check if the sorted column is a column that will contain a
|
||||
// filter. Check the gui config to see if colorCell is true. If
|
||||
// false then do not apply filter
|
||||
for (FFMPTableColumnXML xml : ffmpTableCols) {
|
||||
if (xml.getColumnName().contains(sortedThreshCol.name())) {
|
||||
if (ffmpConfig.isColorCell(sortedThreshCol)) {
|
||||
// Only filter if colorCell is true
|
||||
isAFilterCol = true;
|
||||
filterNum = ffmpConfig.getFilterValue(sortedThreshCol);
|
||||
reverseFilter = ffmpConfig.isReverseFilter(sortedThreshCol);
|
||||
reverseFilter = ffmpConfig
|
||||
.isReverseFilter(sortedThreshCol);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
table.removeAll();
|
||||
|
||||
if (tableData == null) {
|
||||
|
@ -445,12 +446,12 @@ public abstract class FFMPTable extends Composite {
|
|||
*/
|
||||
if (!sortedColumnName.equalsIgnoreCase(NAME)) {
|
||||
float dataVal = cellData[sortColIndex].getValueAsFloat();
|
||||
|
||||
|
||||
// DR 14250 fix: any value not a number will be omitted
|
||||
if (Float.isNaN(dataVal)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
if (isAFilterCol) {
|
||||
if (reverseFilter) {
|
||||
if (dataVal > filterNum) {
|
||||
|
@ -470,7 +471,7 @@ public abstract class FFMPTable extends Composite {
|
|||
tableIndex = indexArray.indexOf(t);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* VIRTUAL TABLE
|
||||
*
|
||||
|
@ -617,15 +618,13 @@ public abstract class FFMPTable extends Composite {
|
|||
if (tableData == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
String sortCol = (String) tc.getData();
|
||||
|
||||
int sortDir = getColumnAttributeData(sortCol).getSortDir();
|
||||
int columnIndex = getColumnIndex(sortCol);
|
||||
|
||||
tableData.setSortColumnAndDirection(columnIndex, sortDir);
|
||||
|
||||
tableData.sortData();
|
||||
tableData.sortData(columnIndex, sortDir);
|
||||
|
||||
FfmpTableConfigData ffmpTableCfgData = FfmpTableConfig.getInstance()
|
||||
.getTableConfigData(this.siteKey);
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
**/
|
||||
package com.raytheon.uf.viz.monitor.ffmp.ui.dialogs;
|
||||
|
||||
import org.eclipse.swt.SWT;
|
||||
import org.eclipse.swt.graphics.Color;
|
||||
|
||||
import com.raytheon.uf.common.dataplugin.ffmp.FFMPRecord.FIELDS;
|
||||
|
@ -39,6 +38,7 @@ import com.raytheon.uf.viz.monitor.ffmp.ui.dialogs.FFMPConfig.ThreshColNames;
|
|||
* ------------ ---------- ----------- --------------------------
|
||||
* Apr 6, 2009 lvenable Initial creation
|
||||
* Apr 12, 2013 1902 mpduff Optimized the color assignments.
|
||||
* May 7, 2013 1986 njensen Optimized sortBy
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -205,30 +205,19 @@ public class FFMPTableCellData {
|
|||
return this.hoverText;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sort by method.
|
||||
*
|
||||
* @param direction
|
||||
* Sort direction.
|
||||
* @return Object that is a string or number.
|
||||
*/
|
||||
public Object sortByObject(int direction) {
|
||||
if (cellText != null) {
|
||||
return String.format("%-20S", cellText);
|
||||
} else if (value.isNaN() == false) {
|
||||
public float sortByNumber() {
|
||||
if (!value.isNaN()) {
|
||||
if (displayAsInt == true) {
|
||||
return new Float(Math.round(value));
|
||||
return (float) Math.round(value);
|
||||
}
|
||||
|
||||
return new Float(value);
|
||||
} else if (value.isNaN() == true) {
|
||||
if (direction == SWT.DOWN) {
|
||||
return Float.MAX_VALUE * -1.0f;
|
||||
}
|
||||
return Float.MAX_VALUE;
|
||||
return value.floatValue();
|
||||
} else {
|
||||
// NaN is not displayed in the table when sorting by
|
||||
// this column, so the value to return here is not especially
|
||||
// important
|
||||
return Float.NEGATIVE_INFINITY;
|
||||
}
|
||||
|
||||
return "Unknown";
|
||||
}
|
||||
|
||||
public String displayString() {
|
||||
|
|
|
@ -38,6 +38,7 @@ import com.raytheon.uf.viz.monitor.ui.dialogs.ISortColumn;
|
|||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Apr 7, 2009 lvenable Initial creation
|
||||
* May 7, 2013 1986 njensen Optimized sortData()
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -151,19 +152,6 @@ public class FFMPTableData implements ISortColumn {
|
|||
return tableRows;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the sort column and direction.
|
||||
*
|
||||
* @param columnIndex
|
||||
* Column index.
|
||||
* @param direction
|
||||
* Sort direction.
|
||||
*/
|
||||
public void setSortColumnAndDirection(int columnIndex, int direction) {
|
||||
currentSortColumnIndex = columnIndex;
|
||||
currentSortDirection = direction;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the sort column index.
|
||||
*
|
||||
|
@ -186,8 +174,21 @@ public class FFMPTableData implements ISortColumn {
|
|||
|
||||
/**
|
||||
* Sort the table data.
|
||||
*
|
||||
* @param columnIndex
|
||||
* the column to sort on
|
||||
* @param direction
|
||||
* the direction to sort by
|
||||
*/
|
||||
public void sortData() {
|
||||
Collections.sort(tableRows);
|
||||
public void sortData(int columnIndex, int direction) {
|
||||
if (columnIndex != currentSortColumnIndex
|
||||
|| direction != currentSortDirection) {
|
||||
currentSortColumnIndex = columnIndex;
|
||||
currentSortDirection = direction;
|
||||
Collections.sort(tableRows);
|
||||
if (getSortDirection() == SWT.DOWN) {
|
||||
Collections.reverse(tableRows);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,8 +19,6 @@
|
|||
**/
|
||||
package com.raytheon.uf.viz.monitor.ffmp.ui.dialogs;
|
||||
|
||||
import org.eclipse.swt.SWT;
|
||||
|
||||
import com.raytheon.uf.viz.monitor.ui.dialogs.ISortColumn;
|
||||
|
||||
/**
|
||||
|
@ -34,6 +32,7 @@ import com.raytheon.uf.viz.monitor.ui.dialogs.ISortColumn;
|
|||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Apr 7, 2009 lvenable Initial creation
|
||||
* May 7, 2013 1986 njensen Sped up compareTo()
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -138,36 +137,16 @@ public class FFMPTableRowData implements Comparable<FFMPTableRowData> {
|
|||
@Override
|
||||
public int compareTo(FFMPTableRowData otherObj) {
|
||||
int selectedIndex = sortCB.getSortColumn();
|
||||
int direction = sortCB.getSortDirection();
|
||||
int x = 0;
|
||||
|
||||
Object thisData = rowCells[selectedIndex].sortByObject(direction);
|
||||
Object otherData = otherObj.rowCells[selectedIndex]
|
||||
.sortByObject(direction);
|
||||
|
||||
if (thisData instanceof String) {
|
||||
x = ((String) thisData).compareTo((String) otherData);
|
||||
} else if (thisData instanceof Number) {
|
||||
double thisNumber = (Float) thisData;
|
||||
double otherNumber = (Float) otherData;
|
||||
|
||||
if (thisNumber < otherNumber) {
|
||||
x = -1;
|
||||
} else if (thisNumber > otherNumber) {
|
||||
x = 1;
|
||||
} else {
|
||||
x = 0;
|
||||
}
|
||||
FFMPTableCellData selectedCell = rowCells[selectedIndex];
|
||||
if (selectedCell.getCellText() != null) {
|
||||
String thisText = selectedCell.getCellText();
|
||||
String otherText = otherObj.rowCells[selectedIndex].getCellText();
|
||||
return thisText.compareTo(otherText);
|
||||
} else {
|
||||
float thisFloat = selectedCell.sortByNumber();
|
||||
float otherFloat = otherObj.rowCells[selectedIndex].sortByNumber();
|
||||
return Float.compare(thisFloat, otherFloat);
|
||||
}
|
||||
|
||||
if (direction == SWT.DOWN) {
|
||||
if (x < 0) {
|
||||
return 1;
|
||||
} else if (x > 0) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
return x;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -77,6 +77,7 @@ import com.raytheon.uf.viz.monitor.ffmp.ui.dialogs.FfmpTableConfigData;
|
|||
* Apr 15, 2013 1911 dhladky Fixed forced FFG for centered aggregates.
|
||||
* Apr 24, 2013 1946 mpduff Fixed FFFG value for ALL when an aggregate is forced
|
||||
* Apr 26, 2013 1954 bsteffen Minor code cleanup throughout FFMP.
|
||||
* May 7, 2013 1986 njensen Removed unnecessary sort
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -98,12 +99,10 @@ public class FFMPDataGenerator {
|
|||
|
||||
private final Date paintRefTime;
|
||||
|
||||
|
||||
private final Object centeredAggregationKey;
|
||||
|
||||
private final String huc;
|
||||
|
||||
|
||||
private final double sliderTime;
|
||||
|
||||
private boolean isWorstCase = false;
|
||||
|
@ -305,7 +304,6 @@ public class FFMPDataGenerator {
|
|||
}
|
||||
}
|
||||
}
|
||||
tData.sortData();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
statusHandler.handle(Priority.PROBLEM,
|
||||
|
@ -583,7 +581,8 @@ public class FFMPDataGenerator {
|
|||
forced = forceUtil.isForced();
|
||||
}
|
||||
|
||||
if (!forcedPfafs.isEmpty() || !pfafList.isEmpty() && centeredAggregationKey == null) {
|
||||
if (!forcedPfafs.isEmpty() || !pfafList.isEmpty()
|
||||
&& centeredAggregationKey == null) {
|
||||
FFMPBasinData basinData = guidRecords.get(guidType).getBasinData(
|
||||
ALL);
|
||||
guidance = basinData.getAverageGuidanceValue(pfafList, resource
|
||||
|
@ -1014,7 +1013,6 @@ public class FFMPDataGenerator {
|
|||
|
||||
monitor.setQpeWindow(new FFMPTimeWindow(tableTime, qpeTime));
|
||||
|
||||
|
||||
if (isWorstCase || (centeredAggregationKey != null)) {
|
||||
// make sure that "ALL" is loaded
|
||||
localHuc = ALL;
|
||||
|
|
|
@ -44,6 +44,7 @@ import com.raytheon.uf.viz.core.localization.CAVELocalizationNotificationObserve
|
|||
import com.raytheon.uf.viz.core.localization.LocalizationConstants;
|
||||
import com.raytheon.uf.viz.core.localization.LocalizationInitializer;
|
||||
import com.raytheon.uf.viz.core.localization.LocalizationManager;
|
||||
import com.raytheon.uf.viz.core.notification.jobs.NotificationManagerJob;
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -55,6 +56,7 @@ import com.raytheon.uf.viz.core.localization.LocalizationManager;
|
|||
* ------------ ---------- ----------- --------------------------
|
||||
* Sep 29, 2008 #1433 chammack Initial creation
|
||||
* Jan 12, 2012 #27 rferrel Added createAlertVisualization
|
||||
* May 08, 2013 1939 rjpeter Updated to start NotificationManagerJob.
|
||||
* </pre>
|
||||
*
|
||||
* @author chammack
|
||||
|
@ -128,6 +130,10 @@ public class AlertVizApplication implements IStandaloneComponent {
|
|||
|
||||
// Job is not running on port, launch UI.
|
||||
AlertVisualization av = createAlertVisualization(true, display);
|
||||
|
||||
// open JMS connection to allow alerts to be received
|
||||
NotificationManagerJob.connect();
|
||||
|
||||
Throwable t = null;
|
||||
try {
|
||||
while (!display.isDisposed()) {
|
||||
|
@ -153,17 +159,18 @@ public class AlertVizApplication implements IStandaloneComponent {
|
|||
display.dispose();
|
||||
if (t != null) {
|
||||
// Killed because of error, set exit status to non zero value
|
||||
return IApplication.EXIT_RELAUNCH;
|
||||
return IApplication.EXIT_RELAUNCH;
|
||||
}
|
||||
}
|
||||
|
||||
return av.getExitStatus();
|
||||
}
|
||||
|
||||
|
||||
protected AlertVisualization createAlertVisualization(
|
||||
boolean runningStandalone, final Display display) {
|
||||
return new AlertVisualization(runningStandalone, display);
|
||||
boolean runningStandalone, final Display display) {
|
||||
return new AlertVisualization(runningStandalone, display);
|
||||
}
|
||||
|
||||
protected void initializeObservers() {
|
||||
CAVELocalizationNotificationObserver.register();
|
||||
}
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
<!-- specify the connection to the broker (qpid) -->
|
||||
<!-- MaxPrefetch set at 0, due to DataPool routers getting messages backed up behind long running tasks -->
|
||||
<bean id="amqConnectionFactory" class="org.apache.qpid.client.AMQConnectionFactory">
|
||||
<constructor-arg type="java.lang.String" value="amqp://guest:guest@/edex?brokerlist='tcp://${broker.addr}?retries='9999'&connecttimeout='5000'&connectdelay='5000''&maxprefetch='0'&sync_publish='all'&sync_ack='true'"/>
|
||||
<constructor-arg type="java.lang.String" value="amqp://guest:guest@/edex?brokerlist='tcp://${broker.addr}?retries='9999'&connecttimeout='5000'&connectdelay='5000'&heartbeat='0''&maxprefetch='0'&sync_publish='all'&sync_ack='true'"/>
|
||||
</bean>
|
||||
|
||||
<bean id="jmsPooledConnectionFactory" class="com.raytheon.uf.common.jms.JmsPooledConnectionFactory">
|
||||
|
@ -239,14 +239,14 @@
|
|||
<route id="alertVizNotify">
|
||||
<from uri="vm:edex.alertVizNotification" />
|
||||
<bean ref="serializationUtil" method="transformToThrift" />
|
||||
<to uri="jms-generic:topic:edex.alerts.msg?deliveryPersistent=false" />
|
||||
<to uri="jms-generic:topic:edex.alerts.msg?deliveryPersistent=false&timeToLive=60000" />
|
||||
</route>
|
||||
|
||||
<!-- Route to send text products to alarm/alert -->
|
||||
<route id="alarmAlertNotify">
|
||||
<from uri="vm:edex.alarmAlertNotification" />
|
||||
<bean ref="serializationUtil" method="transformToThrift" />
|
||||
<to uri="jms-generic:topic:edex.alarms.msg?deliveryPersistent=false" />
|
||||
<to uri="jms-generic:topic:edex.alarms.msg?deliveryPersistent=false&timeToLive=60000" />
|
||||
</route>
|
||||
|
||||
<route id="siteActivationRoute">
|
||||
|
@ -267,7 +267,7 @@
|
|||
<method bean="siteActivateNotifyFilter" method="isSiteActivateNotification" />
|
||||
<bean ref="siteActivationMonitor" method="handleNotification"/>
|
||||
<bean ref="serializationUtil" method="transformToThrift" />
|
||||
<to uri="jms-generic:topic:edex.alerts.siteActivate" />
|
||||
<to uri="jms-generic:topic:edex.alerts.siteActivate?timeToLive=60000" />
|
||||
</filter>
|
||||
</route>
|
||||
|
||||
|
|
|
@ -43,6 +43,7 @@ import org.hibernate.AnnotationException;
|
|||
import com.raytheon.uf.common.dataplugin.PluginException;
|
||||
import com.raytheon.uf.common.serialization.ISerializableObject;
|
||||
import com.raytheon.uf.common.serialization.SerializableManager;
|
||||
import com.raytheon.uf.common.util.StringUtil;
|
||||
import com.raytheon.uf.edex.core.EDEXUtil;
|
||||
import com.raytheon.uf.edex.core.props.PropertiesFactory;
|
||||
import com.raytheon.uf.edex.database.DatabasePluginProperties;
|
||||
|
@ -67,8 +68,8 @@ import com.raytheon.uf.edex.database.plugin.PluginVersionDao;
|
|||
* 10/8/2008 1532 bphillip Initial checkin
|
||||
* 2/9/2009 1990 bphillip Fixed index creation
|
||||
* 03/20/09 njensen Implemented IPluginRegistryChanged
|
||||
* Mar 02, 2013 1970 bgonzale Updated createIndexTableNamePattern to match text preceeding
|
||||
* %TABLE%.
|
||||
* Mar 02, 2013 1970 bgonzale Added check for abstract entities in sql index naming.
|
||||
* Removed unused private method populateSchema.
|
||||
* </pre>
|
||||
*
|
||||
* @author bphillip
|
||||
|
@ -86,6 +87,8 @@ public class SchemaManager implements IDatabasePluginRegistryChanged {
|
|||
*/
|
||||
private static final long pluginLockTimeOutMillis = 120000;
|
||||
|
||||
private static final String TABLE = "%TABLE%";
|
||||
|
||||
/** The singleton instance */
|
||||
private static SchemaManager instance;
|
||||
|
||||
|
@ -102,7 +105,7 @@ public class SchemaManager implements IDatabasePluginRegistryChanged {
|
|||
.compile("^create (?:table |index |sequence )(?:[A-Za-z_0-9]*\\.)?(.+?)(?: .*)?$");
|
||||
|
||||
private Pattern createIndexTableNamePattern = Pattern
|
||||
.compile("^create index \\w*?%TABLE%.+? on (.+?) .*$");
|
||||
.compile("^create index %TABLE%.+? on (.+?) .*$");
|
||||
|
||||
/**
|
||||
* Gets the singleton instance
|
||||
|
@ -128,52 +131,6 @@ public class SchemaManager implements IDatabasePluginRegistryChanged {
|
|||
.getEnvValue("PLUGINDIR");
|
||||
}
|
||||
|
||||
private PluginSchema populateSchema(String pluginName, String database,
|
||||
PluginSchema schema, List<String> tableNames) {
|
||||
List<String> ddls = null;
|
||||
|
||||
for (String sql : ddls) {
|
||||
for (String table : tableNames) {
|
||||
if (sql.startsWith("create table " + table.toLowerCase() + " ")) {
|
||||
schema.addCreateSql(sql);
|
||||
break;
|
||||
} else if (sql.startsWith("drop table " + table.toLowerCase()
|
||||
+ ";")) {
|
||||
sql = sql.replace("drop table ", "drop table if exists ");
|
||||
schema.addDropSql(sql.replace(";", " cascade;"));
|
||||
break;
|
||||
} else if (sql.startsWith("create index")
|
||||
&& sql.contains(" on " + table.toLowerCase())) {
|
||||
if (sql.contains("%TABLE%")) {
|
||||
sql = sql.replaceFirst("%TABLE%", table.toLowerCase());
|
||||
}
|
||||
String dropIndexSql = sql.replace("create index",
|
||||
"drop index if exists");
|
||||
dropIndexSql = dropIndexSql.substring(0,
|
||||
dropIndexSql.indexOf(" on "))
|
||||
+ ";";
|
||||
sql = dropIndexSql + sql;
|
||||
schema.addCreateSql(sql);
|
||||
break;
|
||||
} else if (sql.startsWith("alter table " + table.toLowerCase()
|
||||
+ " ")
|
||||
&& sql.contains(" drop ")) {
|
||||
schema.addDropSql(sql);
|
||||
break;
|
||||
} else if (sql.startsWith("alter table " + table.toLowerCase()
|
||||
+ " ")
|
||||
&& sql.contains(" add ")) {
|
||||
if (sql.contains("foreign key")) {
|
||||
sql = sql.replace(";", " ON DELETE CASCADE;");
|
||||
}
|
||||
schema.addCreateSql(sql);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return schema;
|
||||
}
|
||||
|
||||
/**
|
||||
* Runs all scripts for a particular plugin
|
||||
*
|
||||
|
@ -349,8 +306,12 @@ public class SchemaManager implements IDatabasePluginRegistryChanged {
|
|||
if (sql.startsWith("create index")) {
|
||||
Matcher matcher = createIndexTableNamePattern.matcher(sql);
|
||||
if (matcher.matches()) {
|
||||
createSql.set(i,
|
||||
sql.replace("%TABLE%", matcher.group(1)));
|
||||
createSql.set(i, StringUtil.replace(sql, TABLE,
|
||||
matcher.group(1)));
|
||||
} else if (sql.contains(TABLE)) {
|
||||
// replace %TABLE% in sql statements with an empty
|
||||
// string
|
||||
createSql.set(i, StringUtil.replace(sql, TABLE, ""));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -70,7 +70,7 @@
|
|||
<filter>
|
||||
<method bean="gfeNotifyFilter" method="isGfeNotification" />
|
||||
<bean ref="serializationUtil" method="transformToThrift" />
|
||||
<to uri="jms-generic:topic:edex.alerts.gfe" />
|
||||
<to uri="jms-generic:topic:edex.alerts.gfe?timeToLive=60000" />
|
||||
</filter>
|
||||
<doCatch>
|
||||
<exception>java.lang.Throwable</exception>
|
||||
|
|
|
@ -118,7 +118,7 @@
|
|||
|
||||
<!-- Convert the topic into a queue so only one consumer gets each message and we still have competing consumers. -->
|
||||
<route id="gfeDataURINotificationQueueRoute">
|
||||
<from uri="jms-gfe-notify:topic:edex.alerts?durableSubscriptionName=gfeNotificationSubscription" />
|
||||
<from uri="jms-gfe-notify:topic:edex.alerts?clientId=gfeNotify&durableSubscriptionName=gfeNotificationSubscription" />
|
||||
<doTry>
|
||||
<to uri="jms-generic:queue:gfeDataURINotification"/>
|
||||
<doCatch>
|
||||
|
|
|
@ -997,23 +997,29 @@ public class GFEDao extends DefaultPluginDao {
|
|||
sess = getHibernateTemplate().getSessionFactory()
|
||||
.openStatelessSession();
|
||||
tx = sess.beginTransaction();
|
||||
// use intersection of time range, UPDATE statement don't auto join
|
||||
// table so have to manually select the id
|
||||
Query query = sess
|
||||
.createQuery("UPDATE GridDataHistory SET lastSentTime = ?"
|
||||
+ " WHERE parent.parmId = ? AND parent.dataTime.validPeriod.start >= ?"
|
||||
+ " AND parent.dataTime.validPeriod.end >= ?");
|
||||
query.setParameter(0, parmId);
|
||||
query.setTimestamp(1, tr.getStart());
|
||||
+ " WHERE parent.id in (SELECT id FROM GFERecord "
|
||||
+ " WHERE parmId = ?"
|
||||
+ " AND dataTime.validPeriod.start < ?"
|
||||
+ " AND dataTime.validPeriod.end > ?)");
|
||||
query.setTimestamp(0, sentTime);
|
||||
query.setParameter(1, parmId);
|
||||
query.setTimestamp(2, tr.getEnd());
|
||||
query.setTimestamp(3, tr.getStart());
|
||||
query.executeUpdate();
|
||||
|
||||
// use intersection of time range
|
||||
query = sess
|
||||
.createQuery("SELECT hist.parent.dataTime.validPeriod, hist "
|
||||
+ "FROM GridDataHistory hist"
|
||||
+ " WHERE hist.parent.parmId = ? AND hist.parent.dataTime.validPeriod.start >= ?"
|
||||
+ " AND hist.parent.dataTime.validPeriod.end >= ?");
|
||||
+ " WHERE hist.parent.parmId = ? AND hist.parent.dataTime.validPeriod.start < ?"
|
||||
+ " AND hist.parent.dataTime.validPeriod.end > ?");
|
||||
query.setParameter(0, parmId);
|
||||
query.setTimestamp(1, tr.getStart());
|
||||
query.setTimestamp(2, tr.getEnd());
|
||||
query.setTimestamp(1, tr.getEnd());
|
||||
query.setTimestamp(2, tr.getStart());
|
||||
rows = query.list();
|
||||
tx.commit();
|
||||
} catch (Exception e) {
|
||||
|
|
|
@ -75,8 +75,8 @@ public class GFELockDao extends CoreDao {
|
|||
* If errors occur during database interaction
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public Map<ParmID, LockTable> getLocks(final List<ParmID> parmIds, WsId wsId)
|
||||
throws DataAccessLayerException {
|
||||
public Map<ParmID, LockTable> getLocks(final Collection<ParmID> parmIds,
|
||||
WsId wsId) throws DataAccessLayerException {
|
||||
// Return if no parmIDs are provided
|
||||
if (parmIds.isEmpty()) {
|
||||
return Collections.emptyMap();
|
||||
|
|
|
@ -34,11 +34,10 @@ import jep.JepException;
|
|||
import com.raytheon.edex.plugin.gfe.config.GFESiteActivation;
|
||||
import com.raytheon.edex.plugin.gfe.config.IFPServerConfig;
|
||||
import com.raytheon.edex.plugin.gfe.config.IFPServerConfigManager;
|
||||
import com.raytheon.edex.plugin.gfe.db.dao.GFEDao;
|
||||
import com.raytheon.edex.plugin.gfe.exception.GfeConfigurationException;
|
||||
import com.raytheon.edex.plugin.gfe.server.GridParmManager;
|
||||
import com.raytheon.edex.plugin.gfe.server.database.GridDatabase;
|
||||
import com.raytheon.edex.plugin.gfe.util.SendNotifications;
|
||||
import com.raytheon.uf.common.dataplugin.PluginException;
|
||||
import com.raytheon.uf.common.dataplugin.gfe.GridDataHistory;
|
||||
import com.raytheon.uf.common.dataplugin.gfe.db.objects.ParmID;
|
||||
import com.raytheon.uf.common.dataplugin.gfe.server.message.ServerResponse;
|
||||
|
@ -48,7 +47,6 @@ import com.raytheon.uf.common.status.IUFStatusHandler;
|
|||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.common.time.TimeRange;
|
||||
import com.raytheon.uf.edex.core.EDEXUtil;
|
||||
import com.raytheon.uf.edex.database.plugin.PluginFactory;
|
||||
|
||||
/**
|
||||
* Class to for running the isc scripts
|
||||
|
@ -200,28 +198,25 @@ public class IscSendJob implements Runnable {
|
|||
}
|
||||
|
||||
try {
|
||||
// TODO: Interact with IFPGridDatabase
|
||||
GFEDao dao = (GFEDao) PluginFactory.getInstance().getPluginDao(
|
||||
"gfe");
|
||||
|
||||
ServerResponse<List<TimeRange>> sr = GridParmManager
|
||||
.getGridInventory(id);
|
||||
if (!sr.isOkay()) {
|
||||
statusHandler.error("Error getting inventory for " + id);
|
||||
return;
|
||||
}
|
||||
|
||||
WsId wsId = new WsId(InetAddress.getLocalHost(), "ISC", "ISC");
|
||||
|
||||
List<GridHistoryUpdateNotification> notifications = new ArrayList<GridHistoryUpdateNotification>(
|
||||
1);
|
||||
Map<TimeRange, List<GridDataHistory>> histories = dao
|
||||
GridDatabase gridDb = GridParmManager.getDb(id.getDbId());
|
||||
ServerResponse<Map<TimeRange, List<GridDataHistory>>> sr = gridDb
|
||||
.updateSentTime(id, tr, new Date());
|
||||
notifications.add(new GridHistoryUpdateNotification(id,
|
||||
histories, wsId, siteId));
|
||||
SendNotifications.send(notifications);
|
||||
} catch (PluginException e) {
|
||||
statusHandler.error("Error creating GFE dao!", e);
|
||||
if (sr.isOkay()) {
|
||||
WsId wsId = new WsId(InetAddress.getLocalHost(), "ISC",
|
||||
"ISC");
|
||||
List<GridHistoryUpdateNotification> notifications = new ArrayList<GridHistoryUpdateNotification>(
|
||||
1);
|
||||
Map<TimeRange, List<GridDataHistory>> histories = sr
|
||||
.getPayload();
|
||||
notifications.add(new GridHistoryUpdateNotification(id,
|
||||
histories, wsId, siteId));
|
||||
SendNotifications.send(notifications);
|
||||
|
||||
} else {
|
||||
statusHandler
|
||||
.error("Error updating last sent times in GFERecords: "
|
||||
+ sr.getMessages());
|
||||
}
|
||||
} catch (Exception e) {
|
||||
statusHandler.error(
|
||||
"Error updating last sent times in GFERecords.", e);
|
||||
|
|
|
@ -479,6 +479,24 @@ public abstract class GridDatabase {
|
|||
+ this.getClass().getName());
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the sent time for all histories of passed parmId during the
|
||||
* timeRange. The histories are then returned in a map by timeRange.
|
||||
*
|
||||
* @param parmId
|
||||
* the parmId to use
|
||||
* @param tr
|
||||
* the time range to update sent time for
|
||||
* @param sentTime
|
||||
* the sent time to update to
|
||||
* @return
|
||||
*/
|
||||
public ServerResponse<Map<TimeRange, List<GridDataHistory>>> updateSentTime(
|
||||
final ParmID parmId, TimeRange tr, Date sentTime) {
|
||||
throw new UnsupportedOperationException("Not implemented for class "
|
||||
+ this.getClass().getName());
|
||||
}
|
||||
|
||||
public ServerResponse<?> saveGridSlices(ParmID parmId, TimeRange tr,
|
||||
List<IGridSlice> sliceData, WsId requestor,
|
||||
List<TimeRange> skipDelete) {
|
||||
|
|
|
@ -2555,6 +2555,7 @@ public class IFPGridDatabase extends GridDatabase {
|
|||
public ServerResponse<?> updatePublishTime(List<GridDataHistory> history,
|
||||
Date publishTime) {
|
||||
ServerResponse<?> sr = new ServerResponse<String>();
|
||||
|
||||
GFEDao dao = null;
|
||||
try {
|
||||
dao = (GFEDao) PluginFactory.getInstance().getPluginDao("gfe");
|
||||
|
@ -2566,6 +2567,7 @@ public class IFPGridDatabase extends GridDatabase {
|
|||
"Unable to update grid history!", e);
|
||||
sr.addMessage("Error updating history");
|
||||
}
|
||||
|
||||
return sr;
|
||||
}
|
||||
|
||||
|
@ -2592,4 +2594,35 @@ public class IFPGridDatabase extends GridDatabase {
|
|||
|
||||
return rval;
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the sent time for all histories of passed parmId during the
|
||||
* timeRange. The histories are then returned in a map by timeRange.
|
||||
*
|
||||
* @param parmId
|
||||
* the parmId to use
|
||||
* @param tr
|
||||
* the time range to update sent time for
|
||||
* @param sentTime
|
||||
* the sent time to update to
|
||||
* @return
|
||||
*/
|
||||
@Override
|
||||
public ServerResponse<Map<TimeRange, List<GridDataHistory>>> updateSentTime(
|
||||
final ParmID parmId, TimeRange tr, Date sentTime) {
|
||||
ServerResponse<Map<TimeRange, List<GridDataHistory>>> sr = new ServerResponse<Map<TimeRange, List<GridDataHistory>>>();
|
||||
try {
|
||||
ParmID dbParmId = getCachedParmID(parmId);
|
||||
GFEDao dao = new GFEDao();
|
||||
sr.setPayload(dao.updateSentTime(dbParmId, tr, sentTime));
|
||||
} catch (UnknownParmIdException e) {
|
||||
sr.addMessage(e.getLocalizedMessage());
|
||||
} catch (Exception e) {
|
||||
statusHandler.handle(Priority.PROBLEM,
|
||||
"Unable to update grid history last sent time", e);
|
||||
sr.addMessage("Unable to update grid history last sent time");
|
||||
}
|
||||
|
||||
return sr;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -119,12 +119,41 @@ public class LockManager {
|
|||
}
|
||||
|
||||
// extract the ParmIds from the request list
|
||||
List<ParmID> parmIds = new ArrayList<ParmID>();
|
||||
Set<ParmID> parmIds = new HashSet<ParmID>();
|
||||
try {
|
||||
sr.addMessages(extractParmIds(request, parmIds, siteID));
|
||||
List<ParmID> nonIfpParmIds = new LinkedList<ParmID>();
|
||||
|
||||
sr.setPayload(new ArrayList<LockTable>(dao.getLocks(parmIds,
|
||||
requestor).values()));
|
||||
// remove parm IDs that are not persisted to database
|
||||
Iterator<ParmID> iter = parmIds.iterator();
|
||||
while (iter.hasNext()) {
|
||||
ParmID id = iter.next();
|
||||
if (id.getId() == 0) {
|
||||
nonIfpParmIds.add(id);
|
||||
iter.remove();
|
||||
}
|
||||
}
|
||||
|
||||
List<LockTable> payLoad = null;
|
||||
|
||||
if (!parmIds.isEmpty()) {
|
||||
Map<ParmID, LockTable> lockMap = dao.getLocks(parmIds,
|
||||
requestor);
|
||||
payLoad = new ArrayList<LockTable>(lockMap.size()
|
||||
+ nonIfpParmIds.size());
|
||||
payLoad.addAll(lockMap.values());
|
||||
} else {
|
||||
payLoad = new ArrayList<LockTable>(nonIfpParmIds.size());
|
||||
}
|
||||
|
||||
if (!nonIfpParmIds.isEmpty()) {
|
||||
for (ParmID id : nonIfpParmIds) {
|
||||
payLoad.add(new LockTable(id, new ArrayList<Lock>(0),
|
||||
requestor));
|
||||
}
|
||||
}
|
||||
|
||||
sr.setPayload(payLoad);
|
||||
} catch (Exception e) {
|
||||
logger.error("Error getting lock tables for " + parmIds, e);
|
||||
sr.addMessage("Error getting lock tables for " + parmIds);
|
||||
|
@ -214,12 +243,22 @@ public class LockManager {
|
|||
return sr;
|
||||
}
|
||||
|
||||
List<ParmID> parmIds = new LinkedList<ParmID>();
|
||||
Set<ParmID> parmIds = new HashSet<ParmID>();
|
||||
Map<ParmID, LockTable> lockTableMap;
|
||||
try {
|
||||
// extract the ParmIds from the requests
|
||||
sr.addMessages(extractParmIdsFromLockReq(req, parmIds, siteID));
|
||||
|
||||
Iterator<ParmID> iter = parmIds.iterator();
|
||||
while (iter.hasNext()) {
|
||||
ParmID id = iter.next();
|
||||
// non persisted parm IDs cannot be locked
|
||||
if (id.getId() == 0) {
|
||||
sr.addMessage("ParmID " + id + " is not a lockable parm");
|
||||
iter.remove();
|
||||
}
|
||||
}
|
||||
|
||||
// get the lock tables specific to the extracted parmIds
|
||||
lockTableMap = dao.getLocks(parmIds, requestor);
|
||||
} catch (Exception e) {
|
||||
|
@ -681,14 +720,14 @@ public class LockManager {
|
|||
* @throws GfeException
|
||||
*/
|
||||
private ServerResponse<?> extractParmIds(List<LockTableRequest> ltrList,
|
||||
List<ParmID> parmIds, String siteID) throws GfeException {
|
||||
Set<ParmID> parmIds, String siteID) throws GfeException {
|
||||
|
||||
ServerResponse<?> sr = new ServerResponse<String>();
|
||||
// process each request
|
||||
for (LockTableRequest ltr : ltrList) {
|
||||
if (ltr.isParmRequest()) {
|
||||
ParmID parmId = ltr.getParmId();
|
||||
// append parm (if not already in the list)
|
||||
// append parm (if not already in the set)
|
||||
if (!parmIds.contains(parmId)) {
|
||||
parmIds.add(GridParmManager.getDb(parmId.getDbId())
|
||||
.getCachedParmID(parmId));
|
||||
|
@ -697,11 +736,7 @@ public class LockManager {
|
|||
// get all the parmIds for that databaseId
|
||||
List<ParmID> pids = GridParmManager.getParmList(ltr.getDbId())
|
||||
.getPayload();
|
||||
for (ParmID id : pids) {
|
||||
if (!parmIds.contains(id)) {
|
||||
parmIds.add(id);
|
||||
}
|
||||
}
|
||||
parmIds.addAll(pids);
|
||||
} else {
|
||||
// get all the parms for all the databases
|
||||
List<DatabaseID> dbids = GridParmManager.getDbInventory(siteID)
|
||||
|
@ -709,11 +744,7 @@ public class LockManager {
|
|||
for (int j = 0; j < dbids.size(); j++) {
|
||||
List<ParmID> pids = GridParmManager.getParmList(
|
||||
dbids.get(j)).getPayload();
|
||||
for (ParmID id : pids) {
|
||||
if (!parmIds.contains(id)) {
|
||||
parmIds.add(id);
|
||||
}
|
||||
}
|
||||
parmIds.addAll(pids);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -738,7 +769,7 @@ public class LockManager {
|
|||
* If errors occur
|
||||
*/
|
||||
private ServerResponse<?> extractParmIdsFromLockReq(List<LockRequest> lrs,
|
||||
List<ParmID> parmIds, String siteID) throws GfeException {
|
||||
Set<ParmID> parmIds, String siteID) throws GfeException {
|
||||
ServerResponse<?> sr = new ServerResponse<String>();
|
||||
|
||||
// process each request
|
||||
|
|
|
@ -204,7 +204,7 @@ public class GfeIngestNotificationFilter {
|
|||
|
||||
// if we don't have the other component for this
|
||||
// fcstHour
|
||||
if (otherTimes == null
|
||||
if ((otherTimes == null)
|
||||
|| !otherTimes.remove(fcstHour)) {
|
||||
// need to wait for other component
|
||||
ParmID compPid = new ParmID(d2dParamName,
|
||||
|
@ -371,7 +371,8 @@ public class GfeIngestNotificationFilter {
|
|||
throws Exception {
|
||||
byte[] message = SerializationUtil.transformToThrift(notifications);
|
||||
EDEXUtil.getMessageProducer().sendAsyncUri(
|
||||
"jms-generic:topic:gfeGribNotification", message);
|
||||
"jms-generic:topic:gfeGribNotification?timeToLive=60000",
|
||||
message);
|
||||
SendNotifications.send(notifications);
|
||||
}
|
||||
|
||||
|
|
|
@ -77,6 +77,8 @@ from com.raytheon.uf.edex.database.cluster import ClusterTask
|
|||
# 03/12/13 1759 dgilling Remove unnecessary command line
|
||||
# processing.
|
||||
# 04/24/13 1941 dgilling Re-port WECache to match A1.
|
||||
# 05/08/13 1988 dgilling Fix history handling bug in
|
||||
# __getDbGrid().
|
||||
#
|
||||
#
|
||||
|
||||
|
@ -730,12 +732,6 @@ class IscMosaic:
|
|||
grid = self._wec[tr]
|
||||
if grid is not None:
|
||||
destGrid, history = grid
|
||||
|
||||
tempHistory = []
|
||||
for hist in history:
|
||||
tempHistory.append(hist.getCodedString())
|
||||
history = tempHistory
|
||||
|
||||
self.__dbGrid = (destGrid, history, tr)
|
||||
else:
|
||||
self.logProblem("Unable to access grid for ",
|
||||
|
@ -743,6 +739,7 @@ class IscMosaic:
|
|||
return None
|
||||
|
||||
return (self.__dbGrid[0], self.__dbGrid[1])
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# calculate file start/end processing times
|
||||
# Returns (startTime, endTime) or None for processing
|
||||
|
|
|
@ -83,7 +83,7 @@ import com.raytheon.uf.edex.database.query.DatabaseQuery;
|
|||
* that sent notification to D2DParmIdCache.
|
||||
* 01/14/13 #1469 bkowal Removed the hdf5 data directory
|
||||
* 04/08/13 #1293 bkowal Removed references to hdffileid.
|
||||
*
|
||||
* 05/08/13 1814 rjpeter Added time to live to topic message
|
||||
* </pre>
|
||||
*
|
||||
* @author bphillip
|
||||
|
@ -101,7 +101,7 @@ public class GribDao extends PluginDao {
|
|||
|
||||
private static final String THINNED_PTS = "thinnedPts";
|
||||
|
||||
private static final String PURGE_MODEL_CACHE_TOPIC = "jms-generic:topic:purgeGribModelCache";
|
||||
private static final String PURGE_MODEL_CACHE_TOPIC = "jms-generic:topic:purgeGribModelCache?timeToLive=60000";
|
||||
|
||||
/**
|
||||
* Creates a new GribPyDao object
|
||||
|
@ -171,7 +171,7 @@ public class GribDao extends PluginDao {
|
|||
IPersistable obj) throws Exception {
|
||||
GribRecord gribRec = (GribRecord) obj;
|
||||
|
||||
if (gribRec.getMessageData() != null
|
||||
if ((gribRec.getMessageData() != null)
|
||||
&& !gribRec.getModelInfo().getParameterName().equals("Missing")) {
|
||||
AbstractStorageRecord storageRecord = null;
|
||||
AbstractStorageRecord localSection = null;
|
||||
|
@ -182,8 +182,8 @@ public class GribDao extends PluginDao {
|
|||
* Stores the binary data to the HDF5 data store
|
||||
*/
|
||||
if (gribRec.getMessageData() instanceof float[]) {
|
||||
if (gribRec.getSpatialObject() != null
|
||||
&& gribRec.getMessageData() != null) {
|
||||
if ((gribRec.getSpatialObject() != null)
|
||||
&& (gribRec.getMessageData() != null)) {
|
||||
long[] sizes = new long[] {
|
||||
(gribRec.getSpatialObject()).getNx(),
|
||||
(gribRec.getSpatialObject()).getNy() };
|
||||
|
@ -316,7 +316,7 @@ public class GribDao extends PluginDao {
|
|||
for (PluginDataObject record : records) {
|
||||
GribRecord rec = (GribRecord) record;
|
||||
GribModel model = rec.getModelInfo();
|
||||
if (model.getParameterName() == null
|
||||
if ((model.getParameterName() == null)
|
||||
|| model.getParameterName().equals("Missing")) {
|
||||
logger.info("Discarding record due to missing or unknown parameter mapping: "
|
||||
+ record);
|
||||
|
@ -327,7 +327,7 @@ public class GribDao extends PluginDao {
|
|||
if (level != null) {
|
||||
MasterLevel ml = level.getMasterLevel();
|
||||
|
||||
if (ml != null
|
||||
if ((ml != null)
|
||||
&& !LevelFactory.UNKNOWN_LEVEL.equals(ml.getName())) {
|
||||
validLevel = true;
|
||||
}
|
||||
|
@ -362,7 +362,7 @@ public class GribDao extends PluginDao {
|
|||
for (PluginDataObject record : records) {
|
||||
GribRecord rec = (GribRecord) record;
|
||||
GribModel model = rec.getModelInfo();
|
||||
if (model.getParameterName() == null
|
||||
if ((model.getParameterName() == null)
|
||||
|| model.getParameterName().equals("Missing")) {
|
||||
logger.info("Discarding record due to missing or unknown parameter mapping: "
|
||||
+ record);
|
||||
|
@ -373,7 +373,7 @@ public class GribDao extends PluginDao {
|
|||
if (level != null) {
|
||||
MasterLevel ml = level.getMasterLevel();
|
||||
|
||||
if (ml != null
|
||||
if ((ml != null)
|
||||
&& !LevelFactory.UNKNOWN_LEVEL.equals(ml.getName())) {
|
||||
validLevel = true;
|
||||
}
|
||||
|
|
|
@ -26,7 +26,6 @@ import java.util.Map;
|
|||
import java.util.Properties;
|
||||
|
||||
import com.raytheon.edex.plugin.redbook.common.blocks.RedbookBlock.RedbookBlockFactory;
|
||||
import com.raytheon.edex.plugin.redbook.decoder.RedbookFcstMap;
|
||||
import com.raytheon.uf.common.status.IUFStatusHandler;
|
||||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.common.util.PropertiesUtil;
|
||||
|
@ -55,8 +54,8 @@ import com.raytheon.uf.common.util.ReflectionUtil;
|
|||
* @version 1.0
|
||||
*/
|
||||
public class RedbookBlockBuilder {
|
||||
private static final transient IUFStatusHandler statusHandler = UFStatus
|
||||
.getHandler(RedbookFcstMap.class);
|
||||
private static final IUFStatusHandler statusHandler = UFStatus
|
||||
.getHandler(RedbookBlockBuilder.class);
|
||||
|
||||
private static final int MIN_REMAINING = 4;
|
||||
|
||||
|
|
|
@ -152,7 +152,7 @@
|
|||
<method bean="textDecoder" method="separator" />
|
||||
<bean ref="textDecoder" method="transformToSimpleString" />
|
||||
<bean ref="serializationUtil" method="transformToThrift"/>
|
||||
<to uri="jms-text:topic:edex.alarms.msg" />
|
||||
<to uri="jms-text:topic:edex.alarms.msg?timeToLive=60000" />
|
||||
</split>
|
||||
</route>
|
||||
|
||||
|
|
|
@ -44,7 +44,7 @@
|
|||
<route id="utilityNotify">
|
||||
<from uri="vm://utilityNotify" />
|
||||
<bean ref="serializationUtil" method="transformToThrift" />
|
||||
<to uri="jms-generic:topic:edex.alerts.utility" />
|
||||
<to uri="jms-generic:topic:edex.alerts.utility?timeToLive=60000" />
|
||||
</route>
|
||||
|
||||
</camelContext>
|
||||
|
|
|
@ -19,21 +19,16 @@
|
|||
**/
|
||||
package com.raytheon.uf.common.dataplugin.ffmp;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.File;
|
||||
import java.io.InputStream;
|
||||
import java.lang.ref.SoftReference;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.zip.GZIPInputStream;
|
||||
|
||||
import com.raytheon.uf.common.localization.IPathManager;
|
||||
import com.raytheon.uf.common.localization.LocalizationContext;
|
||||
|
@ -43,8 +38,6 @@ import com.raytheon.uf.common.localization.LocalizationFile;
|
|||
import com.raytheon.uf.common.localization.PathManagerFactory;
|
||||
import com.raytheon.uf.common.localization.exception.LocalizationOpFailedException;
|
||||
import com.raytheon.uf.common.serialization.SerializationUtil;
|
||||
import com.raytheon.uf.common.serialization.adapters.FloatWKBReader;
|
||||
import com.raytheon.uf.common.serialization.adapters.FloatWKBWriter;
|
||||
import com.raytheon.uf.common.status.IUFStatusHandler;
|
||||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.common.status.UFStatus.Priority;
|
||||
|
@ -71,6 +64,7 @@ import com.vividsolutions.jts.simplify.TopologyPreservingSimplifier;
|
|||
* Dec 9, 2010 rjpeter Initial creation
|
||||
* Apr 25, 2013 1954 bsteffen Decompress ffmp geometries to save time
|
||||
* loading them.
|
||||
* Apr 25, 2013 1954 bsteffen Undo last commit to avoid invalid geoms.
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -134,44 +128,9 @@ public class HucLevelGeometriesFactory {
|
|||
|
||||
if (f.exists()) {
|
||||
try {
|
||||
File file = f.getFile();
|
||||
byte[] bytes = FileUtil.file2bytes(file, false);
|
||||
if (bytes[0] == (byte) 0x1f && bytes[1] == (byte) 0x8b) {
|
||||
// GZIP magic number is present, before 13.4.1 these
|
||||
// files were compressed and stored in a different
|
||||
// format, to maintain backwards compatibility we check
|
||||
// for compression and deserialize the old way. This
|
||||
// code can be removed any time after 13.5.1.
|
||||
System.out.println("Decompressing geometry files.");
|
||||
InputStream is = new ByteArrayInputStream(bytes);
|
||||
is = new GZIPInputStream(is, bytes.length);
|
||||
ByteArrayOutputStream os = new ByteArrayOutputStream(
|
||||
bytes.length * 3 / 2);
|
||||
byte[] buffer = new byte[1024 * 8];
|
||||
int numRead = 0;
|
||||
while ((numRead = is.read(buffer)) >= 0) {
|
||||
os.write(buffer, 0, numRead);
|
||||
}
|
||||
bytes = os.toByteArray();
|
||||
map = (Map<Long, Geometry>) SerializationUtil
|
||||
.transformFromThrift(Map.class, bytes);
|
||||
// save them back the new way.
|
||||
persistGeometryMap(dataKey, cwa, huc, map);
|
||||
} else {
|
||||
Map<Long, byte[]> serializableMap = (Map<Long, byte[]>) SerializationUtil
|
||||
.transformFromThrift(Map.class, bytes);
|
||||
FloatWKBReader reader = new FloatWKBReader(
|
||||
new GeometryFactory());
|
||||
map = new HashMap<Long, Geometry>(
|
||||
serializableMap.size());
|
||||
for (Entry<Long, byte[]> entry : serializableMap
|
||||
.entrySet()) {
|
||||
InputStream in = new ByteArrayInputStream(
|
||||
entry.getValue());
|
||||
Geometry geom = reader.readGeometry(in);
|
||||
map.put(entry.getKey(), geom);
|
||||
}
|
||||
}
|
||||
map = (Map<Long, Geometry>) SerializationUtil
|
||||
.transformFromThrift(FileUtil.file2bytes(
|
||||
f.getFile(), true));
|
||||
int sizeGuess = Math.max(
|
||||
Math.abs(pfafs.size() - map.size()), 10);
|
||||
pfafsToGenerate = new ArrayList<Long>(sizeGuess);
|
||||
|
@ -388,23 +347,13 @@ public class HucLevelGeometriesFactory {
|
|||
|
||||
protected synchronized void persistGeometryMap(String dataKey, String cwa,
|
||||
String huc, Map<Long, Geometry> map) throws Exception {
|
||||
|
||||
LocalizationContext lc = pathManager.getContext(
|
||||
LocalizationType.COMMON_STATIC, LocalizationLevel.SITE);
|
||||
LocalizationFile lf = pathManager.getLocalizationFile(lc,
|
||||
getGeomPath(dataKey, cwa, huc));
|
||||
FloatWKBWriter writer = new FloatWKBWriter();
|
||||
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
|
||||
Map<Long, byte[]> serializableMap = new HashMap<Long, byte[]>();
|
||||
for (Entry<Long, Geometry> entry : map.entrySet()) {
|
||||
writer.writeGeometry(entry.getValue(), bos);
|
||||
serializableMap.put(entry.getKey(), bos.toByteArray());
|
||||
bos.reset();
|
||||
}
|
||||
byte[] bytes = SerializationUtil.transformToThrift(serializableMap);
|
||||
FileUtil.bytes2File(bytes, lf.getFile(), false);
|
||||
FileUtil.bytes2File(SerializationUtil.transformToThrift(map),
|
||||
lf.getFile(), true);
|
||||
lf.save();
|
||||
|
||||
}
|
||||
|
||||
protected synchronized String getGeomPath(String dataKey, String cwa,
|
||||
|
|
|
@ -65,6 +65,26 @@ import com.raytheon.uf.common.util.ConvertUtil;
|
|||
* plugin specific data type would be called SatelliteRecord.
|
||||
*
|
||||
* <pre>
|
||||
* Hibernate Annotation Requirements for "@Entity" annotated classes that are subclasses
|
||||
* of PluginDataObject
|
||||
*
|
||||
* 1) If it is not abstract and not a super class for "@Entity" annotated
|
||||
* subclasses, then add a SequenceGenerator annotation:
|
||||
* "@SequenceGenerator(initialValue = 1, name = PluginDataObject.ID_GEN, sequenceName = "
|
||||
* <tablename>seq")"
|
||||
*
|
||||
* 2) If it is abstract and a super class for @Entity annotated subclasses:
|
||||
*
|
||||
* - if there are "@ManyToOne" or "@OneToMany" relationships to the class, then
|
||||
* an "@Entity" annotation has to be used otherwise use a "@MappedSuperClass"
|
||||
* annotation
|
||||
*
|
||||
* - Add an "@Inheritance" annotation
|
||||
* "@Inheritance(strategy = InheritanceType.TABLE_PER_CLASS)"
|
||||
*
|
||||
* - Add an "@Sequence" annotation
|
||||
* "@SequenceGenerator(name = PluginDataObject.ID_GEN)"
|
||||
*
|
||||
* SOFTWARE HISTORY
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
|
|
|
@ -38,6 +38,7 @@ import org.apache.commons.lang.StringUtils;
|
|||
* Oct 20, 2011 rferrel Initial creation
|
||||
* Jul 13, 2012 740 djohnson Add join.
|
||||
* Nov 09, 2012 1322 djohnson Add NEWLINE, createMessage.
|
||||
* Mar 02, 2013 1970 bgonzale Added fast string replacement method.
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -185,4 +186,27 @@ public final class StringUtil {
|
|||
|
||||
return msg.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Fast replacement of all String target elements in String source with
|
||||
* String replacement.
|
||||
*
|
||||
* @param source
|
||||
* String that instances will be replaced in.
|
||||
* @param target
|
||||
* @param replacement
|
||||
* @return a new String equivalent to source with target Strings replaced by
|
||||
* String replacement
|
||||
*/
|
||||
public static String replace(final String source, final String target,
|
||||
final String replacement) {
|
||||
int targetIndex = 0;
|
||||
StringBuilder sb = new StringBuilder(source);
|
||||
|
||||
while ((targetIndex = sb.indexOf(target, targetIndex)) > -1) {
|
||||
sb.replace(targetIndex, targetIndex + target.length(), replacement);
|
||||
targetIndex += replacement.length();
|
||||
}
|
||||
return sb.toString();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -44,7 +44,7 @@
|
|||
<route id="vtecNotify">
|
||||
<from uri="vm:edex.vtecAlert" />
|
||||
<bean ref="serializationUtil" method="transformToThrift" />
|
||||
<to uri="jms-generic:topic:edex.alerts.vtec" />
|
||||
<to uri="jms-generic:topic:edex.alerts.vtec?timeToLive=60000" />
|
||||
</route>
|
||||
<route id="practiceVtecRoute">
|
||||
<from uri="jms-activetable:queue:practiceActiveTable?concurrentConsumers=1" />
|
||||
|
|
|
@ -46,7 +46,7 @@
|
|||
|
||||
<bean class="com.raytheon.uf.edex.plugin.grid.dao.GridDao"
|
||||
factory-method="setPurgeModelCacheTopic">
|
||||
<constructor-arg value="jms-generic:topic:purgeGridInfoCache" />
|
||||
<constructor-arg value="jms-generic:topic:purgeGridInfoCache?timeToLive=60000" />
|
||||
</bean>
|
||||
|
||||
<camelContext id="grid-common-camel" xmlns="http://camel.apache.org/schema/spring"
|
||||
|
|
|
@ -36,8 +36,8 @@ import com.raytheon.uf.edex.plugin.grid.dao.GridDao;
|
|||
*
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Nov 2, 2012 dgilling Initial creation
|
||||
*
|
||||
* Nov 2, 2012 dgilling Initial creation
|
||||
* May 08, 2013 1814 rjpeter Added time to live to topic.
|
||||
* </pre>
|
||||
*
|
||||
* @author dgilling
|
||||
|
@ -47,7 +47,7 @@ import com.raytheon.uf.edex.plugin.grid.dao.GridDao;
|
|||
public class DeleteAllGridDataHandler implements
|
||||
IRequestHandler<DeleteAllGridDataRequest> {
|
||||
|
||||
private static final String PLUGIN_PURGED_TOPIC = "jms-generic:topic:pluginPurged";
|
||||
private static final String PLUGIN_PURGED_TOPIC = "jms-generic:topic:pluginPurged?timeToLive=60000";
|
||||
|
||||
/*
|
||||
* (non-Javadoc)
|
||||
|
|
|
@ -45,7 +45,7 @@ import com.raytheon.uf.edex.database.purge.PurgeLogger;
|
|||
* ------------ ---------- ----------- --------------------------
|
||||
* Apr 19, 2012 #470 bphillip Initial creation
|
||||
* Jun 20, 2012 NC#606 ghull send purge-complete messages
|
||||
*
|
||||
* May 08, 2013 1814 rjpeter Added time to live to topic
|
||||
* </pre>
|
||||
*
|
||||
* @author bphillip
|
||||
|
@ -53,260 +53,264 @@ import com.raytheon.uf.edex.database.purge.PurgeLogger;
|
|||
*/
|
||||
public class PurgeJob extends Thread {
|
||||
|
||||
/** The type of purge */
|
||||
public enum PURGE_JOB_TYPE {
|
||||
PURGE_ALL, PURGE_EXPIRED
|
||||
}
|
||||
|
||||
public static final String PLUGIN_PURGED_TOPIC = "jms-generic:topic:pluginPurged";
|
||||
/** The type of purge */
|
||||
public enum PURGE_JOB_TYPE {
|
||||
PURGE_ALL, PURGE_EXPIRED
|
||||
}
|
||||
|
||||
private long startTime;
|
||||
public static final String PLUGIN_PURGED_TOPIC = "jms-generic:topic:pluginPurged?timeToLive=60000";
|
||||
|
||||
/** The cluster task name to use for purge jobs */
|
||||
public static final String TASK_NAME = "Purge Plugin Data";
|
||||
private long startTime;
|
||||
|
||||
/** The plugin associated with this purge job */
|
||||
private String pluginName;
|
||||
/** The cluster task name to use for purge jobs */
|
||||
public static final String TASK_NAME = "Purge Plugin Data";
|
||||
|
||||
/** The type of purge job being executed */
|
||||
private PURGE_JOB_TYPE purgeType;
|
||||
/** The plugin associated with this purge job */
|
||||
private final String pluginName;
|
||||
|
||||
/** Last time job has printed a timed out message */
|
||||
private long lastTimeOutMessage = 0;
|
||||
/** The type of purge job being executed */
|
||||
private final PURGE_JOB_TYPE purgeType;
|
||||
|
||||
/**
|
||||
* Creates a new Purge job for the specified plugin.
|
||||
*
|
||||
* @param pluginName
|
||||
* The plugin to be purged
|
||||
* @param purgeType
|
||||
* The type of purge to be executed
|
||||
*/
|
||||
public PurgeJob(String pluginName, PURGE_JOB_TYPE purgeType) {
|
||||
// Give the thread a name
|
||||
this.setName("Purge-" + pluginName.toUpperCase() + "-Thread");
|
||||
this.pluginName = pluginName;
|
||||
this.purgeType = purgeType;
|
||||
}
|
||||
/** Last time job has printed a timed out message */
|
||||
private final long lastTimeOutMessage = 0;
|
||||
|
||||
public void run() {
|
||||
/**
|
||||
* Creates a new Purge job for the specified plugin.
|
||||
*
|
||||
* @param pluginName
|
||||
* The plugin to be purged
|
||||
* @param purgeType
|
||||
* The type of purge to be executed
|
||||
*/
|
||||
public PurgeJob(String pluginName, PURGE_JOB_TYPE purgeType) {
|
||||
// Give the thread a name
|
||||
this.setName("Purge-" + pluginName.toUpperCase() + "-Thread");
|
||||
this.pluginName = pluginName;
|
||||
this.purgeType = purgeType;
|
||||
}
|
||||
|
||||
// Flag used to track if this job has failed
|
||||
boolean failed = false;
|
||||
startTime = System.currentTimeMillis();
|
||||
PurgeLogger.logInfo("Purging expired data...", pluginName);
|
||||
PluginDao dao = null;
|
||||
@Override
|
||||
public void run() {
|
||||
|
||||
try {
|
||||
dao = PluginFactory.getInstance().getPluginDao(pluginName);
|
||||
if (dao.getDaoClass() != null) {
|
||||
dao.purgeExpiredData();
|
||||
|
||||
PurgeLogger.logInfo("Data successfully Purged!", pluginName);
|
||||
// Flag used to track if this job has failed
|
||||
boolean failed = false;
|
||||
startTime = System.currentTimeMillis();
|
||||
PurgeLogger.logInfo("Purging expired data...", pluginName);
|
||||
PluginDao dao = null;
|
||||
|
||||
EDEXUtil.getMessageProducer().sendAsyncUri( PLUGIN_PURGED_TOPIC, pluginName );
|
||||
|
||||
} else {
|
||||
Method m = dao.getClass().getMethod("purgeExpiredData",
|
||||
new Class[] {});
|
||||
if (m != null) {
|
||||
if (m.getDeclaringClass().equals(PluginDao.class)) {
|
||||
PurgeLogger
|
||||
.logWarn(
|
||||
"Unable to purge data. This plugin does not specify a record class and does not implement a custom purger.",
|
||||
pluginName);
|
||||
} else {
|
||||
if (this.purgeType.equals(PURGE_JOB_TYPE.PURGE_EXPIRED)) {
|
||||
dao.purgeExpiredData();
|
||||
} else {
|
||||
dao.purgeAllData();
|
||||
}
|
||||
try {
|
||||
dao = PluginFactory.getInstance().getPluginDao(pluginName);
|
||||
if (dao.getDaoClass() != null) {
|
||||
dao.purgeExpiredData();
|
||||
|
||||
PurgeLogger.logInfo("Data successfully Purged!", pluginName);
|
||||
PurgeLogger.logInfo("Data successfully Purged!", pluginName);
|
||||
|
||||
EDEXUtil.getMessageProducer().sendAsyncUri( PLUGIN_PURGED_TOPIC, pluginName );
|
||||
}
|
||||
}
|
||||
}
|
||||
        } catch (Exception e) {
            failed = true;
            // keep getting next exceptions with sql exceptions to ensure
            // we can see the underlying error
            PurgeLogger
                    .logError("Error purging expired data!\n", pluginName, e);
            Throwable t = e.getCause();
            while (t != null) {
                if (t instanceof SQLException) {
                    SQLException se = ((SQLException) t).getNextException();
                    PurgeLogger.logError("Next exception:", pluginName, se);
                }
                t = t.getCause();
            }
        } finally {
            ClusterTask purgeLock = PurgeManager.getInstance().getPurgeLock();
            try {
                /*
                 * Update the status accordingly if the purge failed or
                 * succeeded
                 */
                PurgeDao purgeDao = new PurgeDao();
                PurgeJobStatus status = purgeDao
                        .getJobForPlugin(this.pluginName);
                if (status == null) {
                    PurgeLogger.logError(
                            "Purge job completed but no status object found!",
                            this.pluginName);
                } else {
                    if (failed) {
                        status.incrementFailedCount();
                        if (status.getFailedCount() >= PurgeManager
                                .getInstance().getFatalFailureCount()) {
                            PurgeLogger
                                    .logFatal(
                                            "Purger for this plugin has reached or exceeded consecutive failure limit of "
                                                    + PurgeManager
                                                            .getInstance()
                                                            .getFatalFailureCount()
                                                    + ". Data will no longer being purged for this plugin.",
                                            pluginName);
                        } else {
                            PurgeLogger.logError("Purge job has failed "
                                    + status.getFailedCount()
                                    + " consecutive times.", this.pluginName);
                            // Back the start time off by half an hour to try
                            // to purge again soon; don't want to start
                            // immediately so it doesn't ping pong between
                            // servers in a time out scenario
                            Date startTime = status.getStartTime();
                            startTime.setTime(startTime.getTime() - (1800000));
                        }
                    } else {
                        status.setFailedCount(0);
                    }
                    EDEXUtil.getMessageProducer().sendAsyncUri(
                            PLUGIN_PURGED_TOPIC, pluginName);

                    /*
                     * This purger thread has exceeded the time out duration
                     * but finally finished. Output a message and update the
                     * status
                     */
                    int deadPurgeJobAge = PurgeManager.getInstance()
                            .getDeadPurgeJobAge();
                    Calendar purgeTimeOutLimit = Calendar.getInstance();
                    purgeTimeOutLimit.setTimeZone(TimeZone.getTimeZone("GMT"));
                    purgeTimeOutLimit.add(Calendar.MINUTE, -deadPurgeJobAge);
                    if (startTime < purgeTimeOutLimit.getTimeInMillis()) {
                        PurgeLogger
                                .logInfo(
                                        "Purge job has recovered from timed out state!!",
                                        pluginName);
                    }
                    status.setRunning(false);
                    purgeDao.update(status);
                    /*
                     * Log execution times
                     */
                    long executionTime = getAge();
                    long execTimeInMinutes = executionTime / 60000;
                    if (execTimeInMinutes > 0) {
                        PurgeLogger.logInfo("Purge run time: " + executionTime
                                + " ms (" + execTimeInMinutes + " minutes)",
                                this.pluginName);
                    } else {
                        PurgeLogger.logInfo("Purge run time: " + executionTime
                                + " ms", this.pluginName);
                    }
                }
            } catch (Throwable e) {
                PurgeLogger
                        .logError(
                                "An unexpected error occurred upon completion of the purge job",
                                this.pluginName, e);
            } finally {
                ClusterLockUtils.unlock(purgeLock, false);
            }
        }
    }
    public void printTimedOutMessage(int deadPurgeJobAge) {
        // only print message every 5 minutes
        if (System.currentTimeMillis() - lastTimeOutMessage > 300000) {
            PurgeLogger.logFatal(
                    "Purger running time has exceeded timeout duration of "
                            + deadPurgeJobAge
                            + " minutes. Current running time: "
                            + (getAge() / 60000) + " minutes", pluginName);
            printStackTrace();
        }
    }

    /**
     * Prints the stack trace for this job thread.
     */
    public void printStackTrace() {
        StringBuffer buffer = new StringBuffer();
        buffer.append("Stack trace for Purge Job Thread:\n");
        buffer.append(getStackTrace(this));
        // If this thread is blocked, output the stack traces for the other
        // blocked threads to assist in determining the source of the
        // deadlocked threads
        if (this.getState().equals(State.BLOCKED)) {
            buffer.append("\tDUMPING OTHER BLOCKED THREADS\n");
            buffer.append(getBlockedStackTraces());
        }
        PurgeLogger.logError(buffer.toString(), this.pluginName);
    }

    /**
     * Gets the stack traces for all other threads in the BLOCKED state in the
     * JVM
     *
     * @return The stack traces for all other threads in the BLOCKED state in
     *         the JVM
     */
    private String getBlockedStackTraces() {
        StringBuffer buffer = new StringBuffer();
        Map<Thread, StackTraceElement[]> threads = Thread.getAllStackTraces();
        for (Thread t : threads.keySet()) {
            if (t.getState().equals(State.BLOCKED)) {
                if (t.getId() != this.getId()) {
                    buffer.append(getStackTrace(t));
                }
            }
        }
        return buffer.toString();
    }

    /**
     * Gets the stack trace for the given thread
     *
     * @param thread
     *            The thread to get the stack trace for
     * @return The stack trace as a String
     */
    private String getStackTrace(Thread thread) {
        StringBuffer buffer = new StringBuffer();
        StackTraceElement[] stack = Thread.getAllStackTraces().get(thread);
        buffer.append("\tThread ID: ").append(thread.getId())
                .append(" Thread state: ").append(this.getState())
                .append("\n");
        if (stack == null) {
            buffer.append("No stack trace could be retrieved for this thread");
        } else {
            for (StackTraceElement element : stack) {
                buffer.append("\t\t").append(element).append("\n");
            }
        }
        return buffer.toString();
    }

    public long getStartTime() {
        return startTime;
    }

    public long getAge() {
        return System.currentTimeMillis() - startTime;
    }
}
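
For orientation, the retry bookkeeping in run()'s finally block above reduces to two small pieces of time arithmetic: a failed job's recorded start time is pulled back by half an hour (1800000 ms) so the next attempt comes sooner without the job restarting immediately, and a job counts as recovered from a timed-out state when its start time is older than deadPurgeJobAge minutes measured against a GMT clock. The standalone sketch below restates that arithmetic only; the class and method names are illustrative and are not part of the EDEX source.

    import java.util.Calendar;
    import java.util.Date;
    import java.util.TimeZone;

    public class PurgeTimingSketch {

        /** Rewind a failed job's recorded start time by 30 minutes (1800000 ms). */
        public static Date backOffStartTime(Date startTime) {
            return new Date(startTime.getTime() - 1800000L);
        }

        /** True when a job that started at startMillis is older than the timeout window. */
        public static boolean exceededTimeout(long startMillis, int deadPurgeJobAgeMinutes) {
            Calendar cutoff = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
            cutoff.add(Calendar.MINUTE, -deadPurgeJobAgeMinutes);
            return startMillis < cutoff.getTimeInMillis();
        }
    }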

@ -45,7 +45,7 @@ import com.raytheon.uf.edex.database.DataAccessLayerException;
 * Date Ticket# Engineer Description
 * ------------ ---------- ----------- --------------------------
 * May 1, 2012 14715 rferrel Initial creation
 *
 * May 08, 2013 1814 rjpeter Added time to live to topic
 * </pre>
 *
 * @author rferrel
@ -92,8 +92,7 @@ public class TafQueueRequestHandler implements IRequestHandler<TafQueueRequest>
        case GET_TAFS:
            response = new ServerResponse<String>();
            idList = (List<String>) request.getArgument();
            List<TafQueueRecord> records = (List<TafQueueRecord>) dao
                    .getRecordsById(idList);
            List<TafQueueRecord> records = dao.getRecordsById(idList);
            makeTafs(records, response);
            break;
        case REMOVE_SELECTED:
@ -111,7 +110,7 @@ public class TafQueueRequestHandler implements IRequestHandler<TafQueueRequest>
                        + " forecast(s) removed.");
            }
            makeList(state, dao, response);
            if (state == TafQueueState.PENDING && numRemoved > 0) {
            if ((state == TafQueueState.PENDING) && (numRemoved > 0)) {
                sendNotification(Type.REMOVE_SELECTED);
            }
            break;
@ -193,6 +192,6 @@ public class TafQueueRequestHandler implements IRequestHandler<TafQueueRequest>
            throws SerializationException, EdexException {
        byte[] message = SerializationUtil.transformToThrift(type.toString());
        EDEXUtil.getMessageProducer().sendAsyncUri(
                "jms-generic:topic:tafQueueChanged", message);
                "jms-generic:topic:tafQueueChanged?timeToLive=60000", message);
    }
}
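
The ?timeToLive=60000 option added to the tafQueueChanged topic URI above presumably maps to the JMS producer's time-to-live, so a notification that no consumer picks up within a minute is discarded by the broker instead of accumulating. A hedged plain-JMS sketch of the same 60 second cap; the helper name and the session/topic arguments are illustrative, not part of this change.

    import javax.jms.JMSException;
    import javax.jms.MessageProducer;
    import javax.jms.Session;
    import javax.jms.TextMessage;
    import javax.jms.Topic;

    public class TopicTtlSketch {

        /** Publish a notification that expires 60 seconds after it is sent. */
        public static void publishWithTtl(Session session, Topic topic, String body)
                throws JMSException {
            MessageProducer producer = session.createProducer(topic);
            producer.setTimeToLive(60000L); // same cap as timeToLive=60000 in the URI
            TextMessage message = session.createTextMessage(body);
            producer.send(message);
            producer.close();
        }
    }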

@ -10,7 +10,7 @@
    <bean id="userAuthenticationDataChangedHandler"
        class="com.raytheon.uf.edex.useradmin.services.UserAuthenticationDataChangedHandler">
        <constructor-arg type="java.lang.String"
            value="jms-generic:topic:user.authentication.changed?destinationResolver=#qpidDurableResolver" />
            value="jms-generic:topic:user.authentication.changed?timeToLive=60000&amp;destinationResolver=#qpidDurableResolver" />
    </bean>

    <bean factory-bean="handlerRegistry" factory-method="register">

@ -80,7 +80,8 @@ cp -r ${LOCALIZATION_PATH}/cave_static/site/${CAPS_SITE}/gfe $caveDest/site
log_msg 70

log_msg Copying cave site maps configuration for site ${CAPS_SITE} to temporary directory...
cp -r ${LOCALIZATION_PATH}/cave_static/site/${CAPS_SITE}/bundles/maps $caveDest/site
mkdir $caveDest/site/bundles
cp -r ${LOCALIZATION_PATH}/cave_static/site/${CAPS_SITE}/bundles/maps $caveDest/site/bundles
log_msg 75

log_msg Copying cave site colormaps configuration for site ${CAPS_SITE} to temporary directory...

@ -10,6 +10,7 @@ package gov.noaa.nws.ncep.ui.nsharp.display.rsc;
 * Date Ticket# Engineer Description
 * ------- ------- -------- -----------
 * 04/23/2012 229 Chin Chen Initial coding
 * May 08, 2013 1847 bsteffen Allow painting with no Wind Data.
 *
 * </pre>
 *
@ -798,6 +799,9 @@ public class NsharpSkewTPaneResource extends NsharpAbstractPaneResource{
                //System.out.println( "layer#"+ i+ " RE_MAX_WIND =" + spd );
            }
        }
        if (layerStateList.isEmpty()) {
            return;
        }
        //#2: apply minimum distance rule, i.e no two wind layer closer than the minimum distance, also make sure
        // relative max wind layer is picked.
        lastHeight = -9999;
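
The isEmpty() check added above is the substance of the "Allow painting with no Wind Data" change: when a sounding carries no wind data the layer list stays empty, and the minimum-distance pass that follows assumed at least one entry. A minimal sketch of the same select-then-guard shape, with hypothetical type and method names that are not part of the NSHARP source:

    import java.util.ArrayList;
    import java.util.List;

    public class WindLayerGuardSketch {

        /** Hypothetical stand-in for the per-layer wind state kept by the resource. */
        static class LayerState {
            double heightInMeters;
            double windSpeedInKnots;
        }

        /** Return early when nothing qualified so downstream spacing logic never runs on an empty list. */
        static List<LayerState> selectLayersToDraw(List<LayerState> layerStateList) {
            if (layerStateList.isEmpty()) {
                return new ArrayList<LayerState>();
            }
            // ... apply the minimum-distance rule and keep relative max wind layers ...
            return layerStateList;
        }
    }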

@ -27,6 +27,7 @@
  - Date Ticket# Engineer Description
  - ============ ========== =========== ==========================
  - Mar 18, 2013 1814 rjpeter Initial Creation
  - May 08, 2013 1814 rjpeter Remove slow consumer disconnect
  -
  -->
<virtualhosts>
@ -39,11 +40,6 @@
            <class>org.apache.qpid.server.store.derby.DerbyMessageStore</class>
            <environment-path>${QPID_WORK}/messageStore</environment-path>
        </store>
        <slow-consumer-detection>
            <!-- Only check every 5 minutes -->
            <delay>5</delay>
            <timeunit>minutes</timeunit>
        </slow-consumer-detection>
        <queues>
            <!-- Define default exchange -->
            <exchange>amq.direct</exchange>
@ -63,6 +59,8 @@
            <durable>true</durable>

            <!-- Configure queues
                 Queues created on demand for AWIPS II

            <queue>
                <name>external.dropbox</name>
                <external.dropbox>
@ -71,31 +69,6 @@
            </queue>
            -->
        </queues>
        <topics>
            <slow-consumer-detection>
                <!-- The maximum depth in bytes before -->
                <!-- the policy will be applied-->
                <depth>104857600</depth>

                <!-- The maximum message age in milliseconds -->
                <!-- before the policy will be applied -->
                <messageAge>600000</messageAge>

                <!-- The maximum number of message before -->
                <!-- which the policy will be applied-->
                <messageCount>5000</messageCount>

                <!-- Policy Selection -->
                <policy>
                    <name>topicDelete</name>
                    <topicDelete>
                        <!-- Uncomment to enable deletion of durable subscriptions that fall behind -->
                        <!--delete-persistent/-->
                    </topicDelete>
                </policy>
            </slow-consumer-detection>
            <!-- Slow Consumer disconnect could be configured per topic. Use global configuration for now -->
        </topics>
    </edex>
  </virtualhost>
</virtualhosts>
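
With the broker-side slow-consumer-detection sections removed above, staleness on these topics appears to be bounded instead by the per-message time-to-live this commit adds on the publishing side. A consumer that wants to notice late delivery can still check the standard JMS expiration header; a small hedged sketch follows, with illustrative class and method names.

    import javax.jms.JMSException;
    import javax.jms.Message;

    public class ExpirationCheckSketch {

        /** True if the message carried a TTL and that TTL elapsed before processing. */
        public static boolean isExpired(Message message) throws JMSException {
            long expiration = message.getJMSExpiration(); // 0 means the message never expires
            return (expiration != 0) && (System.currentTimeMillis() > expiration);
        }
    }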

@ -3,7 +3,7 @@ diff -crB a/qpid-java.spec b/qpid-java.spec
--- b/qpid-java.spec 2013-04-24 13:31:29.000000000 -0500
***************
*** 1,6 ****
  Name: qpid-java
! Name: qpid-java
  Version: 0.18
! Release: 2%{?dist}
  Summary: Java implementation of Apache Qpid
@ -12,9 +12,9 @@ diff -crB a/qpid-java.spec b/qpid-java.spec
--- 1,8 ----
+ %define _awips2_directory "/awips2/qpid"
+
  Name: qpid-java
! Name: awips2-qpid-java
  Version: 0.18
! Release: 5%{?dist}
! Release: 1%{?dist}
  Summary: Java implementation of Apache Qpid
  License: Apache Software License
  Group: Development/Java