Merge branch 'master_14.1.1' (-21) into 'master_14.2.1' (-8)

merge performed by Richard Peter

Conflicts:
	cave/build/static/common/cave/etc/gfe/userPython/procedures/MergeHazards.py
	cave/build/static/common/cave/etc/gfe/userPython/utilities/SmartScript.py
	edexOsgi/build.edex/esb/conf/log4j-ingest.xml
	edexOsgi/build.edex/esb/conf/log4j-registry.xml
	edexOsgi/build.edex/esb/conf/log4j.xml
	edexOsgi/com.raytheon.edex.plugin.airep/res/spring/airep-ingest.xml
	edexOsgi/com.raytheon.edex.plugin.gfe/res/spring/gfe-common.xml
	edexOsgi/com.raytheon.edex.plugin.gfe/res/spring/gfe-request.xml
	edexOsgi/com.raytheon.edex.plugin.gfe/res/spring/gfe-spring.xml
	edexOsgi/com.raytheon.edex.plugin.grib/res/spring.deprecated/grib-decode.xml
	edexOsgi/com.raytheon.edex.plugin.grib/res/spring.future/grib-decode.xml
	edexOsgi/com.raytheon.edex.plugin.grib/res/spring/grib-distribution.xml
	edexOsgi/com.raytheon.edex.plugin.modelsounding/res/spring/modelsounding-ingest.xml
	edexOsgi/com.raytheon.edex.plugin.pirep/res/spring/pirep-ingest.xml
	edexOsgi/com.raytheon.edex.plugin.shef/src/com/raytheon/edex/plugin/shef/alarms/AlertalarmStdTextProductUtil.java
	edexOsgi/com.raytheon.edex.plugin.shef/src/com/raytheon/edex/plugin/shef/alarms/Constants.java
	edexOsgi/com.raytheon.edex.plugin.taf/src/com/raytheon/edex/plugin/taf/common/ChangeGroup.java
	edexOsgi/com.raytheon.edex.plugin.taf/src/com/raytheon/edex/plugin/taf/common/TafRecord.java
	edexOsgi/com.raytheon.edex.plugin.warning/res/spring/warning-ingest.xml
	edexOsgi/com.raytheon.uf.common.jms/src/com/raytheon/uf/common/jms/JmsPooledConnectionFactory.java
	edexOsgi/com.raytheon.uf.common.jms/src/com/raytheon/uf/common/jms/JmsPooledSession.java
	edexOsgi/com.raytheon.uf.edex.cpgsrv/res/spring/cpgsrv-spring.xml
	edexOsgi/com.raytheon.uf.edex.grid.staticdata/res/spring/grid-staticdata-process.xml
	edexOsgi/com.raytheon.uf.edex.ohd/res/spring/ohd-common.xml
	edexOsgi/com.raytheon.uf.edex.ohd/res/spring/satpre-spring.xml
	ncep/gov.noaa.nws.ncep.common.dataplugin.nctaf/src/gov/noaa/nws/ncep/common/dataplugin/nctaf/NcTafRecord.java
	ncep/gov.noaa.nws.ncep.common.dataplugin.ncuair/src/gov/noaa/nws/ncep/common/dataplugin/ncuair/NcUairRecord.java
	ncep/gov.noaa.nws.ncep.edex.plugin.ncairep/res/spring/ncairep-ingest.xml
	ncep/gov.noaa.nws.ncep.edex.plugin.ncccfp/res/spring/ncccfp-ingest.xml
	ncep/gov.noaa.nws.ncep.edex.plugin.ncgrib/res/spring/ncgrib-distribution.xml
	ncep/gov.noaa.nws.ncep.edex.plugin.ncgrib/res/spring/ncgrib-ingest.xml
	ncep/gov.noaa.nws.ncep.edex.plugin.ncpirep/res/spring/ncpirep-ingest.xml
	ncep/gov.noaa.nws.ncep.edex.plugin.ncscat/res/spring/ncscat-ingest.xml
	ncep/gov.noaa.nws.ncep.edex.plugin.ntrans/res/spring/ntrans-ingest.xml
	rpms/awips2.core/Installer.ant/scripts/profile.d/awips2Ant.csh

Former-commit-id: 889200c9eb3fbb835f8fef067476799f30a7a7e0
Steve Harris 2014-03-06 08:33:22 -06:00
commit ffcc34f8d4
153 changed files with 2205 additions and 1721 deletions

View file

@ -268,7 +268,7 @@
<sequential>
<property name="___memorySettingsVersion___"
value="3.0"/>
value="4.0"/>
<if>
<not>
<equals
@ -392,6 +392,37 @@
append="true" />
</actions>
</call>
<!-- site type specific overrides (wfo, ncep, etc) -->
<call path="//${cave.arch}/site-type-override">
<param name="site-type"
path="site-type/text()" />
<param name="max-memory"
path="ini-substitutions/max-memory/value/text()" />
<param name="max-perm"
path="ini-substitutions/max-perm/value/text()" />
<actions>
<!-- Create a site-type-specific ini file -->
<copy verbose="true"
file="/tmp/cave/cave.ini"
tofile="/tmp/cave/@{site-type}.ini"
overwrite="true" />
<!-- Update the ini file -->
<update.ini
ini.file="@{site-type}.ini"
jvm.arg="${cave-memory-settings.default-memory-setting.default-max-memory.jvm-arg}"
current.value="${cave-memory-settings.default-memory-setting.default-max-memory.value}"
new.value="@{max-memory}" />
<update.ini
ini.file="@{site-type}.ini"
jvm.arg="${cave-memory-settings.default-memory-setting.default-max-perm.jvm-arg}"
current.value="${cave-memory-settings.default-memory-setting.default-max-perm.value}"
new.value="@{max-perm}" />
</actions>
</call>
</xmltask>
<!-- Finish the iniLookup.sh script -->

View file

@ -2,7 +2,7 @@
<cave-memory-settings>
<!-- The version should be incremented whenever
the file layout changes. -->
<file-version>3.0</file-version>
<file-version>4.0</file-version>
<!-- Example Entry ... See Below. -->
<!--
@ -141,6 +141,7 @@
</max-perm>
</ini-substitutions>
</memory-setting>
</arch.x86>
<arch.x86_64>
@ -250,6 +251,61 @@
</max-perm>
</ini-substitutions>
</memory-setting>
<!-- memory default overrides for a particular site type (wfo, ncep, etc) -->
<!-- site-type names need to be all lower case -->
<site-type-override>
<site-type>wfo</site-type>
<ini-substitutions>
<max-memory>
<value>3072M</value>
</max-memory>
<max-perm>
<value>DEFAULT</value>
</max-perm>
</ini-substitutions>
</site-type-override>
<site-type-override>
<site-type>rfc</site-type>
<ini-substitutions>
<max-memory>
<value>2048M</value>
</max-memory>
<max-perm>
<value>DEFAULT</value>
</max-perm>
</ini-substitutions>
</site-type-override>
<site-type-override>
<site-type>ncep</site-type>
<ini-substitutions>
<max-memory>
<value>2048M</value>
</max-memory>
<max-perm>
<value>DEFAULT</value>
</max-perm>
</ini-substitutions>
</site-type-override>
<site-type-override>
<site-type>cwsu</site-type>
<ini-substitutions>
<max-memory>
<value>3072M</value>
</max-memory>
<max-perm>
<value>DEFAULT</value>
</max-perm>
</ini-substitutions>
</site-type-override>
</arch.x86_64>
</cave-memory-settings>

View file

@ -29,6 +29,7 @@
# Jan 30, 2014 #2593 bclement extracted generic part of getPidsOfMyRunningCaves into forEachRunningCave
# added methods for reading max memory from .ini files
# fixes for INI files with spaces
# Feb 20, 2014 #2780 bclement added site type ini file check
#
#
@ -68,7 +69,13 @@ function lookupINI()
if [ ${RC} -eq 0 ]; then
export CAVE_INI_ARG="--launcher.ini /awips2/cave/${ASSOCIATED_INI}"
else
export CAVE_INI_ARG="--launcher.ini /awips2/cave/cave.ini"
siteTypeIni="/awips2/cave/${SITE_TYPE}.ini"
if [[ -e ${siteTypeIni} ]]
then
export CAVE_INI_ARG="--launcher.ini ${siteTypeIni}"
else
export CAVE_INI_ARG="--launcher.ini /awips2/cave/cave.ini"
fi
fi
return 0
fi
@ -294,7 +301,7 @@ function logExitStatus()
hostPath="$basePath/$hostName/"
mkdir -p $hostPath
if [ -d "$hostPath" ]; then
cp $coreFile $hostPath
mv $coreFile $hostPath
fi
fi
}

View file

@ -13,7 +13,8 @@ Require-Bundle: com.raytheon.uf.viz.core,
com.raytheon.viz.core;bundle-version="1.12.1174",
com.raytheon.viz.ui;bundle-version="1.12.1174",
com.raytheon.viz.alerts;bundle-version="1.12.1174",
com.raytheon.uf.viz.thinclient;bundle-version="1.0.0"
com.raytheon.uf.viz.thinclient;bundle-version="1.0.0",
com.raytheon.viz.grid;bundle-version="1.12.1174"
Import-Package: com.raytheon.uf.common.comm,
com.raytheon.uf.common.datastorage,
com.raytheon.uf.viz.core.maps.rsc,

View file

@ -19,12 +19,17 @@
**/
package com.raytheon.uf.viz.thinclient.cave.refresh;
import java.util.ArrayList;
import java.util.Collection;
import org.eclipse.jface.preference.IPreferenceStore;
import com.raytheon.uf.viz.core.alerts.AlertMessage;
import com.raytheon.uf.viz.thinclient.Activator;
import com.raytheon.uf.viz.thinclient.preferences.ThinClientPreferenceConstants;
import com.raytheon.uf.viz.thinclient.refresh.TimedRefresher.RefreshTimerTask;
import com.raytheon.viz.alerts.jobs.AutoUpdater;
import com.raytheon.viz.alerts.observers.ProductAlertObserver;
/**
* Timer task responsible for refreshing IEditorParts that implement
@ -38,6 +43,7 @@ import com.raytheon.viz.alerts.jobs.AutoUpdater;
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Nov 10, 2011 mschenke Initial creation
* Feb 21, 2014 DR 16744 D. Friedman Update all alert observers
*
* </pre>
*
@ -56,8 +62,17 @@ public class DataRefreshTask implements RefreshTimerTask {
public void run() {
IPreferenceStore store = Activator.getDefault().getPreferenceStore();
if (store.getBoolean(ThinClientPreferenceConstants.P_DISABLE_JMS)) {
new AutoUpdater().alertArrived(ThinClientDataUpdateTree
.getInstance().updateAllData());
Collection<AlertMessage> alerts = ThinClientDataUpdateTree
.getInstance().updateAllData();
// Make sure it gets to GridUpdater
ArrayList<String> s = new ArrayList<String>(alerts.size());
for (AlertMessage am : alerts) {
s.add(am.dataURI);
}
ProductAlertObserver.processDataURIAlerts(s);
new AutoUpdater().alertArrived(alerts);
}
}

View file

@ -29,8 +29,10 @@ import java.util.Set;
import java.util.TimeZone;
import com.raytheon.uf.common.dataplugin.PluginDataObject;
import com.raytheon.uf.common.dataquery.requests.DbQueryRequest;
import com.raytheon.uf.common.dataquery.requests.RequestConstraint;
import com.raytheon.uf.common.dataquery.requests.RequestConstraint.ConstraintType;
import com.raytheon.uf.common.dataquery.responses.DbQueryResponse;
import com.raytheon.uf.common.status.IUFStatusHandler;
import com.raytheon.uf.common.status.UFStatus;
import com.raytheon.uf.common.status.UFStatus.Priority;
@ -44,6 +46,8 @@ import com.raytheon.uf.viz.core.requests.ThriftClient;
import com.raytheon.uf.viz.core.rsc.AbstractRequestableResourceData;
import com.raytheon.uf.viz.core.rsc.AbstractResourceData;
import com.raytheon.uf.viz.core.rsc.updater.DataUpdateTree;
import com.raytheon.viz.grid.inv.RadarUpdater;
import com.raytheon.viz.grid.util.RadarAdapter;
/**
* TODO Add Description
@ -55,6 +59,7 @@ import com.raytheon.uf.viz.core.rsc.updater.DataUpdateTree;
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Dec 13, 2011 bsteffen Initial creation
* Feb 21, 2014 DR 16744 D. Friedman Add radar/grid updates
*
* </pre>
*
@ -63,7 +68,7 @@ import com.raytheon.uf.viz.core.rsc.updater.DataUpdateTree;
*/
public class ThinClientDataUpdateTree extends DataUpdateTree {
private IUFStatusHandler statusHandler = UFStatus
private final IUFStatusHandler statusHandler = UFStatus
.getHandler(ThinClientDataUpdateTree.class);
private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat(
@ -95,8 +100,9 @@ public class ThinClientDataUpdateTree extends DataUpdateTree {
for (DataPair pair : getDataPairs()) {
AbstractResourceData resourceData = pair.data.getResourceData();
if (!(resourceData instanceof AbstractRequestableResourceData)
|| resourceData.isFrozen())
|| resourceData.isFrozen()) {
continue;
}
Map<String, RequestConstraint> metadata = pair.metadata;
metadata = new HashMap<String, RequestConstraint>(metadata);
metadata.put("insertTime", new RequestConstraint(time,
@ -115,9 +121,70 @@ public class ThinClientDataUpdateTree extends DataUpdateTree {
e);
}
}
getRadarUpdates(time, messages);
getGridUpdates(time, messages);
return messages;
}
/**
* Get radar update messages. This is needed to update the
* radar-as-gridded-data inventory.
*/
private void getRadarUpdates(String time, Set<AlertMessage> messages) {
Set<AlertMessage> radarMessages = new HashSet<AlertMessage>();
Map<String, RequestConstraint> metadata = RadarAdapter.getInstance()
.getUpdateConstraints();
metadata = new HashMap<String, RequestConstraint>(metadata);
metadata.put("insertTime", new RequestConstraint(time,
ConstraintType.GREATER_THAN));
try {
PluginDataObject[] pdos = DataCubeContainer.getData(metadata);
for (PluginDataObject pdo : pdos) {
AlertMessage am = new AlertMessage();
am.dataURI = pdo.getDataURI();
am.decodedAlert = RecordFactory.getInstance().loadMapFromUri(
am.dataURI);
radarMessages.add(am);
}
messages.addAll(radarMessages);
for (String dataURI : RadarUpdater.getInstance()
.convertRadarAlertsToGridDatauris(radarMessages)) {
AlertMessage am = new AlertMessage();
am.dataURI = dataURI;
am.decodedAlert = RecordFactory.getInstance().loadMapFromUri(
am.dataURI);
messages.add(am);
}
} catch (VizException e) {
statusHandler.handle(Priority.PROBLEM, e.getLocalizedMessage(), e);
}
}
/** Get gridded data update messages. */
private void getGridUpdates(String time, Set<AlertMessage> messages) {
Map<String, RequestConstraint> newQuery = new HashMap<String, RequestConstraint>();
DbQueryRequest dbRequest = new DbQueryRequest();
newQuery.put("pluginName", new RequestConstraint("grid"));
newQuery.put("insertTime", new RequestConstraint(time,
ConstraintType.GREATER_THAN));
dbRequest.setConstraints(newQuery);
dbRequest.addRequestField("dataURI");
DbQueryResponse response = null;
try {
response = (DbQueryResponse) ThriftClient.sendRequest(dbRequest);
for (String dataURI : response.getFieldObjects("dataURI",
String.class)) {
AlertMessage am = new AlertMessage();
am.dataURI = dataURI;
am.decodedAlert = RecordFactory.getInstance().loadMapFromUri(
am.dataURI);
messages.add(am);
}
} catch (VizException e) {
statusHandler.handle(Priority.PROBLEM, e.getLocalizedMessage(), e);
}
}
/**
* Get the estimated difference between the clock on the server and the
* local clock. The offset returned from this method will always be slightly

View file

@ -73,7 +73,8 @@ import com.raytheon.viz.aviation.resource.ResourceConfigMgr.ResourceTag;
* text height and width.
* 12/9/2010 7380 rferrel Adjust text size to be more like AWIPS I.
* 1/17/2011 7782 rferrel Added qcSkipCheck to mimic A1.
* 3/18/2011 7888 rferrel Added getLargeTF method.
* 3/18/2011 7888 rferrel Added getLargeTF method.
* 02/19/2014 16980 zhao added getter and setter for the Alt flag
*
* </pre>
*
@ -1368,4 +1369,12 @@ public class EditorTafTabComp extends Composite {
rtdRdo.setEnabled(editable);
corRdo.setEnabled(editable);
}
public boolean getAlt() {
return alt;
}
public void setAlt(boolean b) {
alt = b;
}
}

View file

@ -80,8 +80,10 @@ import org.eclipse.swt.widgets.Combo;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Control;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.Event;
import org.eclipse.swt.widgets.FileDialog;
import org.eclipse.swt.widgets.Label;
import org.eclipse.swt.widgets.Listener;
import org.eclipse.swt.widgets.Menu;
import org.eclipse.swt.widgets.MenuItem;
import org.eclipse.swt.widgets.MessageBox;
@ -227,6 +229,8 @@ import com.raytheon.viz.ui.dialogs.ICloseCallback;
* 08/09/2013 2033 mschenke Switched File.separator to IPathManager.SEPARATOR
* 09/04/2013 2322 lvenable Added CAVE style so this dialog is perspective independent
* 10/24/2013 16478 zhao add syntax check for extra '=' sign
* 02/12/2014 17076 lvenable Mark guidance tabs as not current so they get refreshed
* 02/19/2014 16980 zhao add code to ensure the Alt flag is false after the Alt key is released
*
* </pre>
*
@ -674,6 +678,10 @@ public class TafViewerEditorDlg extends CaveSWTDialog implements ITafSettable,
populateTafViewer();
// Update the metar and mos guidance in the viewer tab.
updateViewerTab(stationName);
// Mark the tabs as not current so they get updated.
markTabsAsNotCurrent();
break;
case OPEN_RTN:
@ -769,7 +777,19 @@ public class TafViewerEditorDlg extends CaveSWTDialog implements ITafSettable,
}
}
@Override
/**
* Mark the tabs as not current so they get refreshed.
*/
private void markTabsAsNotCurrent() {
for (TabItem tbi : guidanceViewerFolder.getItems()) {
if (tbi.getControl() instanceof ViewerTab) {
((ViewerTab) tbi.getControl()).setDisplayCurrent(false);
}
}
}
@Override
public void clearAll() {
if (shell == null) {
return;
@ -1073,6 +1093,11 @@ public class TafViewerEditorDlg extends CaveSWTDialog implements ITafSettable,
// Create the File menu item with a File "dropdown" menu
Menu fileMenu = new Menu(menuBar);
fileMenuItem.setMenu(fileMenu);
fileMenu.addListener(SWT.Show, new Listener() {
public void handleEvent(Event event) {
setAltFlagForEditorTafTabComp();
}
});
// -------------------------------------------------
// Create all the items in the File dropdown menu
@ -1179,6 +1204,11 @@ public class TafViewerEditorDlg extends CaveSWTDialog implements ITafSettable,
// Create the Options menu item with a Options "dropdown" menu
Menu optionsMenu = new Menu(menuBar);
optionsMenuItem.setMenu(optionsMenu);
optionsMenu.addListener(SWT.Show, new Listener() {
public void handleEvent(Event event) {
setAltFlagForEditorTafTabComp();
}
});
// ----------------------------------------------------
// Create all the items in the Options dropdown menu
@ -1253,7 +1283,12 @@ public class TafViewerEditorDlg extends CaveSWTDialog implements ITafSettable,
// Create the File menu item with a File "dropdown" menu
Menu editMenu = new Menu(menuBar);
editMenuItem.setMenu(editMenu);
editMenu.addListener(SWT.Show, new Listener() {
public void handleEvent(Event event) {
setAltFlagForEditorTafTabComp();
}
});
// -------------------------------------------------
// Create all the items in the Edit dropdown menu
// -------------------------------------------------
@ -1327,6 +1362,19 @@ public class TafViewerEditorDlg extends CaveSWTDialog implements ITafSettable,
}
});
}
/**
* When alt+'f', alt+'e', alt+'o', or alt+'h' is used to open the
* 'File', 'Edit', 'Options', or 'Help' menu, the alt flag of the
* editorTafTabComp object is set to true; it needs to be reset
* to false (DR16980)
*/
private void setAltFlagForEditorTafTabComp() {
if ( editorTafTabComp.getAlt() ) {
editorTafTabComp.setAlt(false);
}
}
/**
* Create the Help menu.
@ -1344,6 +1392,11 @@ public class TafViewerEditorDlg extends CaveSWTDialog implements ITafSettable,
// Create the File menu item with a File "dropdown" menu
Menu helpMenu = new Menu(menuBar);
helpMenuItem.setMenu(helpMenu);
helpMenu.addListener(SWT.Show, new Listener() {
public void handleEvent(Event event) {
setAltFlagForEditorTafTabComp();
}
});
// -------------------------------------------------
// Create all the items in the Help dropdown menu

View file

@ -11,6 +11,7 @@
# Apr 03,2012 436 randerso Converted to Python procedure to allow some
# level of site customization
# Apr 09,2012 436 randerso Merged RNK's MakeHazards_Elevation procedure
# Feb 12,2014 17058 ryu Extend converter for Collections$EmptyList objects.
#
# Author: randerso
# ----------------------------------------------------------------------------
@ -307,6 +308,8 @@ def converter(obj):
objtype = obj.jclassname
if objtype == "java.util.Date":
retVal = AbsTime.AbsTime(obj)
elif objtype == "java.util.Collections$EmptyList":
retVal = []
elif objtype == "com.raytheon.uf.common.time.TimeRange":
retVal = TimeRange.TimeRange(obj)
return retVal

View file

@ -1,61 +1,70 @@
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# MergeHazards
#
# Author: lefebvre
#
# This procedure reads all of the temporary hazard grids and selectively
# loads them into the "Hazards" grid.
# ----------------------------------------------------------------------------
# The MenuItems list defines the GFE menu item(s) under which the
# Procedure is to appear.
# Possible items are: Populate, Edit, Consistency, Verify, Hazards
MenuItems = ["Hazards"]
#import Tkinter
import SmartScript
import string
import HazardUtils
import VTECTable
import LogStream
import numpy
from HazardUtils import MODEL
from HazardUtils import ELEMENT
from HazardUtils import LEVEL
######################### CONFIGURATION SECTION ######################
#
# This dictionary defines which hazards cannot be combined with other
# Hazards. The structure lists each hazard in the VTECTable followed
# by a list of VTEC codes that may not be combined with it at the same
# grid point. For example "DS.W" : ["DU.Y"] means that DS.W may not
# be combined with a DU.Y hazard at the same grid point.
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# MergeHazards
#
# Author: lefebvre
#
# This procedure reads all of the temporary hazard grids and selectively
# loads them into the "Hazards" grid.
# ----------------------------------------------------------------------------
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# Dec 23, 2013 16893 ryu Check in njensen's change to removeTempHazards()
# to call SmartScript.unloadWEs().
#
########################################################################
# The MenuItems list defines the GFE menu item(s) under which the
# Procedure is to appear.
# Possible items are: Populate, Edit, Consistency, Verify, Hazards
MenuItems = ["Hazards"]
#import Tkinter
import SmartScript
import string
import HazardUtils
import VTECTable
import LogStream
import numpy
from HazardUtils import MODEL
from HazardUtils import ELEMENT
from HazardUtils import LEVEL
######################### CONFIGURATION SECTION ######################
#
# This dictionary defines which hazards cannot be combined with other
# Hazards. The structure lists each hazard in the VTECTable followed
# by a list of VTEC codes that may not be combined with it at the same
# grid point. For example "DS.W" : ["DU.Y"] means that DS.W may not
# be combined with a DU.Y hazard at the same grid point.
HazardsConflictDict = {
"AF.W" : ["AF.Y"],
"AF.Y" : ["AF.W"],
@ -187,302 +196,304 @@ HazardsConflictDict = {
"ZR.Y" : ["BZ.A", "LE.A", "WS.A", "BZ.W", "IS.W", "WS.W", "LE.W",
"WW.Y", "LE.Y"],
}
########################## END OF CONFIGURATION SECTION ########################
class Procedure(SmartScript.SmartScript):
def __init__(self, dbss):
SmartScript.SmartScript.__init__(self, dbss)
self._dbss = dbss
##
# Get the list of loaded temporary hazard parms
# @return: Temporary hazard parm names, i.e., ["hazAFY"]
# @rtype: List of Strings
def getHazardParmNames(self):
parms = self.loadedParms()
hazParms = []
for weName, level, dbID in parms:
if string.find(weName, "haz") == 0:
# TODO: Why is this back/forth xform needed?
key = self._hazUtils._tempWENameToKey(weName)
index = string.find(key, ":")
if index != -1:
mkey = key[0:index]
segNum = key[index+1:]
else:
mkey = key
segNum = ""
# append the hazard and a description
parmName = "haz" + key
parmName = string.replace(parmName, ".", "")
parmName = string.replace(parmName, ":", "")
hazParms.append(parmName)
return hazParms
##
# Unload (delete) all the temporary hazards
def removeTempHazards(self):
parms = self.loadedParms()
for weName, level, dbID in parms:
if string.find(weName, "haz") == 0:
self.unloadWE(MODEL, weName, level)
return
##
# The action performed when the user opts to cancel a merge.
# This was a callback under Tcl/tk; now displayDialog invokes
# it directly.
def cancelCommand(self):
LogStream.logEvent("MergeHazards: cancel")
return
##
# The action performed when the user opts to continue a merge.
# This was a callback under Tcl/tk; now displayDialog invokes
# it directly.
def continueCommand(self):
LogStream.logEvent("MergeHazards: continue")
parm = self.getParm(MODEL, ELEMENT, LEVEL)
parm.setMutable(True)
self.mergeHazardGrids()
return
##
# Displays a dialog box and queries the user to continue to merge or
# abort the merge
########################## END OF CONFIGURATION SECTION ########################
class Procedure(SmartScript.SmartScript):
def __init__(self, dbss):
SmartScript.SmartScript.__init__(self, dbss)
self._dbss = dbss
##
# Get the list of loaded temporary hazard parms
# @return: Temporary hazard parm names, i.e., ["hazAFY"]
# @rtype: List of Strings
def getHazardParmNames(self):
parms = self.loadedParms()
hazParms = []
for weName, level, dbID in parms:
if string.find(weName, "haz") == 0:
# TODO: Why is this back/forth xform needed?
key = self._hazUtils._tempWENameToKey(weName)
index = string.find(key, ":")
if index != -1:
mkey = key[0:index]
segNum = key[index+1:]
else:
mkey = key
segNum = ""
# append the hazard and a description
parmName = "haz" + key
parmName = string.replace(parmName, ".", "")
parmName = string.replace(parmName, ":", "")
hazParms.append(parmName)
return hazParms
##
# Unload (delete) all the temporary hazards
def removeTempHazards(self):
parms = self.loadedParms()
toRemovePairs = []
for weName, level, dbID in parms:
if string.find(weName, "haz") == 0:
toRemovePairs.append((weName, level))
self.unloadWEs(MODEL, toRemovePairs)
return
##
# The action performed when the user opts to cancel a merge.
# This was a callback under Tcl/tk; now displayDialog invokes
# it directly.
def cancelCommand(self):
LogStream.logEvent("MergeHazards: cancel")
return
##
# The action performed when the user opts to continue a merge.
# This was a callback under Tcl/tk; now displayDialog invokes
# it directly.
def continueCommand(self):
LogStream.logEvent("MergeHazards: continue")
parm = self.getParm(MODEL, ELEMENT, LEVEL)
parm.setMutable(True)
self.mergeHazardGrids()
return
##
# Displays a dialog box and queries the user to continue to merge or
# abort the merge
def displayDialog(self, message):
from MessageBox import MessageBox
messageBox = MessageBox(style=MessageBox.ICON_WARNING)
messageBox.setText("MakeHazard")
messageBox.setMessage(message)
messageBox.setButtonLabels(["Continue Merge", "Cancel Merge"])
messageBox.setDefaultIndex(1)
if (messageBox.open() == 0):
self.continueCommand()
else:
self.cancelCommand()
return
##
# Returns the set of hazParms grids that overlap with the specified
# timeRange.
# @param hazParms: Hazard parm names to check
# @type hazParms: Sequence of string
# @param timeRange: The time range to check for overlap with
# @type timeRange: Python TimeRange
# @return: Byte grids and keys of the overlapping parms
# @rtype: 2-tuple: list of byte arrays, list of list of strings
def getOverlappingHazGrids(self, hazParms, timeRange):
byteGridList = []
keyList = []
for hazParm in hazParms:
trList = self._hazUtils._getWEInventory(hazParm)
for tr in trList:
if tr.overlaps(timeRange):
byteGrid, hazKey = self.getGrids(MODEL, hazParm, LEVEL,
tr, mode="First")
if isinstance(hazKey, str):
hazKey = eval(hazKey)
byteGridList.append(byteGrid)
keyList.append(hazKey)
return byteGridList, keyList
##
# Returns the first non-None key it finds in the keyList
# @param keyList: Keys to search
# @type keyList: Sequence of string
# @return: First key that is not "<None>"
# @rtype: string
def getHazardKey(self, keyList):
for k in keyList:
if k != "<None>":
return k
##
# Checks the specified hazard grids to see if they are conflicting
# Each grid is a tuple (byteGrid, key). Uses the configurable
# HazardsConflictDict to determine whether two hazards can be combined
# at the same grid point. Returns an empty list if no conflict or
# the list of hazards if they do.
#
# This method should really only be used internally; it assumes that
# there is at most one key other than "<None>", and that it contains
# a single subkey.
#
# @param hazGrid1: The first hazard grid
# @type hazGrid1: 2-tuple: numpy array of int8, list of String
# @param hazGrid2: The second hazard grid
# @type hazGrid2: 2-tuple: numpy array of int8, list of String
# @return: conflicting hazard names or empty list
# @rtype: list
def conflictingHazards(self, hazGrid1, hazGrid2):
byteGrid1, hazKey1 = hazGrid1
byteGrid2, hazKey2 = hazGrid2
key1 = self.getHazardKey(hazKey1)
key2 = self.getHazardKey(hazKey2)
phenSig1 = key1[0:4] # remove the etn
phenSig2 = key2[0:4]
keyConflict = False
if phenSig1 == phenSig2 and key1 != key2:
keyConflict = True
elif HazardsConflictDict.has_key(phenSig1):
if phenSig2 in HazardsConflictDict[phenSig1]:
keyConflict = True
if keyConflict:
# calculate the overlap, adding the grids together will tell us if
# there is any overlap. Any grid points > 1 are overlapped
totalGrid = byteGrid1 + byteGrid2
overlapMask = numpy.greater(totalGrid, 1)
if numpy.any(overlapMask):
return [key1, key2]
return []
##
# See if there are any temporary hazards for the same position and time
# that conflict with one another.
#
# @param hazParms: Temporary hazard parm names to check.
# @type hazParms: sequence of string
# @return: The first conflict, or None if there are no conflicts
# @rtype: 2-tuple(TimeRange, list of string) or NoneType
def checkForHazardConflicts(self, hazParms):
timeList = []
for hazParm in hazParms:
trList = self._hazUtils._getWEInventory(hazParm)
for tr in trList:
if tr.startTime().unixTime() not in timeList:
timeList.append(tr.startTime().unixTime())
if tr.endTime().unixTime() not in timeList:
timeList.append(tr.endTime().unixTime())
timeList.sort() # sort the list
for t in xrange(len(timeList) - 1):
start = timeList[t]
end = timeList[t+1]
timeRange = self._hazUtils._makeTimeRange(start, end)
byteGridList = []
keyList = []
byteGridList, keyList = self.getOverlappingHazGrids(hazParms, timeRange)
# compare each grid to all other grids at this timeRange
for firstIndex in xrange(len(byteGridList) - 1):
for secondIndex in xrange(firstIndex + 1, len(byteGridList)):
grid1 = (byteGridList[firstIndex], keyList[firstIndex])
grid2 = (byteGridList[secondIndex], keyList[secondIndex])
conflictList = self.conflictingHazards(grid1, grid2)
if conflictList != []:
return (timeRange, conflictList)
# if we made it to here, all is well
return None
##
# Perform checks to see if it's OK to merge hazards. If there are no conflicting
# locks or incompatible hazards, do the merge. If there are conflicting locks,
# generate a status bar message and quit. If there are incompatible
# hazards, show a warning and let the user decide whether to continue.
def checkForMerge(self):
# get the hazards selected by the forecaster
hazParms = self.getHazardParmNames()
# check for empty list of hazards
if hazParms == []:
self.statusBarMsg("No temporary grids to merge.", "S")
return
# FIXME: Lock race condition
# check for conflicting locks
if self._hazUtils._conflictingLocks(hazParms):
self.statusBarMsg("There are conflicting locks. " +
"Please resolve these before merging any hazards", "S")
return
conflicts = self.checkForHazardConflicts(hazParms)
if conflicts is None:
# if no conflicts, merge the grids
# We made the hazards parm immutable when we separated hazard grids.
# It has to be made mutable to do the merge.
parm = self.getParm(MODEL, ELEMENT, LEVEL)
parm.setMutable(True)
self.mergeHazardGrids()
else:
haz1 = string.replace(conflicts[1][0], ".", "")
haz2 = string.replace(conflicts[1][1], ".", "")
timeRange = str(conflicts[0])
msg = "Hazard conflict detected!\n\n"
msg += "Time: " + timeRange + " \n\n"
msg += "with Hazard grids haz" + haz1 + " and haz" + haz2 + ".\n"
LogStream.logEvent("Merge conflict: "+ msg)
self.displayDialog(msg)
return
##
# Performs the actual merge of the temp hazards grids into the "Hazards" grid.
def mergeHazardGrids(self):
# get the hazards selected by the forecaster
hazParms = self.getHazardParmNames()
self._hazUtils._removeAllHazardsGrids()
for hazParm in hazParms:
trList = self._hazUtils._getWEInventory(hazParm)
for tr in trList:
byteGrid, hazKey = self.getGrids(MODEL, hazParm, LEVEL, tr,
mode="First")
if isinstance(hazKey, str):
hazKey = eval(hazKey)
uniqueKeys = self._hazUtils._getUniqueKeys(byteGrid, hazKey)
for uKey in uniqueKeys:
if uKey == "<None>":
continue
subKeys = self._hazUtils._getSubKeys(uKey)
for subKey in subKeys:
# make the mask - find all areas that contain the subKey
mask = numpy.zeros(byteGrid.shape)
for haz in hazKey:
if string.find(haz, subKey) >= 0:
hazIndex = self.getIndex(haz, hazKey)
mask = numpy.logical_or(numpy.equal(byteGrid, hazIndex), mask)
# make the grid
self._hazUtils._addHazard(ELEMENT, tr, subKey, mask)
LogStream.logEvent("merge: " + \
str(self._hazUtils._printTime(tr.startTime().unixTime())) + " " + \
str(self._hazUtils._printTime(tr.endTime().unixTime())) + " " + \
subKey + "\n")
self.removeTempHazards()
return
##
# The main entry point of the procedure.
def execute(self):
self.setToolType("numeric")
self._hazUtils = HazardUtils.HazardUtils(self._dbss, None)
# see if the Hazards WE is loaded in the GFE, if not abort the tool
if not self._hazUtils._hazardsLoaded():
self.statusBarMsg("Hazards Weather Element must be loaded in " +\
"the GFE before running MergeHazards", "S")
self.cancel()
self.checkForMerge()
return
from MessageBox import MessageBox
messageBox = MessageBox(style=MessageBox.ICON_WARNING)
messageBox.setText("MakeHazard")
messageBox.setMessage(message)
messageBox.setButtonLabels(["Continue Merge", "Cancel Merge"])
messageBox.setDefaultIndex(1)
if (messageBox.open() == 0):
self.continueCommand()
else:
self.cancelCommand()
return
##
# Returns the set of hazParms grids that overlap with the specified
# timeRange.
# @param hazParms: Hazard parm names to check
# @type hazParms: Sequence of string
# @param timeRange: The time range to check for overlap with
# @type timeRange: Python TimeRange
# @return: Byte grids and keys of the overlapping parms
# @rtype: 2-tuple: list of byte arrays, list of list of strings
def getOverlappingHazGrids(self, hazParms, timeRange):
byteGridList = []
keyList = []
for hazParm in hazParms:
trList = self._hazUtils._getWEInventory(hazParm)
for tr in trList:
if tr.overlaps(timeRange):
byteGrid, hazKey = self.getGrids(MODEL, hazParm, LEVEL,
tr, mode="First")
if isinstance(hazKey, str):
hazKey = eval(hazKey)
byteGridList.append(byteGrid)
keyList.append(hazKey)
return byteGridList, keyList
##
# Returns the first non-None key it finds in the keyList
# @param keyList: Keys to search
# @type keyList: Sequence of string
# @return: First key that is not "<None>"
# @rtype: string
def getHazardKey(self, keyList):
for k in keyList:
if k != "<None>":
return k
##
# Checks the specified hazard grids to see if they are conflicting
# Each grid is a tuple (byteGrid, key). Uses the configurable
# HazardsConflictDict to determine whether two hazards can be combined
# at the same grid point. Returns an empty list if no conflict or
# the list of hazards if they do.
#
# This method should really only be used internally; it assumes that
# there is at most one key other than "<None>", and that it contains
# a single subkey.
#
# @param hazGrid1: The first hazard grid
# @type hazGrid1: 2-tuple: numpy array of int8, list of String
# @param hazGrid2: The second hazard grid
# @type hazGrid2: 2-tuple: numpy array of int8, list of String
# @return: conflicting hazard names or empty list
# @rtype: list
def conflictingHazards(self, hazGrid1, hazGrid2):
byteGrid1, hazKey1 = hazGrid1
byteGrid2, hazKey2 = hazGrid2
key1 = self.getHazardKey(hazKey1)
key2 = self.getHazardKey(hazKey2)
phenSig1 = key1[0:4] # remove the etn
phenSig2 = key2[0:4]
keyConflict = False
if phenSig1 == phenSig2 and key1 != key2:
keyConflict = True
elif HazardsConflictDict.has_key(phenSig1):
if phenSig2 in HazardsConflictDict[phenSig1]:
keyConflict = True
if keyConflict:
# calculate the overlap, adding the grids together will tell us if
# there is any overlap. Any grid points > 1 are overlapped
totalGrid = byteGrid1 + byteGrid2
overlapMask = numpy.greater(totalGrid, 1)
if numpy.any(overlapMask):
return [key1, key2]
return []
##
# See if there are any temporary hazards for the same position and time
# that conflict with one another.
#
# @param hazParms: Temporary hazard parm names to check.
# @type hazParms: sequence of string
# @return: The first conflict, or None if there are no conflicts
# @rtype: 2-tuple(TimeRange, list of string) or NoneType
def checkForHazardConflicts(self, hazParms):
timeList = []
for hazParm in hazParms:
trList = self._hazUtils._getWEInventory(hazParm)
for tr in trList:
if tr.startTime().unixTime() not in timeList:
timeList.append(tr.startTime().unixTime())
if tr.endTime().unixTime() not in timeList:
timeList.append(tr.endTime().unixTime())
timeList.sort() # sort the list
for t in xrange(len(timeList) - 1):
start = timeList[t]
end = timeList[t+1]
timeRange = self._hazUtils._makeTimeRange(start, end)
byteGridList = []
keyList = []
byteGridList, keyList = self.getOverlappingHazGrids(hazParms, timeRange)
# compare each grid to all other grids at this timeRange
for firstIndex in xrange(len(byteGridList) - 1):
for secondIndex in xrange(firstIndex + 1, len(byteGridList)):
grid1 = (byteGridList[firstIndex], keyList[firstIndex])
grid2 = (byteGridList[secondIndex], keyList[secondIndex])
conflictList = self.conflictingHazards(grid1, grid2)
if conflictList != []:
return (timeRange, conflictList)
# if we made it to here, all is well
return None
##
# Perform checks to see if it's OK to merge hazards. If there are no conflicting
# locks or incompatible hazards, do the merge. If there are conflicting locks,
# generate a status bar message and quit. If there are incompatible
# hazards, show a warning and let the user decide whether to continue.
def checkForMerge(self):
# get the hazards selected by the forecaster
hazParms = self.getHazardParmNames()
# check for empty list of hazards
if hazParms == []:
self.statusBarMsg("No temporary grids to merge.", "S")
return
# FIXME: Lock race condition
# check for conflicting locks
if self._hazUtils._conflictingLocks(hazParms):
self.statusBarMsg("There are conflicting locks. " +
"Please resolve these before merging any hazards", "S")
return
conflicts = self.checkForHazardConflicts(hazParms)
if conflicts is None:
# if no conflicts, merge the grids
# We made the hazards parm immutable when we separated hazard grids.
# It has to be made mutable to do the merge.
parm = self.getParm(MODEL, ELEMENT, LEVEL)
parm.setMutable(True)
self.mergeHazardGrids()
else:
haz1 = string.replace(conflicts[1][0], ".", "")
haz2 = string.replace(conflicts[1][1], ".", "")
timeRange = str(conflicts[0])
msg = "Hazard conflict detected!\n\n"
msg += "Time: " + timeRange + " \n\n"
msg += "with Hazard grids haz" + haz1 + " and haz" + haz2 + ".\n"
LogStream.logEvent("Merge conflict: "+ msg)
self.displayDialog(msg)
return
##
# Performs the actual merge of the temp hazards grids into the "Hazards" grid.
def mergeHazardGrids(self):
# get the hazards selected by the forecaster
hazParms = self.getHazardParmNames()
self._hazUtils._removeAllHazardsGrids()
for hazParm in hazParms:
trList = self._hazUtils._getWEInventory(hazParm)
for tr in trList:
byteGrid, hazKey = self.getGrids(MODEL, hazParm, LEVEL, tr,
mode="First")
if isinstance(hazKey, str):
hazKey = eval(hazKey)
uniqueKeys = self._hazUtils._getUniqueKeys(byteGrid, hazKey)
for uKey in uniqueKeys:
if uKey == "<None>":
continue
subKeys = self._hazUtils._getSubKeys(uKey)
for subKey in subKeys:
# make the mask - find all areas that contain the subKey
mask = numpy.zeros(byteGrid.shape)
for haz in hazKey:
if string.find(haz, subKey) >= 0:
hazIndex = self.getIndex(haz, hazKey)
mask = numpy.logical_or(numpy.equal(byteGrid, hazIndex), mask)
# make the grid
self._hazUtils._addHazard(ELEMENT, tr, subKey, mask)
LogStream.logEvent("merge: " + \
str(self._hazUtils._printTime(tr.startTime().unixTime())) + " " + \
str(self._hazUtils._printTime(tr.endTime().unixTime())) + " " + \
subKey + "\n")
self.removeTempHazards()
return
##
# The main entry point of the procedure.
def execute(self):
self.setToolType("numeric")
self._hazUtils = HazardUtils.HazardUtils(self._dbss, None)
# see if the Hazards WE is loaded in the GFE, if not abort the tool
if not self._hazUtils._hazardsLoaded():
self.statusBarMsg("Hazards Weather Element must be loaded in " +\
"the GFE before running MergeHazards", "S")
self.cancel()
self.checkForMerge()
return

View file

@ -1,19 +1,19 @@
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
@ -27,16 +27,25 @@
#
# Author: hansen
# ----------------------------------------------------------------------------
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 02/12/2014 #2591 randerso Added retry when loading combinations fails
import string, getopt, sys, time, os, types, math
import ModuleAccessor
import Utility, logging, traceback
import AbsTime
from java.lang import ThreadDeath
from com.raytheon.uf.common.dataplugin.gfe.reference import ReferenceID, ReferenceData
GridLoc = None
LatLonIds = []
MAX_TRIES = 2
# If someone imports TextFormatter and needs this instance
# they should either be fixed to use the IFPImporter module
# or turn this line on (which is a kludge but should make
@ -44,19 +53,19 @@ LatLonIds = []
#IFPImporter = IFPImporter.IFPImporter
class TextFormatter:
def __init__(self, dataManager):
def __init__(self, dataManager):
# Variable for unique combinations
self.__comboNumber = -1
self.dataMgr = dataManager
self.log = logging.getLogger("FormatterRunner.TextFormatter.TextFormatter")
pass
# def __del__(self):
# for i in LatLonIds:
# self.dataMgr.getRefManager().deleteRefSet(i, False)
def getForecast(self, fcstName, argDict):
" Create the forecast "
" Create the forecast "
ForecastNarrative = argDict["ForecastNarrative"]
ForecastTable = argDict["ForecastTable"]
@ -71,7 +80,7 @@ class TextFormatter:
argDict["getForecast"] = self.getForecast
argDict["getFcstDef"] = self.getFcstDef
argDict["dataMgr"] = self.dataMgr
self.__ut = argDict["utility"]
self.__ut = argDict["utility"]
# Get the Forecast Definition and type from the server
#print "finding", fcstName
@ -79,11 +88,11 @@ class TextFormatter:
#print "found ", found
if found == 0:
text = "Text Product Definition Not Found: " + fcstName + " " + \
traceback.format_exc()
traceback.format_exc()
self.log.error("Text Product Definition Not Found: Caught Exception: " + fcstName, exc_info=True)
raise Exception, text
forecastDef = argDict["forecastDef"]
fcstType = self.__ut.set(forecastDef,"type",None)
fcstType = self.__ut.set(forecastDef, "type", None)
argDict["fcstType"] = fcstType
if fcstType is None:
text = "Text Product Type Not Found: " + fcstName + " " + \
@ -101,16 +110,16 @@ class TextFormatter:
# Must have at least one edit area and time range specified
if fcstType != "smart" and fcstType != "component":
if argDict["editAreas"] == []:
text= "No Edit Areas Specified over which to generate product."
text=text+'\nTry setting "defaultEditAreas" in the product Definition'
text=text+'\nOr, if running from the command line, add a -r flag.'
text = "No Edit Areas Specified over which to generate product."
text = text + '\nTry setting "defaultEditAreas" in the product Definition'
text = text + '\nOr, if running from the command line, add a -r flag.'
text = text + '\n' + string.join(traceback.format_exc())
self.log.error("Caught Exception: " + text)
raise Exception, text
if argDict["rawRanges"] == []:
text= "No Time Ranges Specified over which to generate product."
text=text+'\nTry setting "defaultRanges" in the product Definition'
text=text+'\nOr, if running from the command line, add a -w flag.'
text = "No Time Ranges Specified over which to generate product."
text = text + '\nTry setting "defaultRanges" in the product Definition'
text = text + '\nOr, if running from the command line, add a -w flag.'
text = text + '\n' + string.join(traceback.format_exc())
self.log.error("Caught Exception: " + text)
raise Exception, text
@ -153,7 +162,7 @@ class TextFormatter:
argDict["module"] = module
product.setUp("T", argDict)
product._argDict = argDict
try:
text = product.generateForecast(argDict)
except RuntimeError, e:
@ -166,11 +175,11 @@ class TextFormatter:
# requirement for TEST phrasing for TEST products
if argDict.get('testMode', 0):
testMsg = "\nTHIS IS A TEST MESSAGE. DO NOT TAKE ACTION" +\
testMsg = "\nTHIS IS A TEST MESSAGE. DO NOT TAKE ACTION" + \
" BASED ON THIS TEST\nMESSAGE.\n"
#split by "$$"
segs = text.split('\n$$')
for i in xrange(len(segs)-1): #never the last one
for i in xrange(len(segs) - 1): #never the last one
if text.find(testMsg) == -1: #not found, add it in
segs[i] = segs[i] + testMsg
text = '\n$$'.join(segs) #put text back together again
@ -185,7 +194,7 @@ class TextFormatter:
if not forecastDef.get('lowerCase', 0):
text = text.upper()
else:
text="Text Product Type Invalid "+\
text = "Text Product Type Invalid " + \
"(must be 'table', 'component' or 'narrative'): ", fcstName, type
text = text + '\n' + string.join(traceback.format_exc())
self.log.error("Caught Exception: " + text)
@ -196,18 +205,18 @@ class TextFormatter:
def __createNarrativeDef(self, fcstName, timeRange):
return {
"methodList": [self.assembleChildWords],
"narrativeDef": [(fcstName, timeRange.duration()/3600)],
"narrativeDef": [(fcstName, timeRange.duration() / 3600)],
}
def __loop(self, argDict, forecast, forecastDef):
# Loop through product by edit areas and time ranges
begText = self.__ut.set(forecastDef,"beginningText","")
endText = self.__ut.set(forecastDef,"endingText","")
editAreaLoopBegText = self.__ut.set(forecastDef,"editAreaLoopBegText","")
timeRangeLoopBegText = self.__ut.set(forecastDef,"timeRangeLoopBegText","")
editAreaLoopEndText = self.__ut.set(forecastDef,"editAreaLoopEndText","")
timeRangeLoopEndText = self.__ut.set(forecastDef,"timeRangeLoopEndText","")
begText = self.__ut.set(forecastDef, "beginningText", "")
endText = self.__ut.set(forecastDef, "endingText", "")
editAreaLoopBegText = self.__ut.set(forecastDef, "editAreaLoopBegText", "")
timeRangeLoopBegText = self.__ut.set(forecastDef, "timeRangeLoopBegText", "")
editAreaLoopEndText = self.__ut.set(forecastDef, "editAreaLoopEndText", "")
timeRangeLoopEndText = self.__ut.set(forecastDef, "timeRangeLoopEndText", "")
outerLoop = self.__ut.set(forecastDef, "outerLoop", "EditArea")
editAreas = argDict["editAreas"]
@ -274,10 +283,10 @@ class TextFormatter:
#print "varDict", varDict
for item, default in [
("language","english"),
("appendFile",None),
("lineLength",69), # no command line option
("timePeriod",3),
("language", "english"),
("appendFile", None),
("lineLength", 69), # no command line option
("timePeriod", 3),
]:
try: # Try the varDict
#print "trying varDict", item
@ -362,19 +371,28 @@ class TextFormatter:
# (["Zones37","Zones38"], "/37/38"),"/37/38"),
# (["Zones57","Zones58","Zones59"],"57/58/59")
# ]
# RWA-05/19/11: added this check here to force Combinations files
# to be reloaded since we removed a similar check from ModuleAccessor
# to preserve the magicCodeChanges. Perhaps we should be doing something
# similar to magicCodeChanges for Combinations files as well.
if sys.modules.has_key(dfEditAreas):
del sys.modules[dfEditAreas]
accessor = ModuleAccessor.ModuleAccessor()
dfEditAreas = accessor.variable(dfEditAreas, "Combinations")
if dfEditAreas is None:
return "COMBINATION FILE NOT FOUND: " + \
self.__ut.set(forecastDef, "defaultEditAreas", [])
comboName = dfEditAreas
for retryCount in xrange(MAX_TRIES):
accessor = ModuleAccessor.ModuleAccessor()
dfEditAreas = accessor.variable(comboName, "Combinations")
if dfEditAreas is None:
if sys.modules.has_key(comboName):
comboMod = sys.modules[comboName]
if comboMod.__file__.endswith(".pyo"):
os.remove(comboMod.__file__)
comboMod = None
del sys.modules[comboName]
# if not last try, log and try again
if retryCount < MAX_TRIES - 1:
# log but don't pop up
self.log.error("Error loading combinations file: %s, retrying", comboName)
else:
return "COMBINATION FILE NOT FOUND: " + \
self.__ut.set(forecastDef, "defaultEditAreas", [])
else:
break
elif len(dfEditAreas) > 0:
refDataList = []
@ -411,7 +429,7 @@ class TextFormatter:
filterMethod = product.filterMethod
except:
allowedHazards = None
if allowedHazards is not None and allowedHazards != []:
# Set up editAreas as a list of combinations
# Cases:
@ -440,7 +458,7 @@ class TextFormatter:
"AreaDictionary")
editAreas = self._separateByTimeZone(editAreas,
areaDictName, argDict['creationTime'],
effectiveTZ = separateByTZ)
effectiveTZ=separateByTZ)
accurateCities = product.Definition.get('accurateCities', 0)
cityRefData = []
@ -464,7 +482,7 @@ class TextFormatter:
"contain entry for edit area: "
self.log.error(msg + `ean`)
continue
for city, llrec in citydict[ean].iteritems():
# Create a referenceData given lat, lon, dim
area = (llrec[0], llrec[1], 0)
@ -490,8 +508,8 @@ class TextFormatter:
filterMethod, argDict["databaseID"], stationID4,
argDict["vtecActiveTable"], argDict["vtecMode"],
sampleThreshold, creationTime=argDict["creationTime"], dataMgr=self.dataMgr,
accurateCities=accurateCities,
cityEditAreas=cityRefData)
accurateCities=accurateCities,
cityEditAreas=cityRefData)
# Store hazards object for later use
argDict["hazards"] = hazards
@ -540,7 +558,7 @@ class TextFormatter:
except:
trName = ""
if tr is not None:
rawRanges.append((tr,trName))
rawRanges.append((tr, trName))
elif len(dfRanges) == 0:
pass
else:
@ -548,13 +566,13 @@ class TextFormatter:
forecast = TimeRangeUtils.TimeRangeUtils()
for rangeName in dfRanges:
rawRange = forecast.getTimeRange(rangeName, argDict)
rawRanges.append((rawRange,rangeName))
rawRanges.append((rawRange, rangeName))
argDict["rawRanges"] = rawRanges
#print "rawRanges", rawRanges
# Row Label
areaType = self.__ut.set(forecastDef,"areaType","")
rowVariable = self.__ut.set(forecastDef,"rowVariable","EditArea")
areaType = self.__ut.set(forecastDef, "areaType", "")
rowVariable = self.__ut.set(forecastDef, "rowVariable", "EditArea")
if rowVariable == "EditArea":
rowLabel = areaType
elif rowVariable == "WeatherElement":
@ -566,7 +584,7 @@ class TextFormatter:
def __pairAreaWithLabel(self, chosenAreas, defaultEditAreas):
# Pair the chosen edit areas with associated labels from
# default list and return new list
dfEditAreas= []
dfEditAreas = []
for area in chosenAreas:
for name, label in defaultEditAreas:
if area == name:
@ -620,8 +638,8 @@ class TextFormatter:
def __getLatLonAreaName(self, latLonTuple):
lat, lon, dim = latLonTuple
name = "Ref" + '%s%s%s' % (lat, lon, dim)
name = name.replace(".","")
name = name.replace("-","")
name = name.replace(".", "")
name = name.replace("-", "")
return name
def getCombinations(self, combinations, argDict):
@ -635,7 +653,7 @@ class TextFormatter:
newArea = self.getEditArea(editArea, argDict)
if comboList.index(editArea) == 0:
comboNumber = self.getComboNumber()
label = "Combo"+`comboNumber`
label = "Combo" + `comboNumber`
refId = ReferenceID(label)
#global GridLoc
#GridLoc = newArea.getGloc()
@ -680,7 +698,7 @@ class TextFormatter:
try:
product = argDict["self"]
exec "fcstDef = product."+fcstName+"()"
exec "fcstDef = product." + fcstName + "()"
module = argDict["module"]
except:
# See if fcstName is variable in imported modules e.g. MyTable = {}
@ -699,7 +717,7 @@ class TextFormatter:
try:
# Look for fcstName = {}
# This can be removed eventually
exec "fcstDef = module."+fcstName
exec "fcstDef = module." + fcstName
except:
try:
# Try to instantiate smart text product class
@ -750,7 +768,7 @@ class TextFormatter:
def getEditArea(self, editAreaName, argDict):
# Returns an AFPS.ReferenceData object given an edit area name
# as defined in the GFE
# as defined in the GFE
# Apply suffix if appropriate
refID = ReferenceID(editAreaName)
#print "Getting edit area"
@ -779,7 +797,7 @@ class TextFormatter:
return tmp
def _separateByTimeZone(self, editAreaGroups, areaDictName, creationTime,
effectiveTZ = "effectiveTZ"):
effectiveTZ="effectiveTZ"):
#takes the list of areas, and based on the time zones breaks
#them up to ensure that each grouping using the same time zone.
#areaDictName is name of the area dictionary. creationTime is the
@ -817,7 +835,7 @@ class TextFormatter:
zoneTZ = localTZ
tzid = localTZid
#print "falling back to WFOtz: ", zoneTZ
self.log.warning("WARNING: Entry " + area +
self.log.warning("WARNING: Entry " + area +
" missing from AreaDictionary. Using default time zone.")
zones = tzDir.get(zoneTZ, [])
@ -835,7 +853,7 @@ class TextFormatter:
elif effectiveTZ == "actualTZ":
dict = tzDir
else:
self.log.error("Invalid effectiveTZ for separateByTZ() " +
self.log.error("Invalid effectiveTZ for separateByTZ() " +
effectiveTZ)
return editAreaGroups
keys = dict.keys()
@ -850,39 +868,39 @@ class TextFormatter:
#################################################################
def makeSquare(lat, lon, km):
" Make a list of square of given km around lat,lon"
latinc = km/222.0
loninc = math.cos(lat/57.17) * km / 222.0
latinc = km / 222.0
loninc = math.cos(lat / 57.17) * km / 222.0
latTop = lat + latinc
latBottom =lat - latinc
latBottom = lat - latinc
lonLeft = lon - loninc
lonRight = lon + loninc
points = []
points.append(`latTop`+","+ `lonRight`)
points.append(`latTop`+","+ `lonLeft`)
points.append(`latBottom`+","+ `lonLeft`)
points.append(`latBottom`+","+`lonRight`)
points.append(`latTop` + "," + `lonRight`)
points.append(`latTop` + "," + `lonLeft`)
points.append(`latBottom` + "," + `lonLeft`)
points.append(`latBottom` + "," + `lonRight`)
return points
def makePoint(point):
" Make a CartCoord2D from the point in format: x,y"
from com.vividsolutions.jts.geom import Coordinate
ind = string.find(point,",")
latStr = point[0:ind-1]
lonStr = point[ind+1:len(point)]
ind = string.find(point, ",")
latStr = point[0:ind - 1]
lonStr = point[ind + 1:len(point)]
lat = float(latStr)
lon = float(lonStr)
return Coordinate(lon,lat)
lon = float(lonStr)
return Coordinate(lon, lat)
def makeArea(gridLoc, pointList, refname=None):
" Make a Reference Area with a unique ReferenceID"
from com.vividsolutions.jts.geom import GeometryFactory, LinearRing, Coordinate, Polygon
from com.raytheon.uf.common.dataplugin.gfe.reference import ReferenceData_CoordinateType as CoordinateType
from com.raytheon.uf.common.dataplugin.gfe.reference import ReferenceData_CoordinateType as CoordinateType
geomFactory = GeometryFactory()
import jep
size = len(pointList)
if pointList[0] != pointList[size-1]: # closing the loop
if pointList[0] != pointList[size - 1]: # closing the loop
pointList.append(pointList[0])
pointArray = jep.jarray(len(pointList), Coordinate)
for i in range(len(pointList)):
@ -893,7 +911,7 @@ def makeArea(gridLoc, pointList, refname=None):
polyArray[0] = poly
region = geomFactory.createMultiPolygon(polyArray)
if refname is None:
refname = "Ref" + getTime()
refname = "Ref" + getTime()
refId = ReferenceID(refname)
refData = ReferenceData(gridLoc, refId, region, CoordinateType.LATLON)
# randerso: I don't think this is necessary
@ -913,8 +931,8 @@ def storeReferenceData(refSetMgr, refData, temp=True):
def getTime():
"Return an ascii string for the current time without spaces or :'s"
timeStr = `time.time()`
timeStr = string.replace(timeStr,".","_")
timeStr = `time.time()`
timeStr = string.replace(timeStr, ".", "_")
return timeStr
def getAbsTime(timeStr):
@ -926,7 +944,7 @@ def getAbsTime(timeStr):
hour = string.atoi(timeStr[9:11])
minute = string.atoi(timeStr[11:13])
return AFPSSup.AbsTimeYMD(year,month,day,hour,minute)
return AbsTime.absTimeYMD(year, month, day, hour, minute)
def usage():
print """

View file

@ -54,6 +54,7 @@
# Oct 31, 2013 2508 randerso Change to use DiscreteGridSlice.getKeys()
# Nov 07, 2013 2476 dgilling Fix _getGridsResult() for retrieving
# Wx/Discrete in First mode.
# Dec 23, 2013 16893 ryu Added unloadWEs() method (created by njensen)
#
########################################################################
import types, string, time, sys
@ -1500,7 +1501,6 @@ class SmartScript(BaseTool.BaseTool):
tzname = self.__dataMgr.getClient().getSiteTimeZone()
tz = dateutil.tz.gettz(tzname)
utczone = dateutil.tz.gettz('UTC')
gmdt = self._gmtime(date).replace(tzinfo=utczone)
tzdt = gmdt.astimezone(tz)
@ -1803,6 +1803,19 @@ class SmartScript(BaseTool.BaseTool):
parmJA[0] = parm
self.__parmMgr.deleteParm(parmJA)
def unloadWEs(self, model, elementLevelPairs, mostRecent=0):
jparms = []
for element, level in elementLevelPairs:
exprName = self.getExprName(model, element, level, mostRecent)
parm = self.__parmMgr.getParmInExpr(exprName, 1)
if parm:
jparms.append(parm)
if jparms:
parmJA = jep.jarray(len(jparms), jparms[0])
for i in xrange(len(jparms)):
parmJA[i] = jparms[i]
self.__parmMgr.deleteParm(parmJA)
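# Hypothetical usage sketch (assumes a GFE procedure with SmartScript methods available;
# the model name and element/level pairs are illustrative only):
#   self.unloadWEs("Fcst", [("T", "SFC"), ("Td", "SFC")])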
def saveElements(self, elementList):
# Save the given Fcst elements to the server
# Example:

View file

@ -28,6 +28,7 @@ import java.util.Set;
import java.util.TimeZone;
import org.eclipse.jface.dialogs.IDialogConstants;
import org.eclipse.jface.dialogs.MessageDialog;
import org.eclipse.jface.resource.ImageRegistry;
import org.eclipse.swt.SWT;
import org.eclipse.swt.events.MenuAdapter;
@ -97,6 +98,7 @@ import com.raytheon.viz.ui.dialogs.ICloseCallback;
* up warnings.
* May 15, 2013 1842 dgilling Pass DataManager instance down to sub-
* components.
* Feb 12, 2014 2801 randerso Added prompting if formatter is run against non-normal database
*
* </pre>
*
@ -106,6 +108,12 @@ import com.raytheon.viz.ui.dialogs.ICloseCallback;
public class FormatterLauncherDialog extends CaveJFACEDialog implements
IProductTab {
// formatter data sources. Fcst must be first
private static enum FormatterDataSource {
Fcst, ISC, Official, Default,
}
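// Descriptive note on the ordering requirement above: when CAVE is not in OPERATIONAL
// mode, the menu-building loop below selects the Fcst entry (shown as Fcst_Prac or
// Fcst_Test) and breaks, so Fcst being first in this enum is what makes it the default
// outside operational mode.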
private final transient IUFStatusHandler statusHandler = UFStatus
.getHandler(FormatterLauncherDialog.class);
@ -161,24 +169,9 @@ public class FormatterLauncherDialog extends CaveJFACEDialog implements
private Image failedImg;
/**
* Fcst data source menu item.
* data source menu items
*/
private MenuItem fcstMI = null;
/**
* Official data source menu item.
*/
private MenuItem officialMI = null;
/**
* ISC data source menu item.
*/
private MenuItem iscMI = null;
/**
* Default data source menu item.
*/
private MenuItem defaultMI = null;
private java.util.List<MenuItem> dataSourceMI;
/**
* Products menu.
@ -212,8 +205,6 @@ public class FormatterLauncherDialog extends CaveJFACEDialog implements
private DataManager dataMgr;
private String selectedDataSource = null;
private boolean doClose = false;
/**
@ -338,59 +329,47 @@ public class FormatterLauncherDialog extends CaveJFACEDialog implements
// Get the CAVE operating mode
CAVEMode mode = dataMgr.getOpMode();
// Forecast menu item, set text based on operating mode
fcstMI = new MenuItem(dataSourceMenu, SWT.RADIO);
if (mode.equals(CAVEMode.OPERATIONAL)) {
fcstMI.setText("Fcst");
} else if (mode.equals(CAVEMode.PRACTICE)) {
fcstMI.setText("Fcst_Prac");
fcstMI.setSelection(true);
} else {
fcstMI.setText("Fcst_Test");
fcstMI.setSelection(true);
}
fcstMI.addSelectionListener(new SelectionAdapter() {
@Override
public void widgetSelected(SelectionEvent event) {
selectedDataSource = dataMgr.getParmManager()
.getMutableDatabase().toString();
this.dataSourceMI = new ArrayList<MenuItem>();
// create menu items
for (FormatterDataSource source : FormatterDataSource.values()) {
MenuItem item = new MenuItem(dataSourceMenu, SWT.RADIO);
item.setData(source);
this.dataSourceMI.add(item);
String text = source.toString();
if (source.equals(FormatterDataSource.Fcst)) {
if (mode.equals(CAVEMode.PRACTICE)) {
text += "_Prac";
} else if (mode.equals(CAVEMode.TEST)) {
text += "_Test";
}
}
});
item.setText(text);
item.addSelectionListener(new SelectionAdapter() {
// Only show these menu items when in operational mode
if (mode.equals(CAVEMode.OPERATIONAL)) {
// ISC menu item
iscMI = new MenuItem(dataSourceMenu, SWT.RADIO);
iscMI.setText("ISC");
iscMI.addSelectionListener(new SelectionAdapter() {
@Override
public void widgetSelected(SelectionEvent event) {
getIscDataSource();
public void widgetSelected(SelectionEvent e) {
MenuItem item = (MenuItem) e.getSource();
if (item.getSelection()) {
statusHandler.handle(
Priority.EVENTB,
"User selected formatter data source: "
+ item.getText());
}
}
});
// Official menu item
officialMI = new MenuItem(dataSourceMenu, SWT.RADIO);
officialMI.setText("Official");
officialMI.addSelectionListener(new SelectionAdapter() {
@Override
public void widgetSelected(SelectionEvent event) {
selectedDataSource = getOfficialDataSource();
}
});
if (!mode.equals(CAVEMode.OPERATIONAL)) {
item.setSelection(true);
statusHandler.handle(Priority.EVENTB,
"Formatter default data source: " + item.getText());
break;
}
// Default menu item
defaultMI = new MenuItem(dataSourceMenu, SWT.RADIO);
defaultMI.setText("Default");
defaultMI.setSelection(true);
defaultMI.addSelectionListener(new SelectionAdapter() {
@Override
public void widgetSelected(SelectionEvent event) {
}
});
} else {
selectedDataSource = dataMgr.getParmManager().getMutableDatabase()
.toString();
if (source.equals(FormatterDataSource.Default)) {
item.setSelection(true);
statusHandler.handle(Priority.EVENTB,
"Formatter default data source: " + item.getText());
}
}
}
@ -731,32 +710,108 @@ public class FormatterLauncherDialog extends CaveJFACEDialog implements
* The name of the product
* @return The data source
*/
public String getSelectedDataSource(String productName) {
if (fcstMI.getSelection()) {
selectedDataSource = getFcstDataSource();
} else if (iscMI.getSelection()) {
selectedDataSource = getIscDataSource();
} else if (officialMI.getSelection()) {
selectedDataSource = getOfficialDataSource();
} else {
// Default value
ProductDefinition prodDef = textProductMgr
.getProductDefinition(productName);
String dataSource = (String) prodDef.get("database");
if (dataSource == null) {
dataSource = "Official";
}
if (dataSource.equals("ISC")) {
selectedDataSource = getIscDataSource();
} else if (dataSource.equals("Official")) {
selectedDataSource = getOfficialDataSource();
} else {
selectedDataSource = getFcstDataSource();
public DatabaseID getSelectedDataSource(String productName) {
FormatterDataSource menuDataSource = FormatterDataSource.Default;
for (MenuItem item : dataSourceMI) {
if (item.getSelection()) {
menuDataSource = (FormatterDataSource) item.getData();
break;
}
}
return selectedDataSource;
// Default value
ProductDefinition prodDef = textProductMgr
.getProductDefinition(productName);
String dbString = (String) prodDef.get("database");
FormatterDataSource productDataSource;
if (dbString == null) {
productDataSource = FormatterDataSource.Default;
} else {
try {
productDataSource = FormatterDataSource.valueOf(dbString);
} catch (IllegalArgumentException e) {
StringBuilder msg = new StringBuilder();
msg.append("The ");
msg.append(productName);
msg.append(" product definition contains an invalid database selection: \"");
msg.append(dbString);
msg.append("\". Valid values are: [");
for (FormatterDataSource src : FormatterDataSource.values()) {
if (!src.equals(FormatterDataSource.Default)) {
msg.append(src).append(", ");
}
}
msg.delete(msg.length() - 2, msg.length());
msg.append("]");
statusHandler.error(msg.toString());
return null;
}
}
FormatterDataSource dataSource;
if (menuDataSource.equals(FormatterDataSource.Default)) {
if (productDataSource.equals(FormatterDataSource.Default)) {
dataSource = FormatterDataSource.Official;
} else {
dataSource = productDataSource;
}
} else {
dataSource = menuDataSource;
}
if (!productDataSource.equals(FormatterDataSource.Default)) {
if (!dataSource.equals(productDataSource)) {
// A check should be made that a hazard formatter is actually
// being run on the database specified in the Local or
// Definition file (Definition["database"] entry). If the
// database being run is different, provide a warning to the
// forecaster that requires acknowledgment before running.
MessageDialog dlg = new MessageDialog(getShell(),
"Confirm Data Source", null,
"The product definition indicates the " + productName
+ " formatter should be run against the "
+ productDataSource
+ " database, but you have selected the "
+ dataSource
+ " database.\n\nDo you wish to continue?",
MessageDialog.WARNING, new String[] { "Yes", "No" }, 1);
int retVal = dlg.open();
if (retVal != 0) {
dataSource = null;
}
}
} else {
if (dataSource.equals(FormatterDataSource.ISC)) {
// If the database is not explicitly defined (default), provide
// a warning to the forecaster that requires acknowledgment
// before running if the database being used is ISC
MessageDialog dlg = new MessageDialog(
getShell(),
"Confirm Data Source",
null,
"You are about to run the "
+ productName
+ " formatter against the ISC database.\n\nDo you wish to continue?",
MessageDialog.WARNING, new String[] { "Yes", "No" }, 1);
int retVal = dlg.open();
if (retVal != 0) {
dataSource = null;
}
}
}
DatabaseID selectedDbId;
if (dataSource == null) {
selectedDbId = null;
} else if (dataSource.equals(FormatterDataSource.ISC)) {
selectedDbId = getIscDataSource();
} else if (dataSource.equals(FormatterDataSource.Official)) {
selectedDbId = getOfficialDataSource();
} else {
selectedDbId = getFcstDataSource();
}
return selectedDbId;
}
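// Hypothetical caller sketch (variable names illustrative; mirrors the ProductAreaComp
// change elsewhere in this merge):
//   DatabaseID dbId = formatterLauncher.getSelectedDataSource(productName);
//   if (dbId == null) {
//       return; // invalid Definition["database"] entry or forecaster declined the prompt
//   }
//   FormatterUtil.runFormatterScript(dataMgr, textProductMgr, productName,
//           dbId.toString(), vtecMode, listener);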
/**
@ -1009,8 +1064,8 @@ public class FormatterLauncherDialog extends CaveJFACEDialog implements
*
* @return The FcstDataSource
*/
private String getFcstDataSource() {
return dataMgr.getParmManager().getMutableDatabase().toString();
private DatabaseID getFcstDataSource() {
return dataMgr.getParmManager().getMutableDatabase();
}
/**
@ -1021,13 +1076,13 @@ public class FormatterLauncherDialog extends CaveJFACEDialog implements
*
* @return The ISC Data Source
*/
private String getIscDataSource() {
private DatabaseID getIscDataSource() {
java.util.List<DatabaseID> dbs = dataMgr.getParmManager()
.getIscDatabases();
if (dbs.size() > 0) {
// Always return the last one in the list
return dbs.get(dbs.size() - 1).toString();
return dbs.get(dbs.size() - 1);
}
return null;
@ -1038,12 +1093,12 @@ public class FormatterLauncherDialog extends CaveJFACEDialog implements
*
* @return The Official Data source
*/
private String getOfficialDataSource() {
String source = null;
private DatabaseID getOfficialDataSource() {
DatabaseID source = null;
try {
ServerResponse<java.util.List<DatabaseID>> sr = dataMgr.getClient()
.getOfficialDBName();
source = sr.getPayload().get(0).toString();
source = sr.getPayload().get(0);
} catch (GFEServerException e) {
statusHandler.handle(Priority.PROBLEM,
"Unable to determine official db", e);

View file

@ -34,6 +34,7 @@ import org.eclipse.swt.widgets.Label;
import org.eclipse.swt.widgets.ProgressBar;
import org.eclipse.swt.widgets.TabFolder;
import com.raytheon.uf.common.dataplugin.gfe.db.objects.DatabaseID;
import com.raytheon.viz.gfe.Activator;
import com.raytheon.viz.gfe.core.DataManager;
import com.raytheon.viz.gfe.dialogs.FormatterLauncherDialog;
@ -64,6 +65,8 @@ import com.raytheon.viz.gfe.textformatter.TextProductManager;
* 05 SEP 2013 2329 randerso Added call to ZoneCombinerComp.applyZoneCombo when
* when run formatter button is clicked.
* 05 FEB 2014 2591 randerso Added dataManager to ZoneCombinerComp constructor
* Passed dataMgr instance to FormatterUtil.runFormatterScript
* 12 FEB 2014 2801 randerso Added prompting if formatter is run against non-normal database
*
* </pre>
*
@ -365,36 +368,37 @@ public class ProductAreaComp extends Composite implements
productEditorBtnSelected();
if (okToLoseText()) {
productEditorComp.clearProductText();
abortFormatterBtn.setEnabled(true);
// closeTabBtn.setEnabled(false);
runFormatterBtn.setEnabled(false);
String vtecMode = "";
if (formattingCbo.isVisible()) {
vtecMode = formattingCbo.getText();
} else {
int hazIndex = productName.indexOf("Hazard_");
if (hazIndex > -1) {
String category = productName.substring(
hazIndex + 7, hazIndex + 10);
vtecMode = textProductMgr
.getVtecMessageType(category);
if (vtecMode == null) {
vtecMode = "";
DatabaseID dbId = ((FormatterLauncherDialog) productTabCB)
.getSelectedDataSource(productName);
if (dbId != null) {
productEditorComp.clearProductText();
abortFormatterBtn.setEnabled(true);
// closeTabBtn.setEnabled(false);
runFormatterBtn.setEnabled(false);
String vtecMode = "";
if (formattingCbo.isVisible()) {
vtecMode = formattingCbo.getText();
} else {
int hazIndex = productName.indexOf("Hazard_");
if (hazIndex > -1) {
String category = productName.substring(
hazIndex + 7, hazIndex + 10);
vtecMode = textProductMgr
.getVtecMessageType(category);
if (vtecMode == null) {
vtecMode = "";
}
}
}
}
// Check the data source menus, if one is selected then
// use
// it, else use the default
String dbId = null;
zoneCombiner.applyZoneCombo();
dbId = ((FormatterLauncherDialog) productTabCB)
.getSelectedDataSource(productName);
FormatterUtil.runFormatterScript(textProductMgr,
productName, dbId, vtecMode,
ProductAreaComp.this);
// Get the source database
zoneCombiner.applyZoneCombo();
FormatterUtil.runFormatterScript(dataMgr,
textProductMgr, productName,
dbId.toString(), vtecMode,
ProductAreaComp.this);
}
}
}
});

View file

@ -22,6 +22,7 @@ package com.raytheon.viz.gfe.textformatter;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -223,12 +224,18 @@ public class CombinationsFileUtil {
// retrieve combinations file if it's changed
LocalizationFile lf = pm.getStaticLocalizationFile(FileUtil.join(
COMBO_DIR_PATH, comboName + ".py"));
File pyFile;
try {
pyFile = lf.getFile(true);
} catch (LocalizationException e) {
throw new GfeException("Error retrieving combinations file: "
+ comboName, e);
File pyFile = null;
if (lf != null) {
try {
pyFile = lf.getFile(true);
} catch (LocalizationException e) {
throw new GfeException("Error retrieving combinations file: "
+ comboName, e);
}
}
if (pyFile == null || !pyFile.exists()) {
return Collections.emptyList();
}
LocalizationContext baseContext = pm.getContext(

View file

@ -24,7 +24,6 @@ import java.util.TimeZone;
import com.raytheon.uf.common.status.IUFStatusHandler;
import com.raytheon.uf.common.status.UFStatus;
import com.raytheon.uf.common.status.UFStatus.Priority;
import com.raytheon.uf.common.time.SimulatedTime;
import com.raytheon.uf.common.time.TimeRange;
import com.raytheon.viz.core.mode.CAVEMode;
@ -42,6 +41,8 @@ import com.raytheon.viz.gfe.tasks.TaskManager;
* Sep 8, 2008 njensen Initial creation
* Jan 15, 2010 3395 ryu Fix "issued by" functionality
* Sep 05, 2013 2329 randerso Removed save of combinations file
* Feb 12, 2014 2591 randerso Passed dataMgr instance to FormatterUtil.runFormatterScript
* Removed call to TextProductManager.reloadModule
*
* </pre>
*
@ -59,6 +60,9 @@ public class FormatterUtil {
/**
* Runs a text formatter script for a given product
*
* @param dataMgr
* the DataManager instance to use
*
* @param productMgr
* the formatter instance to use
* @param productName
@ -70,22 +74,12 @@ public class FormatterUtil {
* @param finish
* listener to fire when formatter finishes generating product
*/
public static void runFormatterScript(TextProductManager productMgr,
String productName, String dbId, String vtecMode,
TextProductFinishListener finish) {
try {
String filename = productMgr.getCombinationsFileName(productName);
boolean mapRequired = productMgr.mapRequired(productName);
if (filename != null && mapRequired) {
productMgr.reloadModule(filename);
}
} catch (Exception e) {
statusHandler.handle(Priority.PROBLEM,
"Cannot generate combinations file", e);
}
public static void runFormatterScript(DataManager dataMgr,
TextProductManager productMgr, String productName, String dbId,
String vtecMode, TextProductFinishListener finish) {
int testMode = 0;
if (DataManager.getCurrentInstance().getOpMode().equals(CAVEMode.TEST)) {
if (dataMgr.getOpMode().equals(CAVEMode.TEST)) {
testMode = 1;
}
@ -106,8 +100,7 @@ public class FormatterUtil {
}
String name = productMgr.getModuleName(productName);
String varDict = productMgr.getVarDict(productName,
DataManager.getCurrentInstance(), dbId);
String varDict = productMgr.getVarDict(productName, dataMgr, dbId);
if (varDict != null) {
// run the formatter with the normal active table

View file

@ -55,9 +55,10 @@ import com.raytheon.viz.gfe.core.DataManager;
* SOFTWARE HISTORY
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* May 1, 2009 njensen Initial creation
* Jan 15, 2010 3395 ryu Fix "issued by" functionality
* Apr 24, 2013 1936 dgilling Remove unused imports.
* May 1, 2009 njensen Initial creation
* Jan 15, 2010 3395 ryu Fix "issued by" functionality
* Apr 24, 2013 1936 dgilling Remove unused imports.
* Feb 12, 2014 2591 randerso Removed reloadModule method
*
* </pre>
*
@ -269,17 +270,6 @@ public class TextProductManager {
return mapName;
}
protected void reloadModule(String moduleName) {
Map<String, Object> args = new HashMap<String, Object>(1);
args.put("moduleName", moduleName);
try {
script.execute("reloadModule", args);
} catch (JepException e) {
statusHandler.handle(Priority.PROBLEM,
"Exception reloading module " + moduleName, e);
}
}
public void setIssuedBy(String issuedBy) {
if (LocalizationManager.getInstance().getCurrentSite().equals(issuedBy)) {
this.issuedBy = "";

View file

@ -40,6 +40,7 @@ import com.raytheon.viz.grid.util.RadarProductCodeMapping;
* ------------ ---------- ----------- --------------------------
* Sep 20, 2012 bsteffen Initial creation
* Aug 30, 2013 2298 rjpeter Make getPluginName abstract
* Feb 21, 2014 DR 16744 D. Friedman Support thin client updates
*
* </pre>
*
@ -150,10 +151,14 @@ public class RadarUpdater implements IAlertObserver {
@Override
public void alertArrived(Collection<AlertMessage> alertMessages) {
ProductAlertObserver.processDataURIAlerts(convertRadarAlertsToGridDatauris(alertMessages));
}
public Set<String> convertRadarAlertsToGridDatauris(Collection<AlertMessage> alertMessages) {
RadarStation configuredRadar = RadarAdapter.getInstance()
.getConfiguredRadar();
if (configuredRadar == null) {
return;
return new HashSet<String>();
}
Set<String> datauris = new HashSet<String>();
for (AlertMessage alertMessage : alertMessages) {
@ -209,7 +214,7 @@ public class RadarUpdater implements IAlertObserver {
"Unable to generate updates for derived product", e);
}
}
ProductAlertObserver.processDataURIAlerts(datauris);
return datauris;
}
private CacheKey getCacheKey(RadarRequestableLevelNode rNode) {

View file

@ -20,6 +20,7 @@
package com.raytheon.viz.grid.util;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@ -81,6 +82,7 @@ import com.raytheon.viz.radar.util.StationUtils;
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Mar 23, 2010 #4473 rjpeter Initial creation
* Feb 21, 2014 DR 16744 D. Friedman Add getUpdateConstraints
*
* </pre>
*
@ -400,4 +402,23 @@ public class RadarAdapter {
return rval;
}
public Map<String, RequestConstraint> getUpdateConstraints() {
RadarProductCodeMapping rpcMap = RadarProductCodeMapping.getInstance();
HashSet<Integer> productCodes = new HashSet<Integer>();
for (String abbrev : rpcMap.getParameterAbbrevs()) {
productCodes.addAll(rpcMap.getProductCodesForAbbrev(abbrev));
}
Map<String, RequestConstraint> rcMap = new HashMap<String, RequestConstraint>();
rcMap.put(RadarAdapter.PLUGIN_NAME_QUERY, new RequestConstraint(
RADAR_SOURCE));
rcMap.put(ICAO_QUERY, new RequestConstraint(getConfiguredRadar()
.getRdaId().toLowerCase()));
rcMap.put(
PRODUCT_CODE_QUERY,
new RequestConstraint(Arrays.toString(new ArrayList<Integer>(
productCodes).toArray()),
RequestConstraint.ConstraintType.IN));
return rcMap;
}
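// Hypothetical usage sketch (the map keys are the query constants used above; the
// actual ICAO and product-code values depend on the locally configured radar):
//   Map<String, RequestConstraint> constraints =
//           RadarAdapter.getInstance().getUpdateConstraints();
//   // e.g. PLUGIN_NAME_QUERY -> RADAR_SOURCE, ICAO_QUERY -> rda id (lower case),
//   //      PRODUCT_CODE_QUERY -> IN (codes from RadarProductCodeMapping)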
}

View file

@ -178,7 +178,7 @@
</appender>
<appender name="ThreadBasedLog" class="com.raytheon.uf.common.status.logback.ThreadBasedAppender">
<threadPatterns>RadarLog:radarThreadPool.*;SatelliteLog:satelliteThreadPool.*;ShefLog:shefThreadPool.*;TextLog:textThreadPool.*;SmartInitLog:smartInit.*;PurgeLog:Purge.*;ArchiveLog:Archive.*</threadPatterns>
<threadPatterns>RadarLog:Ingest.Radar.*;SatelliteLog:Ingest.Satellite.*;ShefLog:Ingest.Shef.*;TextLog:Ingest.Text.*;SmartInitLog:smartInit.*;PurgeLog:Purge.*;ArchiveLog:Archive.*</threadPatterns>
<defaultAppender>asyncConsole</defaultAppender>
<appender-ref ref="asyncConsole"/>
<appender-ref ref="RadarLog"/>

View file

@ -90,11 +90,10 @@
</appender>
<appender name="ThreadBasedLog" class="com.raytheon.uf.common.status.logback.ThreadBasedAppender">
<threadPatterns>HarvesterLog:harvesterThreadPool.*,crawlerThreadPool.*,Crawler.*,RetrievalLog:retrievalThreadPool.*,retrievalThreadPool.*,Retrieval.*</threadPatterns>
<threadPatterns>HarvesterLog:harvester.*,crawlerThreadPool.*,Crawler.*</threadPatterns>
<defaultAppender>console</defaultAppender>
<appender-ref ref="console"/>
<appender-ref ref="HarvesterLog"/>
<appender-ref ref="RetrievalLog"/>
</appender>
<logger name="com.raytheon">

View file

@ -87,6 +87,7 @@
<exclude>.*datadelivery.*</exclude>
<exclude>.*bandwidth.*</exclude>
<includeMode>excludeDpaAndOgc</includeMode>
<exclude>obs-ingest-metarshef.xml</exclude>
<!-- ncep excludes until tested -->
<exclude>aww-ingest.xml</exclude>
<exclude>ncep-util-on-edex-ingest</exclude>
@ -117,10 +118,12 @@
<include>shef-ingest.xml</include>
<include>persist-ingest.xml</include>
<include>obs-common.xml</include>
<include>obs-ingest.xml</include>
<include>metartohmdb-plugin.xml</include>
<include>pointdata-common.xml</include>
<include>obs-ingest.xml</include>
<include>obs-ingest-metarshef.xml</include>
<include>metartohmdb-plugin.xml</include>
<include>pointdata-common.xml</include>
<include>shef-common.xml</include>
<include>ohd-common-database.xml</include>
<include>ohd-common.xml</include>
<include>alarmWhfs-spring.xml</include>
<include>arealffgGenerator-spring.xml</include>
@ -145,6 +148,7 @@
<exclude>fssobs-common.xml</exclude>
</mode>
<mode name="requestHydro">
<include>ohd-common-database.xml</include>
<include>ohd-common.xml</include>
<include>database-common.xml</include>
<include>ohd-request.xml</include>
@ -227,6 +231,7 @@
<include>shef-common.xml</include>
<include>satellite-common.xml</include>
<include>satellite-dataplugin-common.xml</include>
<include>ohd-common-database.xml</include>
<include>ohd-common.xml</include>
<include>management-common.xml</include>
<include>auth-common.xml</include>

View file

@ -27,17 +27,17 @@
<bean id="genericThreadPool"
class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="${JMS_POOL_MIN}" />
<property name="maxPoolSize" value="${JMS_POOL_MAX}" />
<property name="corePoolSize" value="0" />
<property name="maxPoolSize" value="1" />
<property name="queueCapacity" value="0" />
</bean>
<bean id="jms-generic" class="org.apache.camel.component.jms.JmsComponent">
<bean id="jms-generic" class="com.raytheon.uf.edex.esb.camel.jms.DedicatedThreadJmsComponent">
<constructor-arg ref="jmsGenericConfig" />
<property name="taskExecutor" ref="genericThreadPool" />
</bean>
<bean id="jms-durable" class="org.apache.camel.component.jms.JmsComponent">
<bean id="jms-durable" class="com.raytheon.uf.edex.esb.camel.jms.DedicatedThreadJmsComponent">
<constructor-arg ref="jmsDurableConfig" />
<property name="taskExecutor" ref="genericThreadPool" />
</bean>
@ -63,13 +63,13 @@
<bean id="jmsConfig" class="org.apache.camel.component.jms.JmsConfiguration">
<property name="cacheLevelName" value="CACHE_NONE"/>
<property name="recoveryInterval" value="1000"/>
<property name="recoveryInterval" value="10000"/>
<property name="requestTimeout" value="5000"/>
<!-- receiveTimeout is the amount of time a thread waits to receive a message before recycling -->
<!-- receiveTimeout also affects how fast a JMSConsumer will shut down, because the
thread may be stuck polling for the duration of receiveTimeout before shutting down -->
<property name="receiveTimeout" value="10000"/>
<property name="receiveTimeout" value="10000"/>
<property name="transacted" value="false"/>
<!-- force maxMessagesPerTask so that the threads don't keep disconnecting and reconnecting.
@ -80,6 +80,7 @@
<property name="destinationResolver" ref="qpidNoDurableResolver" />
<property name="disableReplyTo" value="true" />
<property name="deliveryPersistent" value="false"/>
<!--
<property name="transacted" value="true" />
<property name="acknowledgementModeName" value="TRANSACTED"/>
@ -240,7 +241,7 @@
**
** http://camel.apache.org/enterprise-integration-patterns.html
-->
<camelContext id="camel" xmlns="http://camel.apache.org/schema/spring" errorHandlerRef="errorHandler">
<camelContext id="camel" xmlns="http://camel.apache.org/schema/spring" errorHandlerRef="errorHandler">
<!-- Route for edex to listen for utility updates -->
<route id="edexUtilityNotify">

View file

@ -9,7 +9,7 @@
<bean id="binlightningDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="binlightning" />
<constructor-arg value="jms-dist:queue:Ingest.binlightning" />
<constructor-arg value="jms-durable:queue:Ingest.binlightning" />
</bean>
<bean id="binlightningCamelRegistered" factory-bean="clusteredCamelContextMgr"

View file

@ -8,7 +8,7 @@
<bean id="bufrmosDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="bufrmos" />
<constructor-arg value="jms-dist:queue:Ingest.bufrmos" />
<constructor-arg value="jms-durable:queue:Ingest.bufrmos" />
</bean>
<bean id="bufrmosCamelRegistered" factory-bean="contextManager"

View file

@ -11,7 +11,7 @@
<bean id="bufruaDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="bufruaPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.bufrua" />
<constructor-arg value="jms-durable:queue:Ingest.bufrua" />
</bean>
<bean id="bufruaCamelRegistered" factory-bean="contextManager"

View file

@ -9,7 +9,7 @@
<bean id="ccfpDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="ccfp" />
<constructor-arg value="jms-dist:queue:Ingest.ccfp" />
<constructor-arg value="jms-durable:queue:Ingest.ccfp" />
</bean>
<bean id="ccfpCamelRegistered" factory-bean="contextManager"

View file

@ -53,7 +53,7 @@
</route>
<route id="notifyIfpServer">
<from uri="jms-generic:topic:edex.alerts.gfe"/>
<from uri="jms-generic:topic:edex.alerts.gfe?threadName=notifyIfpServer-edex.alerts.gfe" />
<doTry>
<bean ref="serializationUtil" method="transformFromThrift"/>
<bean ref="ifpServer" method="processNotification"/>

View file

@ -346,19 +346,13 @@
<!-- ISC Send Beans -->
<bean id="iscSendQueue" class="com.raytheon.edex.plugin.gfe.isc.IscSendQueue" factory-method="getInstance"/>
<bean id="jms-iscsend" class="org.apache.camel.component.jms.JmsComponent">
<constructor-arg ref="jmsIscSendConfig"/>
<property name="taskExecutor" ref="iscSendThreadPool"/>
</bean>
<bean id="jmsIscSendConfig" class="org.apache.camel.component.jms.JmsConfiguration"
factory-bean="jmsDurableConfig" factory-method="copy"/>
<bean id="iscSendThreadPool" class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="2"/>
<property name="maxPoolSize" value="2"/>
<property name="corePoolSize" value="1" />
<property name="maxPoolSize" value="1" />
</bean>
<bean id="iscSendSrvCfg" class="com.raytheon.edex.plugin.gfe.isc.SendIscSrvConfig">
<property name="executor" ref="iscSendThreadPool"/>
<!-- Threads should be 1 less than the size of the pool to account for the iscSendJobQueueAggr route. -->
<!-- The thread count should match the size of the iscSendThreadPool -->
<property name="threads" value="1"/>
<property name="runningTimeOutMillis" value="300000"/>
<property name="threadSleepInterval" value="5000"/>
@ -370,16 +364,6 @@
<!-- ISC Receive Beans -->
<bean id="jms-iscrec" class="org.apache.camel.component.jms.JmsComponent">
<constructor-arg ref="jmsIscReceiveConfig"/>
<property name="taskExecutor" ref="iscReceiveThreadPool"/>
</bean>
<bean id="jmsIscReceiveConfig" class="org.apache.camel.component.jms.JmsConfiguration" factory-bean="jmsDurableConfig"
factory-method="copy"/>
<bean id="iscReceiveThreadPool" class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="2"/>
<property name="maxPoolSize" value="2"/>
</bean>
<bean id="IscReceiveSrv" class="com.raytheon.edex.plugin.gfe.isc.IscReceiveSrv">
<constructor-arg ref="iscDataRecPythonThreadPool"/>
</bean>
@ -455,7 +439,7 @@
</route>
<route id="gfeSiteActivationNotification">
<from uri="jms-generic:topic:edex.alerts.siteActivate"/>
<from uri="jms-generic:topic:edex.alerts.siteActivate?threadName=gfe-edex.alerts.siteActivate"/>
<bean ref="serializationUtil" method="transformFromThrift"/>
<bean ref="sbLockMgr" method="handleSiteActivationNotification"/>
</route>
@ -494,8 +478,7 @@
<!-- ISC Data Receive route -->
<route id="iscReceiveRoute">
<from
uri="jms-iscrec:queue:gfeIscDataReceive?concurrentConsumers=2"/>
<from uri="jms-durable:queue:gfeIscDataReceive?concurrentConsumers=2"/>
<doTry>
<pipeline>
<bean ref="serializationUtil" method="transformFromThrift"/>
@ -503,8 +486,7 @@
</pipeline>
<doCatch>
<exception>java.lang.Throwable</exception>
<to
uri="log:iscDataRec?level=ERROR"/>
<to uri="log:iscDataRec?level=ERROR"/>
</doCatch>
</doTry>
</route>
@ -515,7 +497,7 @@
autoStartup="false">
<route id="iscSendJobQueueAggr">
<from uri="jms-iscsend:queue:iscSendNotification"/>
<from uri="jms-durable:queue:iscSendNotification" />
<doTry>
<bean ref="serializationUtil" method="transformFromThrift"/>
<bean ref="iscSendQueue" method="addSendJobs"/>

View file

@ -4,15 +4,9 @@
http://camel.apache.org/schema/spring http://camel.apache.org/schema/spring/camel-spring.xsd">
<bean id="smartInitQueue" class="com.raytheon.edex.plugin.gfe.smartinit.SmartInitQueue" factory-method="createQueue"/>
<bean id="jms-smartinit" class="org.apache.camel.component.jms.JmsComponent">
<constructor-arg ref="jmsSmartInitConfig"/>
<property name="taskExecutor" ref="smartInitThreadPool"/>
</bean>
<bean id="jmsSmartInitConfig" class="org.apache.camel.component.jms.JmsConfiguration" factory-bean="jmsDurableConfig"
factory-method="copy"/>
<bean id="smartInitThreadPool" class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="${smartinit.threadpoolsize}"/>
<property name="maxPoolSize" value="${smartinit.threadpoolsize}"/>
<property name="corePoolSize" value="${smartinit.threads}" />
<property name="maxPoolSize" value="${smartinit.threads}" />
</bean>
<bean id="smartInitSrvCfg" class="com.raytheon.edex.plugin.gfe.smartinit.SmartInitSrvConfig">
<property name="executor" ref="smartInitThreadPool"/>
@ -114,7 +108,7 @@
<!-- Smart Init Routes -->
<!-- main route now handled through the gfeIngestNotification -->
<route id="manualSmartInit">
<from uri="jms-smartinit:queue:manualSmartInit"/>
<from uri="jms-durable:queue:manualSmartInit?threadName=smartInitManual" />
<doTry>
<bean ref="smartInitQueue" method="addManualInit"/>
<doCatch>
@ -126,7 +120,7 @@
</route>
<route id="gfeVtecChangeNotification">
<from uri="jms-generic:topic:edex.alerts.vtec"/>
<from uri="jms-generic:topic:edex.alerts.vtec?threadName=gfe-edex.alerts.vtec"/>
<doTry>
<bean ref="serializationUtil" method="transformFromThrift"/>
<bean ref="vtecChangeListener" method="handleNotification"/>
@ -138,7 +132,6 @@
</doTry>
</route>
<!-- Convert the topic into a queue so only one consumer gets each message and we still have competing consumers. -->
</camelContext>
<bean factory-bean="clusteredCamelContextMgr" factory-method="register">

View file

@ -59,9 +59,10 @@ import com.raytheon.uf.edex.database.dao.DaoConfig;
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Oct 20, 2011 dgilling Initial creation
* May 08, 2012 #600 dgilling Re-work logic for handling PENDING
* records.
* Oct 20, 2011 dgilling Initial creation
* May 08, 2012 600 dgilling Re-work logic for handling PENDING
* records.
* Feb 07, 2014 2357 rjpeter iscSendNotification uri.
*
* </pre>
*
@ -74,9 +75,9 @@ public class IscSendQueue {
// how we'll organize the temporary queue
private class JobSetQueueKey {
private ParmID pid;
private final ParmID pid;
private IscSendState state;
private final IscSendState state;
public JobSetQueueKey(ParmID pid, IscSendState state) {
this.pid = pid;
@ -92,8 +93,9 @@ public class IscSendQueue {
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((pid == null) ? 0 : pid.hashCode());
result = prime * result + ((state == null) ? 0 : state.hashCode());
result = (prime * result) + ((pid == null) ? 0 : pid.hashCode());
result = (prime * result)
+ ((state == null) ? 0 : state.hashCode());
return result;
}
@ -148,7 +150,7 @@ public class IscSendQueue {
private int timeoutMillis = 60000;
private Map<JobSetQueueKey, List<IscSendRecord>> jobSet = new HashMap<JobSetQueueKey, List<IscSendRecord>>();
private final Map<JobSetQueueKey, List<IscSendRecord>> jobSet = new HashMap<JobSetQueueKey, List<IscSendRecord>>();
private static final IscSendQueue instance = new IscSendQueue();
@ -168,7 +170,7 @@ public class IscSendQueue {
try {
byte[] messages = SerializationUtil.transformToThrift(sendJobs);
EDEXUtil.getMessageProducer().sendAsyncUri(
"jms-iscsend:queue:iscSendNotification", messages);
"jms-durable:queue:iscSendNotification", messages);
} catch (SerializationException e) {
handler.error("Unable to serialize IscSendRecords.", e);
} catch (EdexException e) {
@ -238,7 +240,7 @@ public class IscSendQueue {
// Now combine time ranges if we can
int i = 0;
while (i <= pending.size() - 2) {
while (i <= (pending.size() - 2)) {
TimeRange time = pending.get(i).getTimeRange();
TimeRange time1 = pending.get(i + 1).getTimeRange();

View file

@ -12,7 +12,7 @@
<bean id="goessoundingDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="goessoundingPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.goessounding"/>
<constructor-arg value="jms-durable:queue:Ingest.goessounding"/>
</bean>
<bean id="goessoundingCamelRegistered" factory-bean="contextManager"

View file

@ -5,20 +5,6 @@
<bean id="gribDecoder" class="com.raytheon.edex.plugin.grib.GribDecoder" />
<bean id="ingest-grib" class="org.apache.camel.component.jms.JmsComponent">
<constructor-arg ref="jmsGribConfig" />
<property name="taskExecutor" ref="gribThreadPool" />
</bean>
<bean id="jmsGribConfig" class="org.apache.camel.component.jms.JmsConfiguration"
factory-bean="jmsDurableConfig" factory-method="copy"/>
<bean id="gribThreadPool"
class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="${grib-decode.count.threads}" />
<property name="maxPoolSize" value="${grib-decode.count.threads}" />
</bean>
<bean id="gribGridPointLock" class="com.raytheon.edex.plugin.grib.GribGridPointLock">
<constructor-arg value="${grib-decode.count.gridpoints}"/>
<constructor-arg value="${grib-decode.count.threads}"/>
@ -53,7 +39,7 @@
autoStartup="false">
<endpoint id="gribSplitJmsEndpoint" uri="jms-durable:queue:Ingest.GribSplit?concurrentConsumers=${grib-split.count.threads}"/>
<endpoint id="gribDecodeJmsEndpoint" uri="ingest-grib:queue:Ingest.GribDecode?concurrentConsumers=${grib-decode.count.threads}"/>
<endpoint id="gribDecodeJmsEndpoint" uri="jms-durable:queue:Ingest.GribDecode?concurrentConsumers=${grib-decode.count.threads}"/>
<!-- Begin Grib Decode Route -->
<route id="gribSplitIngestRoute">
@ -66,7 +52,7 @@
<bean ref="stringToFile" />
<!-- strategyRef is needed because of camel bug https://issues.apache.org/activemq/browse/CAMEL-3333,
without the strategy it uses the original message in the multicast and it loses the largeFileLock header -->
<split strategyRef="useLatestAggregationStrategy">
<split strategyRef="useLatestAggregationStrategy" streaming="true">
<method bean="gribSplitter" method="split" />
<to uri="jms-durable:queue:Ingest.GribDecode" />
</split>
@ -97,6 +83,5 @@
</doFinally>
</doTry>
</route>
</camelContext>
</beans>

View file

@ -4,6 +4,6 @@
<bean id="gribDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="grib" />
<constructor-arg value="jms-dist:queue:Ingest.GribSplit" />
<constructor-arg value="jms-durable:queue:Ingest.GribSplit" />
</bean>
</beans>

View file

@ -8,7 +8,7 @@
<bean id="ldadDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="ldad" />
<constructor-arg value="jms-dist:queue:Ingest.ldad" />
<constructor-arg value="jms-durable:queue:Ingest.ldad" />
</bean>
<camelContext id="ldad-camel"

View file

@ -13,7 +13,7 @@
<bean id="ldadhydroDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="ldadhydro" />
<constructor-arg value="jms-dist:queue:Ingest.ldadhydro" />
<constructor-arg value="jms-durable:queue:Ingest.ldadhydro" />
</bean>
<bean id="ldadhydroPointData" class="com.raytheon.edex.plugin.ldadhydro.dao.LdadhydroPointDataTransform"/>

View file

@ -12,7 +12,7 @@
<bean id="ldadmanualDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="ldadmanual" />
<constructor-arg value="jms-dist:queue:Ingest.ldadmanual"/>
<constructor-arg value="jms-durable:queue:Ingest.ldadmanual"/>
</bean>
<bean id="ldadmanualCamelRegistered" factory-bean="contextManager"

View file

@ -16,7 +16,7 @@
<bean id="ldadprofilerDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="ldadprofiler" />
<constructor-arg value="jms-dist:queue:Ingest.ldadprofiler"/>
<constructor-arg value="jms-durable:queue:Ingest.ldadprofiler"/>
</bean>
<bean id="ldadprofilerCamelRegistered" factory-bean="contextManager"

View file

@ -0,0 +1,42 @@
<beans
xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
http://camel.apache.org/schema/spring http://camel.apache.org/schema/spring/camel-spring.xsd">
<bean id="obsCamelRegistered" factory-bean="contextManager" factory-method="register"
depends-on="persistCamelRegistered,
shefCamelRegistered,
metarToHMDBCamelRegistered">
<constructor-arg ref="obs-camel" />
</bean>
<camelContext id="obs-camel" xmlns="http://camel.apache.org/schema/spring"
errorHandlerRef="errorHandler" autoStartup="false">
<!-- Begin METAR routes -->
<route id="metarIngestRoute">
<from uri="jms-durable:queue:Ingest.obs" />
<setHeader headerName="pluginName">
<constant>obs</constant>
</setHeader>
<doTry>
<pipeline>
<bean ref="stringToFile" />
<bean ref="obsDecoder" method="decode" />
<bean ref="dupElim" />
<bean ref="metarPointData" method="toPointData" />
<multicast>
<to uri="direct-vm:persistIndexAlert" />
<to uri="direct-vm:metarToShef" />
<to uri="direct-vm:metarToHMDB" />
</multicast>
</pipeline>
<doCatch>
<exception>java.lang.Throwable</exception>
<to uri="log:metar?level=ERROR" />
</doCatch>
</doTry>
</route>
</camelContext>
</beans>

View file

@ -0,0 +1,41 @@
<beans
xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
http://camel.apache.org/schema/spring http://camel.apache.org/schema/spring/camel-spring.xsd">
<!-- This spring configuration is currently only used by the ingestHydro EDEX instance. -->
<bean id="obsCamelRegistered" factory-bean="contextManager" factory-method="register"
depends-on="shefCamelRegistered,
metarToHMDBCamelRegistered">
<constructor-arg ref="obs-camel" />
</bean>
<camelContext id="obs-camel" xmlns="http://camel.apache.org/schema/spring"
errorHandlerRef="errorHandler" autoStartup="false">
<!-- Begin METAR routes -->
<route id="metarIngestRoute">
<from uri="jms-durable:queue:Ingest.obs" />
<setHeader headerName="pluginName">
<constant>obs</constant>
</setHeader>
<doTry>
<pipeline>
<bean ref="stringToFile" />
<bean ref="obsDecoder" method="decode" />
<bean ref="metarPointData" method="toPointData" />
<multicast>
<to uri="direct-vm:metarToShef" />
<to uri="direct-vm:metarToHMDB" />
</multicast>
</pipeline>
<doCatch>
<exception>java.lang.Throwable</exception>
<to uri="log:metar?level=ERROR" />
</doCatch>
</doTry>
</route>
</camelContext>
</beans>

View file

@ -3,7 +3,7 @@
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
http://camel.apache.org/schema/spring http://camel.apache.org/schema/spring/camel-spring.xsd">
<bean id="obsDecoder" class="com.raytheon.edex.plugin.obs.ObsDecoder" />
<bean id="metarPointData" class="com.raytheon.edex.plugin.obs.metar.MetarPointDataTransform" />
@ -12,42 +12,7 @@
<bean id="obsDistRegistry" factory-bean="distributionSrv" factory-method="register">
<constructor-arg value="obs" />
<constructor-arg value="jms-dist:queue:Ingest.obs" />
<constructor-arg value="jms-durable:queue:Ingest.obs" />
</bean>
<bean id="obsCamelRegistered" factory-bean="contextManager" factory-method="register"
depends-on="persistCamelRegistered,
shefCamelRegistered,
metarToHMDBCamelRegistered">
<constructor-arg ref="obs-camel" />
</bean>
<camelContext id="obs-camel" xmlns="http://camel.apache.org/schema/spring"
errorHandlerRef="errorHandler" autoStartup="false">
<!-- Begin METAR routes -->
<route id="metarIngestRoute">
<from uri="jms-durable:queue:Ingest.obs" />
<setHeader headerName="pluginName">
<constant>obs</constant>
</setHeader>
<doTry>
<pipeline>
<bean ref="stringToFile" />
<bean ref="obsDecoder" method="decode" />
<bean ref="dupElim" />
<bean ref="metarPointData" method="toPointData" />
<multicast>
<to uri="direct-vm:persistIndexAlert" />
<to uri="direct-vm:metarToShef" />
<to uri="direct-vm:metarToHMDB" />
</multicast>
</pipeline>
<doCatch>
<exception>java.lang.Throwable</exception>
<to uri="log:metar?level=ERROR" />
</doCatch>
</doTry>
</route>
</camelContext>
</beans>

View file

@ -9,7 +9,7 @@
<bean id="poessoundingDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="poessoundingPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.poessounding"/>
<constructor-arg value="jms-durable:queue:Ingest.poessounding"/>
</bean>
<bean id="poessoundingCamelRegistered" factory-bean="contextManager"

View file

@ -9,7 +9,7 @@
<bean id="profilerDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="profilerPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.profiler"/>
<constructor-arg value="jms-durable:queue:Ingest.profiler"/>
</bean>
<bean id="profilerCamelRegistered" factory-bean="contextManager"

View file

@ -5,28 +5,17 @@
<bean id="radarDecompressor" class="com.raytheon.edex.plugin.radar.RadarDecompressor"/>
<bean id="radarDecoder" class="com.raytheon.edex.plugin.radar.RadarDecoder"/>
<bean id="jms-radar" class="org.apache.camel.component.jms.JmsComponent">
<constructor-arg ref="jmsRadarConfig" />
<property name="taskExecutor" ref="radarThreadPool" />
</bean>
<bean id="jmsRadarConfig" class="org.apache.camel.component.jms.JmsConfiguration"
factory-bean="jmsDurableConfig" factory-method="copy"/>
<bean id="radarThreadPool"
class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="2" />
<property name="maxPoolSize" value="2" />
</bean>
<bean id="radarDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="radar" />
<constructor-arg value="jms-dist:queue:Ingest.Radar" />
<constructor-arg value="jms-durable:queue:Ingest.Radar" />
</bean>
<bean id="radarRadarServerDistRegistry" factory-bean="radarserverDistributionSrv"
factory-method="register">
<constructor-arg value="radar" />
<constructor-arg value="jms-dist:queue:Ingest.RadarRadarServer" />
<constructor-arg value="jms-durable:queue:Ingest.RadarRadarServer" />
</bean>
<bean id="radarCamelRegistered" factory-bean="contextManager"
@ -47,13 +36,13 @@
<setHeader headerName="pluginName">
<constant>radar</constant>
</setHeader>
<to uri="jms-radar:queue:Ingest.Radar" />
<to uri="jms-durable:queue:Ingest.Radar" />
</route>
-->
<!-- Begin Radar routes -->
<route id="radarIngestRoute">
<from uri="jms-radar:queue:Ingest.Radar"/>
<from uri="jms-durable:queue:Ingest.Radar"/>
<setHeader headerName="dataType">
<constant>radar-sbn</constant>
</setHeader>
@ -61,7 +50,7 @@
</route>
<route id="radarRadarServerIngestRoute">
<from uri="jms-radar:queue:Ingest.RadarRadarServer"/>
<from uri="jms-durable:queue:Ingest.RadarRadarServer"/>
<setHeader headerName="dataType">
<constant>radar-local</constant>
</setHeader>

View file

@ -12,7 +12,7 @@
<bean id="reccoDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="reccoPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.recco" />
<constructor-arg value="jms-durable:queue:Ingest.recco" />
</bean>
<bean id="reccoCamelRegistered" factory-bean="contextManager"

View file

@ -10,7 +10,7 @@
<bean id="redbookDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="redbook" />
<constructor-arg value="jms-dist:queue:Ingest.redbook"/>
<constructor-arg value="jms-durable:queue:Ingest.redbook"/>
</bean>
<!--

View file

@ -3,18 +3,6 @@
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
http://camel.apache.org/schema/spring http://camel.apache.org/schema/spring/camel-spring.xsd">
<bean id="jms-satellite" class="org.apache.camel.component.jms.JmsComponent">
<constructor-arg ref="jmsSatelliteConfig" />
<property name="taskExecutor" ref="satelliteThreadPool" />
</bean>
<bean id="jmsSatelliteConfig" class="org.apache.camel.component.jms.JmsConfiguration"
factory-bean="jmsDurableConfig" factory-method="copy"/>
<bean id="satelliteThreadPool"
class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="1" />
<property name="maxPoolSize" value="1" />
</bean>
<bean id="satelliteDecoder" class="com.raytheon.edex.plugin.satellite.SatelliteDecoder">
<property name="dao" ref="satelliteDao" />
</bean>
@ -26,7 +14,7 @@
<bean id="satDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="satellite" />
<constructor-arg value="jms-dist:queue:Ingest.Satellite" />
<constructor-arg value="jms-durable:queue:Ingest.Satellite" />
</bean>
<bean id="satCamelRegistered" factory-bean="contextManager"
@ -53,7 +41,7 @@
<!-- Begin Sat routes -->
<route id="satIngestRoute">
<from uri="jms-satellite:queue:Ingest.Satellite"/>
<from uri="jms-durable:queue:Ingest.Satellite"/>
<setHeader headerName="pluginName">
<constant>satellite</constant>
</setHeader>

View file

@ -13,7 +13,7 @@
<bean id="sfcobsDistRegistry" factory-bean="distributionSrv" factory-method="register">
<constructor-arg value="sfcobs" />
<constructor-arg value="jms-dist:queue:Ingest.sfcobs" />
<constructor-arg value="jms-durable:queue:Ingest.sfcobs" />
</bean>
<bean id="sfcobsCamelRegistered" factory-bean="contextManager" factory-method="register"

View file

@ -3,18 +3,6 @@
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
http://camel.apache.org/schema/spring http://camel.apache.org/schema/spring/camel-spring.xsd">
<bean id="jms-shef" class="org.apache.camel.component.jms.JmsComponent">
<constructor-arg ref="jmsShefConfig" />
<property name="taskExecutor" ref="shefThreadPool" />
</bean>
<bean id="jmsShefConfig" class="org.apache.camel.component.jms.JmsConfiguration"
factory-bean="jmsDurableConfig" factory-method="copy"/>
<bean id="shefThreadPool"
class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="3" />
<property name="maxPoolSize" value="3" />
</bean>
<bean id="shefDecoder" class="com.raytheon.edex.plugin.shef.ShefDecoder">
<constructor-arg value="shef" />
</bean>
@ -47,13 +35,13 @@
factory-method="register">
<constructor-arg value="shef" />
<constructor-arg
value="jms-dist:queue:Ingest.Shef"/>
value="jms-durable:queue:Ingest.Shef"/>
</bean>
<bean id="shefHandleoupDistRegistry" factory-bean="handleoupDistributionSrv"
factory-method="register">
<constructor-arg value="shef" />
<constructor-arg value="jms-dist:queue:Ingest.Shef"/>
<constructor-arg value="jms-durable:queue:Ingest.Shef"/>
</bean>
<bean id="shefCamelRegistered" factory-bean="contextManager"
@ -92,7 +80,7 @@
<!-- Begin shef routes -->
<route id="shefIngestRoute">
<from
uri="jms-shef:queue:Ingest.Shef"/>
uri="jms-durable:queue:Ingest.Shef"/>
<setHeader headerName="pluginName">
<constant>shef</constant>
</setHeader>
@ -103,7 +91,7 @@
</route>
<route id="shefStagedRoute">
<from
uri="jms-shef:queue:Ingest.ShefStaged"/>
uri="jms-durable:queue:Ingest.ShefStaged"/>
<setHeader headerName="pluginName">
<constant>shef</constant>
</setHeader>
@ -155,7 +143,7 @@
<route id="shefManualIngestRoute">
<from
uri="jms-shef:queue:Ingest.ShefManual"/>
uri="jms-durable:queue:Ingest.ShefManual"/>
<setHeader headerName="pluginName">
<constant>shef</constant>
</setHeader>

View file

@ -9,13 +9,13 @@
<bean id="tafDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="taf" />
<constructor-arg value="jms-dist:queue:Ingest.taf"/>
<constructor-arg value="jms-durable:queue:Ingest.taf"/>
</bean>
<bean id="tafHandleoupDistRegistry" factory-bean="handleoupDistributionSrv"
factory-method="register">
<constructor-arg value="taf" />
<constructor-arg value="jms-dist:queue:Ingest.taf"/>
<constructor-arg value="jms-durable:queue:Ingest.taf"/>
</bean>
<bean id="tafCamelRegistered" factory-bean="contextManager"

View file

@ -82,6 +82,7 @@ import com.raytheon.uf.edex.decodertools.time.TimeTools;
* Jun 28, 2012 #827 dgilling Annotate id field for
* serialization.
* Nov 01, 2013 2361 njensen Remove XML annotations
* Feb 10, 2014 2777 rferrel Set the parentId when assigning sets.
*
* </pre>
*
@ -102,7 +103,6 @@ public class ChangeGroup extends PersistableDataObject {
@Id
@GeneratedValue
@DynamicSerializeElement
private int id;
/** A String containing the change group */
@ -885,6 +885,11 @@ public class ChangeGroup extends PersistableDataObject {
public void setTurbulence_layers(Set<TurbulenceLayer> turbulence_layers) {
this.turbulence_layers = turbulence_layers;
if ((turbulence_layers != null) && (turbulence_layers.size() > 0)) {
for (TurbulenceLayer turbulence_layer : turbulence_layers) {
turbulence_layer.setParentID(this);
}
}
}
public Set<IcingLayer> getIcing_layers() {
@ -893,6 +898,11 @@ public class ChangeGroup extends PersistableDataObject {
public void setIcing_layers(Set<IcingLayer> icing_layers) {
this.icing_layers = icing_layers;
if ((icing_layers != null) && (icing_layers.size() > 0)) {
for (IcingLayer icing_layer : icing_layers) {
icing_layer.setParentID(this);
}
}
}
public Set<TemperatureForecast> getTemp_forecasts() {
@ -901,6 +911,11 @@ public class ChangeGroup extends PersistableDataObject {
public void setTemp_forecasts(Set<TemperatureForecast> temp_forecasts) {
this.temp_forecasts = temp_forecasts;
if ((temp_forecasts != null) && (temp_forecasts.size() > 0)) {
for (TemperatureForecast temForecast : temp_forecasts) {
temForecast.setParentID(this);
}
}
}
public Set<TafWeatherCondition> getWeather() {
@ -909,6 +924,11 @@ public class ChangeGroup extends PersistableDataObject {
public void setWeather(Set<TafWeatherCondition> weather) {
this.weather = weather;
if ((weather != null) && (weather.size() > 0)) {
for (TafWeatherCondition twc : weather) {
twc.setParentID(this);
}
}
}
public Set<TafSkyCover> getSky_cover() {
@ -917,6 +937,11 @@ public class ChangeGroup extends PersistableDataObject {
public void setSky_cover(Set<TafSkyCover> sky_cover) {
this.sky_cover = sky_cover;
if ((sky_cover != null) && (sky_cover.size() > 0)) {
for (TafSkyCover tsc : sky_cover) {
tsc.setParentID(this);
}
}
}
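// Descriptive sketch of the back-reference wiring these setters now enforce
// (variable names illustrative):
//   ChangeGroup group = new ChangeGroup();
//   group.setSky_cover(skyCovers);   // each TafSkyCover gets setParentID(group)
//   group.setWeather(weather);       // likewise for weather, icing, turbulence, temps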
private void checkGroupDataEnd(StringBuilder group) {

View file

@ -65,6 +65,8 @@ import com.raytheon.uf.common.serialization.annotations.DynamicSerializeElement;
* PluginDataObject.
* Aug 30, 2013 2298 rjpeter Make getPluginName abstract
* Nov 01, 2013 2361 njensen Remove XML annotations
* Feb 10, 2014 2777 rferrel Assign parent id when setting ChangeGroup.
* Feb 11, 2014 2784 rferrel Remove override of setIdentifier.
*
* </pre>
*
@ -277,6 +279,11 @@ public class TafRecord extends PluginDataObject implements ISpatialEnabled {
*/
public void setChangeGroups(Set<ChangeGroup> changeGroups) {
this.changeGroups = changeGroups;
if ((changeGroups != null) && (changeGroups.size() > 0)) {
for (ChangeGroup changeGroup : changeGroups) {
changeGroup.setParentID(this);
}
}
}
/**
@ -324,19 +331,6 @@ public class TafRecord extends PluginDataObject implements ISpatialEnabled {
this.remarks = remarks;
}
@Override
public void setIdentifier(Object dataURI) {
this.identifier = dataURI;
if ((this.changeGroups != null) && (this.changeGroups.size() > 0)) {
for (ChangeGroup group : this.changeGroups) {
group.setParentID(this);
}
}
}
@Override
public ObStation getSpatialObject() {
return location;

View file

@ -11,13 +11,13 @@
<bean id="textDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="text" />
<constructor-arg value="jms-dist:queue:Ingest.Text"/>
<constructor-arg value="jms-durable:queue:Ingest.Text"/>
</bean>
<bean id="textHandleoupDistRegistry" factory-bean="handleoupDistributionSrv"
factory-method="register">
<constructor-arg value="text" />
<constructor-arg value="jms-dist:queue:Ingest.Text"/>
<constructor-arg value="jms-durable:queue:Ingest.Text"/>
</bean>
<!-- define the bean that handles automatic faxing of products. -->
@ -27,18 +27,6 @@
<!-- verify text product info for site, spawns in separate thread to not delay start up -->
<bean id="textVersionPurge" class="com.raytheon.edex.plugin.text.TextVersionPurge" depends-on="textRegistered"/>
<bean id="jms-text" class="org.apache.camel.component.jms.JmsComponent">
<constructor-arg ref="jmsTextConfig" />
<property name="taskExecutor" ref="textThreadPool" />
</bean>
<bean id="jmsTextConfig" class="org.apache.camel.component.jms.JmsConfiguration"
factory-bean="jmsDurableConfig" factory-method="copy"/>
<bean id="textThreadPool"
class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="2" />
<property name="maxPoolSize" value="2" />
</bean>
<!-- Special handler for text plugin archives that bases filenames off
creation time-->
<bean id="textArchiveNamer" class="com.raytheon.edex.plugin.text.maintenance.archiver.TextArchiveFileNameFormatter" />
@ -118,7 +106,7 @@
</route>
<route id="textUndecodedIngestRoute">
<from uri="jms-text:queue:Ingest.Text?concurrentConsumers=2" />
<from uri="jms-durable:queue:Ingest.Text?concurrentConsumers=2" />
<setHeader headerName="pluginName">
<constant>text</constant>
</setHeader>
@ -145,7 +133,7 @@
<route id="textToWatchWarnRoute">
<from uri="direct:textToWatchWarn" />
<bean ref="textDecoder" method="transformToProductIds" />
<to uri="jms-text:queue:watchwarn" />
<to uri="jms-durable:queue:watchwarn" />
</route>
<route id="textSerializationRoute">

View file

@ -9,7 +9,7 @@
<bean id="textlightningDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="textlightning" />
<constructor-arg value="jms-dist:queue:Ingest.textlightning"/>
<constructor-arg value="jms-durable:queue:Ingest.textlightning"/>
</bean>
<bean id="textlightningCamelRegistered" factory-bean="contextManager"

View file

@ -8,13 +8,13 @@
<bean id="warningDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="warning" />
<constructor-arg value="jms-dist:queue:Ingest.Warning"/>
<constructor-arg value="jms-durable:queue:Ingest.Warning"/>
</bean>
<bean id="warningHandleoupDistRegistry" factory-bean="handleoupDistributionSrv"
factory-method="register">
<constructor-arg value="warning" />
<constructor-arg value="jms-dist:queue:Ingest.Warning"/>
<constructor-arg value="jms-durable:queue:Ingest.Warning"/>
</bean>
<bean id="warningCamelRegistered" factory-bean="contextManager"
@ -22,18 +22,6 @@
<constructor-arg ref="warning-camel"/>
</bean>
<bean id="jms-warning" class="org.apache.camel.component.jms.JmsComponent">
<constructor-arg ref="jmsWarningConfig" />
<property name="taskExecutor" ref="warningThreadPool" />
</bean>
<bean id="jmsWarningConfig" class="org.apache.camel.component.jms.JmsConfiguration"
factory-bean="jmsDurableConfig" factory-method="copy"/>
<bean id="warningThreadPool"
class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="1" />
<property name="maxPoolSize" value="1" />
</bean>
<camelContext id="warning-camel"
xmlns="http://camel.apache.org/schema/spring"
errorHandlerRef="errorHandler"
@ -56,7 +44,7 @@
Warning routes
-->
<route id="warningIngestRoute">
<from uri="jms-warning:queue:Ingest.Warning"/>
<from uri="jms-durable:queue:Ingest.Warning"/>
<setHeader headerName="pluginName">
<constant>warning</constant>
</setHeader>
@ -67,10 +55,10 @@
<bean ref="index" method="index" />
<bean ref="processUtil" method="log" />
<multicast parallelProcessing="false">
<to uri="direct-vm:warningIngestAlert" />
<to uri="direct-vm:stageNotification" />
<filter>
<method bean="vtecFilter" method="hasVTEC" />
<to uri="jms-warning:queue:activeTablePending"/>
<to uri="jms-durable:queue:activeTablePending"/>
</filter>
</multicast>
</pipeline>
@ -80,10 +68,5 @@
</doCatch>
</doTry>
</route>
<route id="warningIngestAlert">
<from uri="direct-vm:warningIngestAlert" />
<to uri="direct-vm:stageNotification" />
</route>
</camelContext>
</beans>
</beans>

View file

@ -41,12 +41,18 @@
</bean>
<!-- JmsPooled* do not work... -->
<bean id="jms-alt" class="org.apache.camel.component.jms.JmsComponent">
<constructor-arg ref="jmsAltConfig" />
<property name="taskExecutor" ref="genericThreadPool" />
<bean id="jms-mhs" class="org.apache.camel.component.jms.JmsComponent">
<constructor-arg ref="jmsMhsConfig" />
<property name="taskExecutor" ref="mhsThreadPool" />
</bean>
<bean id="jmsAltConfig" class="org.apache.camel.component.jms.JmsConfiguration">
<bean id="mhsThreadPool"
class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="1" />
<property name="maxPoolSize" value="1" />
</bean>
<bean id="jmsMhsConfig" class="org.apache.camel.component.jms.JmsConfiguration">
<property name="recoveryInterval" value="1000"/>
<property name="connectionFactory" ref="amqConnectionFactory" />
<property name="destinationResolver" ref="qpidNoDurableResolver"/>
@ -64,7 +70,7 @@
<!-- Convert from BeanInvocation if needed. -->
<convertBodyTo type="com.raytheon.uf.common.dataplugin.text.request.RemoteRetrievalRequest" />
<bean ref="serializationUtil" method="transformToThrift" />
<to uri="jms-alt:queue:fxa.mhs.request?requestTimeout=122000" pattern="InOut" />
<to uri="jms-mhs:queue:fxa.mhs.request?requestTimeout=122000" pattern="InOut" />
<bean ref="serializationUtil" method="transformFromThrift" />
</route>
</camelContext>

View file

@ -97,6 +97,7 @@ import com.raytheon.uf.common.time.util.TimeUtil;
* May 07, 2013 1869 bsteffen Remove dataURI column from
* PluginDataObject.
* Aug 30, 2013 2298 rjpeter Make getPluginName abstract
* Feb 11, 2014 2784 rferrel Remove override of setIdentifier.
* </pre>
*
* @author bphillip
@ -984,6 +985,11 @@ public class MetarRecord extends PersistablePluginDataObject implements
*/
public void setSkyCoverage(Set<SkyCover> skyCoverage) {
this.skyCoverage = skyCoverage;
if ((skyCoverage != null) && (skyCoverage.size() > 0)) {
for (SkyCover cover : skyCoverage) {
cover.setParentMetar(this);
}
}
}
public void addSkyCoverage(SkyCover cover) {
@ -1019,6 +1025,11 @@ public class MetarRecord extends PersistablePluginDataObject implements
*/
public void setWeatherCondition(List<WeatherCondition> weatherCondition) {
this.weatherCondition = weatherCondition;
if ((weatherCondition != null) && (weatherCondition.size() > 0)) {
for (WeatherCondition cond : weatherCondition) {
cond.setParentMetar(this);
}
}
}
public void addWeatherCondition(WeatherCondition condition) {
@ -1116,30 +1127,6 @@ public class MetarRecord extends PersistablePluginDataObject implements
this.snowWater = snowWater;
}
/**
* Override existing set method to modify any classes that use the dataURI
* as a foreign key
*/
@Override
public void setIdentifier(Object dataURI) {
this.identifier = dataURI;
// set the parentID to the dataURI for all values
if ((this.getWeatherCondition() != null)
&& (this.getWeatherCondition().size() > 0)) {
for (WeatherCondition cond : this.getWeatherCondition()) {
cond.setParentMetar(this);
}
}
// set the parentID to the dataURI for all values
if ((this.getSkyCoverage() != null)
&& (this.getSkyCoverage().size() > 0)) {
for (SkyCover cover : this.getSkyCoverage()) {
cover.setParentMetar(this);
}
}
}
public String getReportType() {
return reportType;
}
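The two setters above now wire each child record back to its parent at assignment time, which is what allowed the setIdentifier override further down to be removed. A minimal, self-contained sketch of that back-reference pattern (Parent and Child are illustrative names, not the AWIPS types):

    import java.util.HashSet;
    import java.util.Set;

    // Sketch of the parent back-reference pattern: assigning the child
    // collection immediately points each child at its parent, so no
    // later identifier hook is needed.
    class Parent {
        private Set<Child> children = new HashSet<Child>();

        public void setChildren(Set<Child> children) {
            this.children = children;
            if (children != null) {
                for (Child c : children) {
                    c.setParent(this);
                }
            }
        }
    }

    class Child {
        private Parent parent;

        public void setParent(Parent parent) {
            this.parent = parent;
        }
    }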

View file

@ -28,7 +28,6 @@ import javax.xml.bind.annotation.XmlElement;
import com.raytheon.uf.common.serialization.ISerializableObject;
import com.raytheon.uf.common.serialization.annotations.DynamicSerialize;
import com.raytheon.uf.common.serialization.annotations.DynamicSerializeElement;
/**
* This is the root class for any object being persisted in the database using
@ -50,6 +49,7 @@ import com.raytheon.uf.common.serialization.annotations.DynamicSerializeElement;
* 7/24/07 353 bphillip Initial Check in
* 20080408 1039 jkorman Added traceId for tracing data.
* Oct 10, 2012 1261 djohnson Add generic for identifier.
* Feb 11, 2014 2784 rferrel Identifier no longer a DynamicSerializeElement.
*
* </pre>
*
@ -59,8 +59,8 @@ import com.raytheon.uf.common.serialization.annotations.DynamicSerializeElement;
@XmlAccessorType(XmlAccessType.NONE)
@DynamicSerialize
public abstract class PersistableDataObject<IDENTIFIER_TYPE> implements
IPersistableDataObject<IDENTIFIER_TYPE>,
Serializable, ISerializableObject {
IPersistableDataObject<IDENTIFIER_TYPE>, Serializable,
ISerializableObject {
private static final long serialVersionUID = -6747395152869923909L;
@ -69,7 +69,6 @@ public abstract class PersistableDataObject<IDENTIFIER_TYPE> implements
* key for the associated database table.
*/
@XmlElement
@DynamicSerializeElement
protected IDENTIFIER_TYPE identifier;
private String traceId = "";

View file

@ -5,6 +5,7 @@ Bundle-SymbolicName: com.raytheon.uf.common.jms
Bundle-Version: 1.12.1174.qualifier
Bundle-Vendor: Raytheon
Require-Bundle: javax.jms,
com.raytheon.uf.common.status
com.raytheon.uf.common.status,
org.apache.qpid
Export-Package: com.raytheon.uf.common.jms
Bundle-RequiredExecutionEnvironment: JavaSE-1.6

View file

@ -39,7 +39,8 @@ import com.raytheon.uf.common.status.UFStatus.Priority;
* connection can be released to the pool. Any exception will close pooled
* session instead of returning to the pool. The sessions are tracked in both
* active and available states. An available session can be reused by the next
* client.
* client. The connection is pinned to the Thread that creates the connection and
* cannot be used/reused by any other thread.
*
* Synchronization Principle To prevent deadlocks: Chained sync blocks can only
* happen in a downward direction. A manager has a synchronized lock can make a
@ -52,9 +53,11 @@ import com.raytheon.uf.common.status.UFStatus.Priority;
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Apr 15, 2011 rjpeter Initial creation
* Mar 08, 2012 194 njensen Improved safety of close()
* Feb 21, 2013 1642 rjpeter Fix deadlock scenario
* Apr 15, 2011 rjpeter Initial creation.
* Mar 08, 2012 194 njensen Improved safety of close().
* Feb 21, 2013 1642 rjpeter Fix deadlock scenario.
* Feb 07, 2014 2357       rjpeter     Track by Thread, close session if it has no
* producers/consumers.
* </pre>
*
* @author rjpeter
@ -86,7 +89,7 @@ public class JmsPooledConnection implements ExceptionListener {
private volatile AvailableJmsPooledObject<JmsPooledSession> availableSession = null;
private volatile String key = null;
private final Thread thread;
private final String clientId;
@ -94,8 +97,10 @@ public class JmsPooledConnection implements ExceptionListener {
private volatile boolean exceptionOccurred = false;
public JmsPooledConnection(JmsPooledConnectionFactory connFactory) {
public JmsPooledConnection(JmsPooledConnectionFactory connFactory,
Thread thread) {
this.connFactory = connFactory;
this.thread = thread;
this.clientId = null;
getConnection();
}
@ -123,7 +128,8 @@ public class JmsPooledConnection implements ExceptionListener {
if (availableSession != null) {
JmsPooledSession availSess = availableSession.getPooledObject();
synchronized (availSess.getStateLock()) {
if (availSess.isValid()) {
if (availSess.isValid()
&& availSess.hasProducersOrConsumers()) {
availSess.setState(State.InUse);
session = availSess;
} else {
@ -185,6 +191,7 @@ public class JmsPooledConnection implements ExceptionListener {
}
if (canClose) {
statusHandler.info("Closing connection: " + this.toString());
// njensen: I moved removing the connection from the pool to be
// the first thing in this block instead of last thing so
// there's no chance it could be closed and then retrieved from
@ -283,6 +290,8 @@ public class JmsPooledConnection implements ExceptionListener {
// safe since conn is volatile
synchronized (stateLock) {
if (conn == null) {
statusHandler.info("Creating connection: "
+ this.toString());
long exceptionLastHandled = 0;
boolean connected = false;
while (!connected) {
@ -298,7 +307,7 @@ public class JmsPooledConnection implements ExceptionListener {
connectionStartTime = System.currentTimeMillis();
connected = true;
} catch (Exception e) {
if (exceptionLastHandled + ERROR_BROADCAST_INTERVAL < System
if ((exceptionLastHandled + ERROR_BROADCAST_INTERVAL) < System
.currentTimeMillis()) {
exceptionLastHandled = System
.currentTimeMillis();
@ -502,12 +511,8 @@ public class JmsPooledConnection implements ExceptionListener {
}
}
public void setKey(String key) {
this.key = key;
}
public String getKey() {
return key;
public Thread getThread() {
return thread;
}
/**

View file

@ -57,7 +57,8 @@ import com.raytheon.uf.common.status.UFStatus.Priority;
* ------------ ---------- ----------- --------------------------
* Apr 15, 2011 rjpeter Initial creation
* Oct 04, 2013 2357 rjpeter Removed pooling, keeps resources open for the
* thread that created them for a configured amount of time
* thread that created them for a configured amount of time.
* Feb 07, 2014 2357       rjpeter     Track by Thread object, periodically check that tracked Threads are still alive.
* </pre>
*
* @author rjpeter
@ -72,11 +73,11 @@ public class JmsPooledConnectionFactory implements ConnectionFactory {
private String provider = "QPID";
// connections in use, key is "threadId-threadName"
private final Map<String, JmsPooledConnection> inUseConnections = new HashMap<String, JmsPooledConnection>();
// connections in use
private final Map<Thread, JmsPooledConnection> inUseConnections = new HashMap<Thread, JmsPooledConnection>();
// connections that were recently returned, key is "threadId-threadName"
private final Map<String, AvailableJmsPooledObject<JmsPooledConnection>> pendingConnections = new HashMap<String, AvailableJmsPooledObject<JmsPooledConnection>>();
// connections that were recently returned
private final Map<Thread, AvailableJmsPooledObject<JmsPooledConnection>> pendingConnections = new HashMap<Thread, AvailableJmsPooledObject<JmsPooledConnection>>();
private final ConcurrentLinkedQueue<JmsPooledConnection> deadConnections = new ConcurrentLinkedQueue<JmsPooledConnection>();
@ -95,24 +96,23 @@ public class JmsPooledConnectionFactory implements ConnectionFactory {
*/
@Override
public Connection createConnection() throws JMSException {
String threadKey = "" + Thread.currentThread().getId() + "-"
+ Thread.currentThread().getName();
Thread thread = Thread.currentThread();
JmsPooledConnection conn = null;
synchronized (inUseConnections) {
conn = inUseConnections.get(threadKey);
conn = inUseConnections.get(thread);
if (conn != null) {
JmsConnectionWrapper ref = conn.createReference();
if (ref != null) {
statusHandler
.info(threadKey
.info(thread.getName()
+ " already has a connection in use, returning previous connection thread, references="
+ conn.getReferenceCount());
return ref;
} else {
deadConnections.add(conn);
inUseConnections.remove(threadKey);
inUseConnections.remove(thread);
conn = null;
}
}
@ -122,13 +122,13 @@ public class JmsPooledConnectionFactory implements ConnectionFactory {
// check connections by Thread
synchronized (pendingConnections) {
wrapper = pendingConnections.remove(threadKey);
wrapper = pendingConnections.remove(thread);
}
// was retrieved connection valid
if (wrapper != null) {
conn = wrapper.getPooledObject();
JmsConnectionWrapper ref = getConnectionWrapper(threadKey, conn);
JmsConnectionWrapper ref = getConnectionWrapper(conn);
if (ref != null) {
return ref;
@ -140,22 +140,20 @@ public class JmsPooledConnectionFactory implements ConnectionFactory {
// create new connection?
if (conn == null) {
conn = new JmsPooledConnection(this);
conn = new JmsPooledConnection(this, thread);
}
return getConnectionWrapper(threadKey, conn);
return getConnectionWrapper(conn);
}
private JmsConnectionWrapper getConnectionWrapper(String threadKey,
JmsPooledConnection conn) {
private JmsConnectionWrapper getConnectionWrapper(JmsPooledConnection conn) {
synchronized (conn.getStateLock()) {
if (conn.isValid()) {
conn.setState(State.InUse);
JmsConnectionWrapper ref = conn.createReference();
if (ref != null) {
conn.setKey(threadKey);
synchronized (inUseConnections) {
inUseConnections.put(threadKey, conn);
inUseConnections.put(conn.getThread(), conn);
}
return ref;
}
@ -207,13 +205,13 @@ public class JmsPooledConnectionFactory implements ConnectionFactory {
}
public void removeConnectionFromPool(JmsPooledConnection conn) {
String threadKey = conn.getKey();
Thread thread = conn.getThread();
boolean success = false;
// remove it from inUseConnections if it was in use, theoretically could
// go by connection state, but may miss something due to threading
synchronized (inUseConnections) {
JmsPooledConnection inUse = inUseConnections.remove(threadKey);
JmsPooledConnection inUse = inUseConnections.remove(thread);
// make sure the one we removed is indeed this connection, 99%
// of time this is correct
@ -228,7 +226,7 @@ public class JmsPooledConnectionFactory implements ConnectionFactory {
// really only here for bullet proofing code against bad
// use of pool
if (inUse != null) {
inUseConnections.put(threadKey, inUse);
inUseConnections.put(thread, inUse);
}
}
}
@ -236,13 +234,13 @@ public class JmsPooledConnectionFactory implements ConnectionFactory {
// remove it from pendingConnections
AvailableJmsPooledObject<JmsPooledConnection> pooledObj = null;
synchronized (pendingConnections) {
pooledObj = pendingConnections.remove(threadKey);
pooledObj = pendingConnections.remove(thread);
if (pooledObj != null) {
if (pooledObj.getPooledObject() == conn) {
// found conn, done
return;
} else {
pendingConnections.put(threadKey, pooledObj);
pendingConnections.put(thread, pooledObj);
}
}
}
@ -250,10 +248,10 @@ public class JmsPooledConnectionFactory implements ConnectionFactory {
public boolean returnConnectionToPool(JmsPooledConnection conn) {
boolean success = false;
String threadKey = conn.getKey();
Thread thread = conn.getThread();
synchronized (inUseConnections) {
JmsPooledConnection inUse = inUseConnections.remove(threadKey);
JmsPooledConnection inUse = inUseConnections.remove(thread);
// make sure the one we removed is indeed this connection, 99%
// of time this is correct
@ -265,7 +263,7 @@ public class JmsPooledConnectionFactory implements ConnectionFactory {
// really only here for bullet proofing code against bad
// use of pool
if (inUse != null) {
inUseConnections.put(threadKey, inUse);
inUseConnections.put(thread, inUse);
statusHandler
.handle(Priority.INFO,
"Another connection already in use for this thread, not returning this connection to pool");
@ -279,7 +277,7 @@ public class JmsPooledConnectionFactory implements ConnectionFactory {
AvailableJmsPooledObject<JmsPooledConnection> prev = null;
synchronized (pendingConnections) {
prev = pendingConnections
.put(threadKey,
.put(thread,
new AvailableJmsPooledObject<JmsPooledConnection>(
conn));
}
@ -317,6 +315,19 @@ public class JmsPooledConnectionFactory implements ConnectionFactory {
}
}
// check for dead threads
synchronized (inUseConnections) {
Iterator<Map.Entry<Thread, JmsPooledConnection>> iter = inUseConnections
.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<Thread, JmsPooledConnection> entry = iter.next();
if (!entry.getKey().isAlive()) {
iter.remove();
deadConnections.add(entry.getValue());
}
}
}
while (!deadConnections.isEmpty()) {
JmsPooledConnection conn = deadConnections.poll();
if (conn != null) {
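The factory now keys live connections by the owning Thread object and periodically sweeps out entries whose thread has died, moving them to the dead list for closing. A stripped-down sketch of that tracking scheme follows; the class and method names are illustrative, the real behavior is the code above.

    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ConcurrentLinkedQueue;

    // Sketch only: per-thread resource tracking with a dead-thread sweep,
    // mirroring the inUseConnections/deadConnections handling above.
    class ThreadKeyedPool<R> {
        private final Map<Thread, R> inUse = new HashMap<Thread, R>();
        private final ConcurrentLinkedQueue<R> dead = new ConcurrentLinkedQueue<R>();

        public synchronized R forCurrentThread(Callable<R> creator) throws Exception {
            Thread t = Thread.currentThread();
            R resource = inUse.get(t);
            if (resource == null) {
                resource = creator.call();
                inUse.put(t, resource);
            }
            return resource;
        }

        public synchronized void reapDeadThreads() {
            Iterator<Map.Entry<Thread, R>> iter = inUse.entrySet().iterator();
            while (iter.hasNext()) {
                Map.Entry<Thread, R> entry = iter.next();
                if (!entry.getKey().isAlive()) {
                    iter.remove();
                    dead.add(entry.getValue()); // closed later by the caller
                }
            }
        }
    }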

View file

@ -26,6 +26,8 @@ import javax.jms.Destination;
import javax.jms.JMSException;
import javax.jms.MessageConsumer;
import org.apache.qpid.client.BasicMessageConsumer;
import com.raytheon.uf.common.jms.wrapper.JmsConsumerWrapper;
import com.raytheon.uf.common.status.IUFStatusHandler;
import com.raytheon.uf.common.status.UFStatus;
@ -47,9 +49,10 @@ import com.raytheon.uf.common.status.UFStatus.Priority;
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Apr 18, 2011 rjpeter Initial creation
* Mar 08, 2012 194 njensen Improved logging
* Feb 26, 2013 1642 rjpeter Removed lazy initialization
* Apr 18, 2011 rjpeter Initial creation.
* Mar 08, 2012 194 njensen Improved logging.
* Feb 26, 2013 1642 rjpeter Removed lazy initialization.
* Feb 07, 2014 2357 rjpeter Updated logging.
* </pre>
*
* @author rjpeter
@ -87,6 +90,14 @@ public class JmsPooledConsumer {
this.destKey = destKey;
consumer = sess.getSession().createConsumer(destination,
messageSelector);
if (consumer instanceof BasicMessageConsumer) {
statusHandler.info("Creating AMQ consumer "
+ ((BasicMessageConsumer) consumer).getDestination()
.getQueueName()); // njensen
} else {
statusHandler.info("Creating consumer " + destKey); // njensen
}
}
public String getDestKey() {
@ -164,7 +175,13 @@ public class JmsPooledConsumer {
if (close) {
try {
statusHandler.info("Closing consumer " + destKey); // njensen
if (consumer instanceof BasicMessageConsumer) {
statusHandler.info("Closing AMQ consumer "
+ ((BasicMessageConsumer) consumer)
.getDestination().getQueueName()); // njensen
} else {
statusHandler.info("Closing consumer " + destKey); // njensen
}
consumer.close();
} catch (Throwable e) {
statusHandler.handle(Priority.WARN, "Failed to close consumer "

View file

@ -25,6 +25,9 @@ import java.util.List;
import javax.jms.JMSException;
import javax.jms.MessageProducer;
import org.apache.qpid.client.AMQDestination;
import org.apache.qpid.client.BasicMessageProducer;
import com.raytheon.uf.common.jms.wrapper.JmsProducerWrapper;
import com.raytheon.uf.common.status.IUFStatusHandler;
import com.raytheon.uf.common.status.UFStatus;
@ -46,9 +49,10 @@ import com.raytheon.uf.common.status.UFStatus.Priority;
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Apr 18, 2011 rjpeter Initial creation
* Mar 08, 2012 194 njensen Improved logging
* Feb 26, 2013 1642 rjpeter Removed lazy initialization
* Apr 18, 2011 rjpeter Initial creation.
* Mar 08, 2012 194 njensen Improved logging.
* Feb 26, 2013 1642 rjpeter Removed lazy initialization.
* Feb 07, 2014 2357 rjpeter Updated logging.
* </pre>
*
* @author rjpeter
@ -84,6 +88,19 @@ public class JmsPooledProducer {
this.sess = sess;
this.destKey = destKey;
this.producer = producer;
if (producer instanceof BasicMessageProducer) {
try {
statusHandler.info("Creating AMQ producer "
+ ((AMQDestination) ((BasicMessageProducer) producer)
.getDestination()).getQueueName());
} catch (Exception e) {
statusHandler
.error("Could not get producer destination for key "
+ destKey, e);
}
} else {
statusHandler.info("Creating producer " + destKey); // njensen
}
}
public String getDestKey() {
@ -161,7 +178,14 @@ public class JmsPooledProducer {
if (close) {
try {
statusHandler.info("Closing producer " + destKey); // njensen
if (producer instanceof BasicMessageProducer) {
statusHandler
.info("Closing AMQ producer "
+ ((AMQDestination) ((BasicMessageProducer) producer)
.getDestination()).getQueueName()); // njensen
} else {
statusHandler.info("Closing producer " + destKey); // njensen
}
producer.close();
} catch (Throwable e) {
statusHandler.handle(Priority.WARN, "Failed to close producer",

View file

@ -44,7 +44,9 @@ import com.raytheon.uf.common.status.UFStatus.Priority;
* can be released to the pool. Any exception will close pooled session instead
* of returning to the pool. The consumers/producers are tracked in both active
* and available states. An available consumer/producer can be reused by the
* next client.
* next client. Once a consumer has been closed the entire session is closed at
* next opportunity since QPID tracks consumers at the session level. Not doing
* this can leave a topic with no consumers on the qpid broker.
*
* Synchronization Principle To prevent deadlocks: Chained sync blocks can only
* happen in a downward direction. A manager has a synchronized lock can make a
@ -61,6 +63,8 @@ import com.raytheon.uf.common.status.UFStatus.Priority;
* Mar 08, 2012 194 njensen Improved logging
* Feb 21, 2013 1642 rjpeter Fix deadlock scenario
* Jan 26, 2014 2357 rjpeter Close a session when it has no producers or consumers.
* Feb 07, 2014 2357 rjpeter Close session at next return to pool after a
* consumer has closed.
* </pre>
*
* @author rjpeter
@ -78,7 +82,7 @@ public class JmsPooledSession {
// The thread this session was most recently used by for tracking a pending
// session that is being reserved for a given thread.
private String threadKey;
private final Thread thread;
private volatile boolean exceptionOccurred = false;
@ -86,6 +90,10 @@ public class JmsPooledSession {
private volatile State state = State.InUse;
// flag to state that session should be closed instead of returned to pool on
// next iteration
private volatile boolean shouldClose = false;
// keeps track of number of creates vs. closes to know when it can be
// returned to the pool
private final List<JmsSessionWrapper> references = new ArrayList<JmsSessionWrapper>(
@ -102,6 +110,8 @@ public class JmsPooledSession {
public JmsPooledSession(JmsPooledConnection conn, Session sess) {
this.conn = conn;
this.sess = sess;
this.thread = conn.getThread();
statusHandler.info("Opening session: " + this.toString());
}
public long getCreateTime() {
@ -121,12 +131,8 @@ public class JmsPooledSession {
return conn;
}
public String getThreadKey() {
return threadKey;
}
public void setThreadKey(String threadKey) {
this.threadKey = threadKey;
public Thread getThread() {
return thread;
}
public boolean isValid() {
@ -437,6 +443,9 @@ public class JmsPooledSession {
String destKey = consumer.getDestKey();
boolean removed = false;
// a consumer was closed, close the session at next opportunity
shouldClose = true;
synchronized (inUseConsumers) {
JmsPooledConsumer inUse = inUseConsumers.remove(destKey);
removed = inUse == consumer;
@ -463,6 +472,7 @@ public class JmsPooledSession {
}
if (canClose) {
statusHandler.info("Closing session: " + this.toString());
closePooledConsumersProducers();
// need to close down all wrappers
@ -628,7 +638,7 @@ public class JmsPooledSession {
}
}
boolean valid = isValid() && hasProducersOrConsumers();
boolean valid = isValid() && !shouldClose && hasProducersOrConsumers();
if (valid && returnToPool) {
valid = conn.returnSessionToPool(this);
}
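The shouldClose flag above enforces the rule described in the class comment: once any consumer on the session has been closed, the session itself is closed at the next return to the pool rather than reused, because QPID tracks consumers at the session level. A minimal sketch of that life-cycle decision, with illustrative names and a simplified consumer count:

    // Sketch of the close-on-next-return rule: once any consumer is
    // closed, the session is never handed back to the pool again.
    class SessionLifecycleSketch {
        private volatile boolean shouldClose = false;
        private int openConsumers = 2; // illustrative count

        void consumerClosed() {
            openConsumers--;
            shouldClose = true; // never reuse this session again
        }

        boolean returnToPool() {
            boolean reusable = !shouldClose && openConsumers > 0;
            if (!reusable) {
                close();
            }
            return reusable;
        }

        void close() {
            // tear down remaining producers/consumers, then the JMS session
        }
    }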

View file

@ -37,8 +37,9 @@ import com.raytheon.uf.common.jms.JmsPooledConsumer;
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Apr 18, 2011 rjpeter Initial creation
* Apr 18, 2011 rjpeter Initial creation.
* Feb 26, 2013 1642 rjpeter Added volatile references for better concurrency handling.
* Feb 07, 2014 2357 rjpeter Set linked exception in exception handling.
* </pre>
*
* @author rjpeter
@ -124,8 +125,11 @@ public class JmsConsumerWrapper implements MessageConsumer {
} catch (Throwable e) {
exceptionOccurred = true;
JMSException exc = new JMSException(
"Exception occurred on pooled consumer");
"Exception occurred on pooled consumer in getMessageListener");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -144,8 +148,11 @@ public class JmsConsumerWrapper implements MessageConsumer {
} catch (Throwable e) {
exceptionOccurred = true;
JMSException exc = new JMSException(
"Exception occurred on pooled consumer");
"Exception occurred on pooled consumer in getMessageSelector");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -164,8 +171,11 @@ public class JmsConsumerWrapper implements MessageConsumer {
} catch (Throwable e) {
exceptionOccurred = true;
JMSException exc = new JMSException(
"Exception occurred on pooled consumer");
"Exception occurred on pooled consumer in receive");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -184,8 +194,11 @@ public class JmsConsumerWrapper implements MessageConsumer {
} catch (Throwable e) {
exceptionOccurred = true;
JMSException exc = new JMSException(
"Exception occurred on pooled consumer");
"Exception occurred on pooled consumer in receive(timeout)");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -204,8 +217,11 @@ public class JmsConsumerWrapper implements MessageConsumer {
} catch (Throwable e) {
exceptionOccurred = true;
JMSException exc = new JMSException(
"Exception occurred on pooled consumer");
"Exception occurred on pooled consumer in receiveNoWait");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -226,8 +242,11 @@ public class JmsConsumerWrapper implements MessageConsumer {
} catch (Throwable e) {
exceptionOccurred = true;
JMSException exc = new JMSException(
"Exception occurred on pooled consumer");
"Exception occurred on pooled consumer in setMessageLister");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
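Every catch block in this wrapper now records the original failure both as the cause and as the JMS linked exception, so callers that only inspect JMSException.getLinkedException() still see it. The diff repeats that logic inline; a small helper capturing the same pattern would look roughly like this (the helper itself is illustrative, not part of the change):

    import javax.jms.JMSException;

    // Illustrative helper for the wrapping pattern used in the catch
    // blocks above: keep the original failure reachable via both
    // getCause() and getLinkedException().
    final class JmsExceptions {
        private JmsExceptions() {
        }

        static JMSException wrap(String message, Throwable t) {
            JMSException exc = new JMSException(message);
            exc.initCause(t);
            if (t instanceof Exception) {
                exc.setLinkedException((Exception) t);
            }
            return exc;
        }
    }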

View file

@ -37,9 +37,10 @@ import com.raytheon.uf.common.jms.JmsPooledProducer;
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Dec fi8, 2011 rjpeter Initial creation
* Dec 08, 2011 rjpeter Initial creation.
* Feb 26, 2013 1642 rjpeter Added volatile references for better concurrency handling.
* Jun 07, 2013 DR 16316 rjpeter Fix memory leak
* Jun 07, 2013 DR 16316 rjpeter Fix memory leak.
* Feb 07, 2014 2357 rjpeter Set linked exception in exception handling.
* </pre>
*
* @author rjpeter
@ -122,6 +123,9 @@ public class JmsProducerWrapper implements MessageProducer {
JMSException exc = new JMSException(
"Exception occurred on pooled producer");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -142,6 +146,9 @@ public class JmsProducerWrapper implements MessageProducer {
JMSException exc = new JMSException(
"Exception occurred on pooled producer");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -162,6 +169,9 @@ public class JmsProducerWrapper implements MessageProducer {
JMSException exc = new JMSException(
"Exception occurred on pooled producer");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -182,6 +192,9 @@ public class JmsProducerWrapper implements MessageProducer {
JMSException exc = new JMSException(
"Exception occurred on pooled producer");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -202,6 +215,9 @@ public class JmsProducerWrapper implements MessageProducer {
JMSException exc = new JMSException(
"Exception occurred on pooled producer");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -222,6 +238,9 @@ public class JmsProducerWrapper implements MessageProducer {
JMSException exc = new JMSException(
"Exception occurred on pooled producer");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -242,6 +261,9 @@ public class JmsProducerWrapper implements MessageProducer {
JMSException exc = new JMSException(
"Exception occurred on pooled producer");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -264,6 +286,9 @@ public class JmsProducerWrapper implements MessageProducer {
JMSException exc = new JMSException(
"Exception occurred on pooled producer");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -285,6 +310,9 @@ public class JmsProducerWrapper implements MessageProducer {
JMSException exc = new JMSException(
"Exception occurred on pooled producer");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -309,6 +337,9 @@ public class JmsProducerWrapper implements MessageProducer {
JMSException exc = new JMSException(
"Exception occurred on pooled producer");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -329,6 +360,9 @@ public class JmsProducerWrapper implements MessageProducer {
JMSException exc = new JMSException(
"Exception occurred on pooled producer");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -349,6 +383,9 @@ public class JmsProducerWrapper implements MessageProducer {
JMSException exc = new JMSException(
"Exception occurred on pooled producer");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -369,6 +406,9 @@ public class JmsProducerWrapper implements MessageProducer {
JMSException exc = new JMSException(
"Exception occurred on pooled producer");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -389,6 +429,9 @@ public class JmsProducerWrapper implements MessageProducer {
JMSException exc = new JMSException(
"Exception occurred on pooled producer");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}
@ -409,6 +452,9 @@ public class JmsProducerWrapper implements MessageProducer {
JMSException exc = new JMSException(
"Exception occurred on pooled producer");
exc.initCause(e);
if (e instanceof Exception) {
exc.setLinkedException((Exception) e);
}
throw exc;
}
}

View file

@ -15,17 +15,6 @@
<constructor-arg value="com.raytheon.uf.common.activetable"/>
<constructor-arg ref="activeTableDatabaseProperties"/>
</bean>
<bean id="jms-activetable" class="org.apache.camel.component.jms.JmsComponent">
<constructor-arg ref="jmsActiveTableConfig" />
<property name="taskExecutor" ref="activeTableMergeThreadPool" />
</bean>
<bean id="jmsActiveTableConfig" class="org.apache.camel.component.jms.JmsConfiguration"
factory-bean="jmsConfig" factory-method="copy" />
<bean id="activeTableMergeThreadPool"
class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="2" />
<property name="maxPoolSize" value="2" />
</bean>
<bean id="timeOffsetDecoder" class="com.raytheon.uf.edex.python.decoder.TimeOffsetDecoder">
<property name="pluginName" value="warning" />
<property name="pluginFQN" value="com.raytheon.edex.plugin.warning" />
@ -45,7 +34,7 @@
<to uri="jms-generic:topic:edex.alerts.vtec?timeToLive=60000" />
</route>
<route id="practiceVtecRoute">
<from uri="jms-activetable:queue:practiceActiveTable?concurrentConsumers=1" />
<from uri="jms-generic:queue:practiceActiveTable" />
<doTry>
<bean ref="activeTable" method="dumpProductToTempFile" />
<bean ref="practiceWarningDecoder" method="decode" />

View file

@ -25,8 +25,8 @@
<from uri="jms-generic:queue:practiceNotify" />
<doTry>
<multicast parallelProcessing="false">
<to uri="jms-warning:queue:edex.spcWatch" />
<to uri="jms-warning:queue:edex.tpcWatch" />
<to uri="jms-durable:queue:edex.spcWatch" />
<to uri="jms-durable:queue:edex.tpcWatch" />
</multicast>
<doCatch>
<exception>java.lang.Throwable</exception>

View file

@ -77,6 +77,7 @@ import com.raytheon.uf.edex.database.processor.IDatabaseProcessor;
* Dec 10, 2013 2555 rjpeter Initial creation.
* Jan 23, 2014 2555 rjpeter Updated to be a row at a time using ScrollableResults.
* Feb 04, 2014 2770 rferrel The dumpPdos now dumps all PluginDataObjects.
* Feb 12, 2014 2784 rjpeter Update logging for dup elim scenarios.
* </pre>
*
* @author rjpeter
@ -194,6 +195,7 @@ public class DatabaseArchiveProcessor<T extends PersistableDataObject<?>>
if (entriesInMemory > 0) {
try {
savePdoMap(pdosByFile);
pdosByFile.clear();
int prev = recordsSaved;
recordsSaved += entriesInMemory;
statusHandler.info(pluginName + ": Processed rows " + prev
@ -436,74 +438,93 @@ public class DatabaseArchiveProcessor<T extends PersistableDataObject<?>>
}
List<PersistableDataObject<?>> pdosFromDisk = readDataFromDisk(dataFile);
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
statusHandler.debug(pluginName + ": Checking "
+ pdosFromDisk.size() + " old records from file: "
+ dataFile.getAbsolutePath());
}
Iterator<PersistableDataObject<?>> pdoIter = pdosFromDisk
.iterator();
boolean needsUpdate = false;
int dupsRemoved = 0;
int index = 0;
while (pdoIter.hasNext() && (index < dupElimUntil)) {
PersistableDataObject<?> pdo = pdoIter.next();
if (identifierSet.contains(pdo.getIdentifier())) {
pdoIter.remove();
needsUpdate = true;
dupsRemoved++;
}
index++;
}
if (statusHandler.isPriorityEnabled(Priority.DEBUG)
&& (dupsRemoved > 0)) {
statusHandler.debug(pluginName + ": Removed " + dupsRemoved
+ " old records from file: "
+ dataFile.getAbsolutePath());
}
if (!fileIter.hasNext() && (pdosFromDisk.size() < fetchSize)) {
// last file, add more data to it
needsUpdate = true;
if (prevFileStatus == null) {
prevFileStatus = new FileStatus();
prevFileStatus.dupElimUntilIndex = pdosFromDisk.size();
prevFileStatus.fileFull = pdos.size() >= fetchSize;
filesCreatedThisSession.put(dataFile.getAbsolutePath(),
prevFileStatus);
}
int numToAdd = fetchSize - pdosFromDisk.size();
numToAdd = Math.min(numToAdd, pdos.size());
if (pdosFromDisk.size() > 0) {
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
statusHandler.debug(pluginName + ": Adding " + numToAdd
+ " records to file: "
statusHandler.debug(pluginName + ": Checking "
+ pdosFromDisk.size()
+ " old records from file: "
+ dataFile.getAbsolutePath());
}
pdosFromDisk.addAll(pdos.subList(0, numToAdd));
if (numToAdd < pdos.size()) {
pdos = pdos.subList(numToAdd, pdos.size());
} else {
pdos = Collections.emptyList();
}
}
Iterator<PersistableDataObject<?>> pdoIter = pdosFromDisk
.iterator();
int dupsRemoved = 0;
int index = 0;
boolean needsUpdate = false;
if (needsUpdate) {
if (!pdosFromDisk.isEmpty()) {
writeDataToDisk(dataFile, pdosFromDisk);
if (prevFileStatus != null) {
prevFileStatus.fileFull = pdosFromDisk.size() >= fetchSize;
while (pdoIter.hasNext() && (index < dupElimUntil)) {
PersistableDataObject<?> pdo = pdoIter.next();
if (identifierSet.contains(pdo.getIdentifier())) {
pdoIter.remove();
needsUpdate = true;
dupsRemoved++;
}
index++;
}
if (dupsRemoved > 0) {
statusHandler.info(pluginName + ": Removed "
+ dupsRemoved + " old records from file: "
+ dataFile.getAbsolutePath());
}
if (!fileIter.hasNext()
&& (pdosFromDisk.size() < fetchSize)) {
// last file, add more data to it
needsUpdate = true;
if (prevFileStatus == null) {
prevFileStatus = new FileStatus();
prevFileStatus.dupElimUntilIndex = pdosFromDisk
.size();
prevFileStatus.fileFull = pdos.size() >= fetchSize;
filesCreatedThisSession.put(
dataFile.getAbsolutePath(), prevFileStatus);
}
int numToAdd = fetchSize - pdosFromDisk.size();
numToAdd = Math.min(numToAdd, pdos.size());
if (statusHandler.isPriorityEnabled(Priority.DEBUG)) {
statusHandler.debug(pluginName + ": Adding "
+ numToAdd + " records to file: "
+ dataFile.getAbsolutePath());
}
pdosFromDisk.addAll(pdos.subList(0, numToAdd));
if (numToAdd < pdos.size()) {
pdos = pdos.subList(numToAdd, pdos.size());
} else {
pdos = Collections.emptyList();
}
}
if (needsUpdate) {
if (!pdosFromDisk.isEmpty()) {
writeDataToDisk(dataFile, pdosFromDisk);
if (prevFileStatus != null) {
prevFileStatus.fileFull = pdosFromDisk.size() >= fetchSize;
}
} else {
dirsToCheckNumbering.add(dataFile.getParentFile());
if (dataFile.exists() && !dataFile.delete()) {
statusHandler
.error(pluginName
+ ": Failed to delete file ["
+ dataFile.getAbsolutePath()
+ "], all entries have been updated in later files.");
if (!dataFile.renameTo(new File(dataFile
.getAbsoluteFile() + ".bad"))) {
statusHandler.error(pluginName + ": file ["
+ dataFile.getAbsoluteFile()
+ "] cannot be renamed to .bad");
}
}
fileIter.remove();
}
} else {
dirsToCheckNumbering.add(dataFile.getParentFile());
dataFile.delete();
fileIter.remove();
}
}
}
@ -543,7 +564,13 @@ public class DatabaseArchiveProcessor<T extends PersistableDataObject<?>>
} finally {
if (!successful) {
// couldn't read in file, move it to bad
file.renameTo(new File(file.getAbsoluteFile() + ".bad"));
if (file.exists()
&& !file.renameTo(new File(file.getAbsoluteFile()
+ ".bad"))) {
statusHandler.error(pluginName + ": file ["
+ file.getAbsoluteFile()
+ "] cannot be renamed to .bad");
}
}
if (is != null) {
try {
@ -668,8 +695,9 @@ public class DatabaseArchiveProcessor<T extends PersistableDataObject<?>>
writer = new BufferedWriter(new FileWriter(dumpFile));
if (statusHandler.isPriorityEnabled(Priority.INFO)) {
statusHandler.info(String.format("%s: Dumping records to: %s",
pluginName, dumpFile.getAbsolutePath()));
statusHandler.info(String.format("%s: Dumping " + pdos.size()
+ " records to: %s", pluginName,
dumpFile.getAbsolutePath()));
}
while (pdoIter.hasNext()) {
@ -753,6 +781,8 @@ public class DatabaseArchiveProcessor<T extends PersistableDataObject<?>>
} while (size > 0);
DecimalFormat format = new DecimalFormat(formatString.toString());
statusHandler.info("Checking file numbering consistency for "
+ dir.getAbsolutePath());
for (Map.Entry<Integer, File> entry : fileMap.entrySet()) {
int fileNum = entry.getKey();
@ -771,7 +801,15 @@ public class DatabaseArchiveProcessor<T extends PersistableDataObject<?>>
}
File newFile = new File(oldFile.getParent(), newFileName);
oldFile.renameTo(newFile);
if (!oldFile.renameTo(newFile)) {
statusHandler
.error("Failed rename file "
+ oldFile.getAbsolutePath()
+ " to "
+ newFile.getAbsolutePath()
+ ". Stopping file number consistency checking.");
return;
}
}
nextFileCount++;

View file

@ -58,6 +58,7 @@ import com.raytheon.uf.edex.database.plugin.PluginFactory;
* Nov 05, 2013 2499 rjpeter Repackaged, removed config files, always compresses hdf5.
* Nov 11, 2013 2478 rjpeter Updated data store copy to always copy hdf5.
* Dec 13, 2013 2555 rjpeter Refactored logic into DatabaseArchiveProcessor.
* Feb 12, 2014 2784 rjpeter Fixed clusterLock to not update the time by default.
* </pre>
*
* @author rjpeter
@ -145,14 +146,16 @@ public class DatabaseArchiver implements IPluginArchiver {
// cluster lock, grabbing time of last successful archive
CurrentTimeClusterLockHandler lockHandler = new CurrentTimeClusterLockHandler(
CLUSTER_LOCK_TIMEOUT, dateFormat.format(runTime.getTime()),
false);
CLUSTER_LOCK_TIMEOUT, false);
ClusterTask ct = ClusterLockUtils.lock(TASK_NAME, pluginName,
lockHandler, false);
if (!LockState.SUCCESSFUL.equals(ct.getLockState())) {
return true;
}
// keep extra info the same until processing updates the time.
lockHandler.setExtraInfo(ct.getExtraInfo());
Calendar startTime = null;
long timimgStartMillis = System.currentTimeMillis();
int recordCount = 0;
@ -226,12 +229,6 @@ public class DatabaseArchiver implements IPluginArchiver {
.info(pluginName + ": Found no records to archive");
}
} catch (Throwable e) {
// previous run time needs to be reset
if (startTime != null) {
lockHandler
.setExtraInfo(dateFormat.format(startTime.getTime()));
}
statusHandler.error(pluginName + ": Error occurred archiving data",
e);
} finally {

View file

@ -20,9 +20,9 @@
package com.raytheon.uf.edex.database;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
@ -120,7 +120,7 @@ public class DatabaseSessionFactoryBean extends AnnotationSessionFactoryBean {
public void setDatabaseSessionConfiguration(
DatabaseSessionConfiguration databaseSessionConfiguration) {
// make own copy so can modify it
List<Class<?>> annotatedClasses = new ArrayList<Class<?>>(
List<Class<?>> annotatedClasses = new LinkedList<Class<?>>(
databaseSessionConfiguration.getAnnotatedClasses());
if (databaseSessionConfiguration != null) {

View file

@ -3,21 +3,6 @@
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
http://camel.apache.org/schema/spring http://camel.apache.org/schema/spring/camel-spring.xsd">
<bean id="jmsIngestHarvesterConfig" class="org.apache.camel.component.jms.JmsConfiguration"
factory-bean="jmsGenericConfig" factory-method="copy">
</bean>
<bean id="jms-harvester" class="org.apache.camel.component.jms.JmsComponent">
<constructor-arg ref="jmsIngestHarvesterConfig" />
<property name="taskExecutor" ref="harvesterThreadPool" />
</bean>
<bean id="harvesterThreadPool"
class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="${metadata-process.threads}" />
<property name="maxPoolSize" value="${metadata-process.threads}" />
</bean>
<bean id="crawlerCommunicationStrategy" class="com.raytheon.uf.edex.datadelivery.harvester.crawler.FileCommunicationStrategy" />
<bean id="MetaDataProcessor" class="com.raytheon.uf.edex.datadelivery.harvester.CrawlMetaDataHandler" depends-on="registryInit">
@ -30,11 +15,11 @@
errorHandlerRef="errorHandler">
<endpoint id="metaDataCron" uri="quartz://datadelivery/harvester/?cron=${metadata-process.cron}"/>
<endpoint id="harvesterProcessWorkEndpoint" uri="jms-harvester:queue:metaDataProcessWork?concurrentConsumers=${metadata-process.threads}"/>
<endpoint id="harvesterProcessWorkEndpoint" uri="jms-generic:queue:metaDataProcessWork?concurrentConsumers=${metadata-process.threads}&amp;threadName=harvester"/>
<route id="metaDataProcess">
<from uri="metaDataCron" />
<to uri="jms-harvester:queue:metaDataProcessWork" />
<to uri="jms-generic:queue:metaDataProcessWork" />
</route>
<route id="metaDataProcessWork">

View file

@ -8,19 +8,6 @@
<bean id="handleoupDistributionSrv" class="com.raytheon.uf.edex.distribution.DistributionSrv" />
<bean id="radarserverDistributionSrv" class="com.raytheon.uf.edex.distribution.DistributionSrv" />
<bean id="jms-dist" class="org.apache.camel.component.jms.JmsComponent">
<!-- All initial distribution queues are durable -->
<constructor-arg ref="jmsDistConfig" />
<property name="taskExecutor" ref="distributionThreadPool" />
</bean>
<bean id="jmsDistConfig" class="org.apache.camel.component.jms.JmsConfiguration"
factory-bean="jmsDurableConfig" factory-method="copy"/>
<bean id="distributionThreadPool"
class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="7" />
<property name="maxPoolSize" value="7" />
</bean>
<camelContext id="distro"
xmlns="http://camel.apache.org/schema/spring"
errorHandlerRef="errorHandler"
@ -29,7 +16,7 @@
<endpoint id="refreshDistributionCron" uri="quartz://refreshDist/refreshDistRoute/?cron=${distribution.cron}"/>
<route id="distribution">
<from uri="jms-dist:queue:external.dropbox?concurrentConsumers=5&amp;maxConcurrentConsumers=5"/>
<from uri="jms-durable:queue:external.dropbox?concurrentConsumers=5&amp;maxConcurrentConsumers=5"/>
<doTry>
<bean ref="distributionSrv" method="route" />
<doCatch>
@ -40,7 +27,7 @@
</route>
<route id="handleoupDistribution">
<from uri="jms-dist:queue:handleoup.dropbox"/>
<from uri="jms-durable:queue:handleoup.dropbox"/>
<doTry>
<bean ref="handleoupDistributionSrv" method="route" />
<doCatch>
@ -51,7 +38,7 @@
</route>
<route id="radarserverDistribution">
<from uri="jms-dist:queue:radarserver.dropbox" />
<from uri="jms-durable:queue:radarserver.dropbox" />
<doTry>
<bean ref="radarserverDistributionSrv" method="route" />
<doCatch>

View file

@ -0,0 +1,101 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.uf.edex.esb.camel.jms;
import java.util.Map;
import org.apache.camel.Endpoint;
import org.apache.camel.component.jms.JmsComponent;
import org.apache.camel.component.jms.JmsEndpoint;
import com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor;
/**
* Custom JMS component that makes dedicated thread pools for each JmsEndpoint
* based on the concurrent consumers needed. Each pool is named based on the JMS
* endpoint. Each endpoint also overrides the message listener container factory
* to monitor the created containers to see if they need to be restarted in a
* disconnect scenario.
*
* <pre>
*
* SOFTWARE HISTORY
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Feb 07, 2014 2357 rjpeter Initial creation.
* </pre>
*
* @author rjpeter
* @version 1.0
*/
public class DedicatedThreadJmsComponent extends JmsComponent {
public DedicatedThreadJmsComponent(
org.apache.camel.component.jms.JmsConfiguration jmsconfig) {
super(jmsconfig);
}
/*
* (non-Javadoc)
*
* @see
* org.apache.camel.component.jms.JmsComponent#createEndpoint(java.lang.
* String, java.lang.String, java.util.Map)
*/
@Override
protected Endpoint createEndpoint(String uri, String remaining,
Map<String, Object> parameters) throws Exception {
String threadName = (String) parameters.remove("threadName");
Endpoint e = super.createEndpoint(uri, remaining, parameters);
if (e instanceof JmsEndpoint) {
JmsEndpoint jmsE = (JmsEndpoint) e;
if ((threadName != null) && (threadName.length() > 0)
&& !threadName.endsWith("-")) {
threadName += "-";
} else {
threadName = jmsE.getDestinationName() + "-";
}
/*
* This is used for a SimpleMessageListenerContainer use case.
*
* JmsSimpleMessageListenerTaskExecutor executor = new
* JmsSimpleMessageListenerTaskExecutor(
* jmsE.getConcurrentConsumers(), threadName);
*/
// DefaultMessageListenerContainer use case
JmsThreadPoolTaskExecutor executor = new JmsThreadPoolTaskExecutor();
executor.setThreadNamePrefix(threadName);
executor.setCorePoolSize(jmsE.getConcurrentConsumers());
executor.setMaxPoolSize(Math.max(jmsE.getConcurrentConsumers(),
jmsE.getMaxConcurrentConsumers()));
executor.setQueueCapacity(0);
executor.afterPropertiesSet();
jmsE.setTaskExecutor(executor);
jmsE.setMessageListenerContainerFactory(MonitoredDefaultMessageListenerContainerFactory
.getInstance());
return jmsE;
}
throw new Exception(
"JmsComponent did not create a JmsEnpoint. Check Camel Jms Override");
}
}
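The Spring wiring for this component is not shown in this hunk; in the configuration files above it is referenced through endpoint URIs such as jms-generic:queue:metaDataProcessWork?threadName=harvester. A hedged sketch of registering it programmatically with a Camel context (connection-factory setup omitted, class and scheme names taken from this diff, the rest illustrative):

    import org.apache.camel.CamelContext;
    import org.apache.camel.component.jms.JmsConfiguration;
    import org.apache.camel.impl.DefaultCamelContext;

    // Sketch only: register the custom component under a scheme name so
    // that URIs like "jms-generic:queue:metaDataProcessWork?threadName=harvester"
    // resolve to it and get a dedicated, named thread pool per endpoint.
    public class JmsComponentWiringSketch {
        public static void main(String[] args) throws Exception {
            JmsConfiguration config = new JmsConfiguration(); // connection factory omitted here
            CamelContext context = new DefaultCamelContext();
            context.addComponent("jms-generic",
                    new DedicatedThreadJmsComponent(config));
            context.start();
        }
    }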

View file

@ -0,0 +1,142 @@
/**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.uf.edex.esb.camel.jms;
import java.util.Collection;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.camel.component.jms.DefaultJmsMessageListenerContainer;
import org.apache.camel.component.jms.JmsEndpoint;
import org.apache.camel.component.jms.MessageListenerContainerFactory;
import org.springframework.jms.listener.AbstractMessageListenerContainer;
import org.springframework.jms.listener.DefaultMessageListenerContainer;
import com.raytheon.uf.common.status.IUFStatusHandler;
import com.raytheon.uf.common.status.UFStatus;
import com.raytheon.uf.common.time.util.TimeUtil;
import com.raytheon.uf.edex.core.EDEXUtil;
/**
* Creates DefaultMessageListenerContainer instances that are then monitored
* once a minute for paused tasks. If a paused task is found the container is
* restarted. This is necessary in broker restart scenarios.
*
* <pre>
*
* SOFTWARE HISTORY
*
* Date Ticket# Engineer Description
* ------------ ---------- ----------- --------------------------
* Feb 8, 2014 2357 rjpeter Initial creation.
* </pre>
*
* @author rjpeter
* @version 1.0
*/
public class MonitoredDefaultMessageListenerContainerFactory implements
MessageListenerContainerFactory {
private static final AtomicInteger threadCount = new AtomicInteger(1);
private final Collection<DefaultJmsMessageListenerContainer> containers = new ConcurrentLinkedQueue<DefaultJmsMessageListenerContainer>();
private final IUFStatusHandler statusHandler = UFStatus
.getHandler(MonitoredDefaultMessageListenerContainerFactory.class);
private static final MonitoredDefaultMessageListenerContainerFactory instance = new MonitoredDefaultMessageListenerContainerFactory();
public static MonitoredDefaultMessageListenerContainerFactory getInstance() {
return instance;
}
private MonitoredDefaultMessageListenerContainerFactory() {
Thread containerChecker = new Thread("MessageListenerContainerMonitor-"
+ threadCount.getAndIncrement()) {
/*
* (non-Javadoc)
*
* @see java.lang.Thread#run()
*/
@Override
public void run() {
while (!EDEXUtil.isRunning()) {
try {
Thread.sleep(TimeUtil.MILLIS_PER_MINUTE);
} catch (InterruptedException e) {
// ignore
}
}
while (true) {
try {
for (DefaultMessageListenerContainer container : containers) {
if (container.getPausedTaskCount() > 0) {
StringBuilder msg = new StringBuilder(160);
msg.append("Container[")
.append(container.getDestinationName())
.append("] has paused tasks. Container is ");
if (!container.isRunning()) {
msg.append("not ");
}
msg.append("running. Container is ");
if (container.isActive()) {
msg.append("not ");
}
msg.append("active. Restarting container.");
statusHandler.warn(msg.toString());
container.start();
}
}
try {
Thread.sleep(TimeUtil.MILLIS_PER_MINUTE);
} catch (InterruptedException e) {
// ignore
}
} catch (Throwable e) {
statusHandler
.error("Error occurred in checking message listener containers",
e);
}
}
}
};
containerChecker.start();
}
/*
* (non-Javadoc)
*
* @see org.apache.camel.component.jms.MessageListenerContainerFactory#
* createMessageListenerContainer
* (org.apache.camel.component.jms.JmsEndpoint)
*/
@Override
public AbstractMessageListenerContainer createMessageListenerContainer(
JmsEndpoint endpoint) {
// track the container for monitoring in the case of a provider
// reconnect
DefaultJmsMessageListenerContainer container = new DefaultJmsMessageListenerContainer(
endpoint);
containers.add(container);
return container;
}
}

View file

@ -1,21 +0,0 @@
<beans
xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans-3.1.xsd">
<bean id="jms-notify" class="org.apache.camel.component.jms.JmsComponent">
<constructor-arg ref="jmsIngestNotifyConfig" />
<property name="taskExecutor" ref="notifyThreadPool" />
</bean>
<bean id="jmsIngestNotifyConfig" class="org.apache.camel.component.jms.JmsConfiguration"
factory-bean="jmsConfig" factory-method="copy">
</bean>
<bean id="notifyThreadPool"
class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="1" />
<property name="maxPoolSize" value="1" />
</bean>
</beans>

View file

@ -13,7 +13,7 @@
<!-- Begin Grid Process Route -->
<route id="gridStaticDataGenerationRoute">
<!-- Fed from plugin notification -->
<from uri="vm:grid-staticdata-generate" />
<from uri="jms-durable:grid-staticdata-generate" />
<doTry>
<bean ref="staticDataGenerator" method="processNotification"/>
<to uri="direct-vm:stageNotification"/>

View file

@ -1,8 +1,10 @@
<pluginNotificationList>
<pluginNotification>
<endpointName>grid-staticdata-generate</endpointName>
<endpointType>VM</endpointType>
<endpointType>QUEUE</endpointType>
<format>DATAURI</format>
<durable>true</durable>
<timeToLive>600000</timeToLive>
<metadataMap>
<mapping key="pluginName">
<constraint constraintValue="grid" constraintType="EQUALS"/>

View file

@ -9,13 +9,13 @@
<bean id="dpaDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="dpa" />
<constructor-arg value="jms-dist:queue:Ingest.dpa"/>
<constructor-arg value="jms-durable:queue:Ingest.dpa"/>
</bean>
<bean id="dpaRadarServerDistRegistry" factory-bean="radarserverDistributionSrv"
factory-method="register">
<constructor-arg value="dpa" />
<constructor-arg value="jms-dist:queue:Ingest.dpa"/>
<constructor-arg value="jms-durable:queue:Ingest.dpa"/>
</bean>
<camelContext id="dpa-camel"

View file

@ -15,7 +15,7 @@
<bean id="arealffgDistRegistry" class="com.raytheon.uf.edex.distribution.DistributionSrv"
factory-method="register">
<constructor-arg value="arealffg" />
<constructor-arg value="jms-dist:queue:Ingest.arealffg" />
<constructor-arg value="jms-durable:queue:Ingest.arealffg" />
</bean>
<camelContext id="arealffg-camel"

View file

@ -9,13 +9,13 @@
<bean id="dhrDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="dhr" />
<constructor-arg value="jms-dist:queue:Ingest.dhr"/>
<constructor-arg value="jms-durable:queue:Ingest.dhr"/>
</bean>
<bean id="dhrRadarServerDistRegistry" factory-bean="radarserverDistributionSrv"
factory-method="register">
<constructor-arg value="dhr" />
<constructor-arg value="jms-dist:queue:Ingest.dhr"/>
<constructor-arg value="jms-durable:queue:Ingest.dhr"/>
</bean>
<camelContext id="nonClusteredDHRroutes" xmlns="http://camel.apache.org/schema/spring"

View file

@ -0,0 +1,42 @@
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd">
<!-- IHFS Database Configuration-->
<bean id="ihfsDbSessionConfig" class="com.raytheon.uf.edex.database.DatabaseSessionConfiguration">
<property name="classFinder" ref="dbClassFinder"/>
<property name="includes">
<list>
<value>com.raytheon.uf.common.dataplugin.shef</value>
<value>com.raytheon.edex.plugin.shef</value>
</list>
</property>
</bean>
<bean id="ihfsSessionFactory"
class="com.raytheon.uf.edex.database.DatabaseSessionFactoryBean">
<property name="configLocation">
<value>file:///${edex.home}/conf/db/hibernateConfig/ihfs/hibernate.cfg.xml</value>
</property>
<property name="databaseSessionConfiguration" ref="ihfsDbSessionConfig"/>
</bean>
<bean id="ihfsTxManager"
class="org.springframework.orm.hibernate3.HibernateTransactionManager">
<property name="sessionFactory" ref="ihfsSessionFactory" />
</bean>
<!-- Dam Catalog Database Configuration-->
<bean id="damSessionFactory"
class="com.raytheon.uf.edex.database.DatabaseSessionFactoryBean">
<!-- No hibernate annotations should be loaded -->
<property name="configLocation">
<value>file:///${edex.home}/conf/db/hibernateConfig/damCatalog/hibernate.cfg.xml</value>
</property>
</bean>
<bean id="damTxManager"
class="org.springframework.orm.hibernate3.HibernateTransactionManager">
<property name="sessionFactory" ref="damSessionFactory" />
</bean>
</beans>

View file

@ -1,45 +1,6 @@
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd">
<!-- IHFS Database Configuration-->
<bean id="ihfsDbSessionConfig" class="com.raytheon.uf.edex.database.DatabaseSessionConfiguration">
<property name="classFinder" ref="dbClassFinder" />
<property name="includes">
<list>
<value>com.raytheon.uf.common.dataplugin.shef</value>
<value>com.raytheon.edex.plugin.shef</value>
</list>
</property>
</bean>
<bean id="ihfsSessionFactory"
class="com.raytheon.uf.edex.database.DatabaseSessionFactoryBean">
<property name="configLocation">
<value>file:///${edex.home}/conf/db/hibernateConfig/ihfs/hibernate.cfg.xml</value>
</property>
<property name="databaseSessionConfiguration" ref="ihfsDbSessionConfig"/>
</bean>
<bean id="ihfsTxManager"
class="org.springframework.orm.hibernate3.HibernateTransactionManager">
<property name="sessionFactory" ref="ihfsSessionFactory" />
</bean>
<!-- Dam Catalog Database Configuration-->
<bean id="damSessionFactory"
class="com.raytheon.uf.edex.database.DatabaseSessionFactoryBean">
<!-- No hibernate annotations should be loaded -->
<property name="configLocation">
<value>file:///${edex.home}/conf/db/hibernateConfig/damCatalog/hibernate.cfg.xml</value>
</property>
</bean>
<bean id="damTxManager"
class="org.springframework.orm.hibernate3.HibernateTransactionManager">
<property name="sessionFactory" ref="damSessionFactory" />
</bean>
<bean id="mpeFieldGenService" class="com.raytheon.uf.edex.ohd.pproc.MpeFieldGenSrv" />
<bean factory-bean="manualProc"
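The IHFS and dam catalog beans removed here are the same definitions that appear in the new standalone file earlier in this diff, so the change reads as a relocation of the database configuration out of ohd-common rather than a removal. If the new file were not picked up automatically by the EDEX spring loader, a conventional Spring import would tie it back in; the resource path below is an assumption, not taken from this commit:

    <!-- assumption: pull in the relocated IHFS/dam catalog database beans -->
    <import resource="classpath:res/spring/ohd-common-database.xml"/>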

View file

@ -8,7 +8,7 @@
<bean id="q2DistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="q2" />
<constructor-arg value="jms-dist:queue:Ingest.q2"/>
<constructor-arg value="jms-durable:queue:Ingest.q2"/>
</bean>
<camelContext id="q2Proc-context"

View file

@ -10,7 +10,7 @@
<bean id="acarsDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="acarsPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.acars"/>
<constructor-arg value="jms-durable:queue:Ingest.acars"/>
</bean>
<bean id="acarsCamelRegistered" factory-bean="contextManager"

View file

@ -16,7 +16,7 @@
<bean id="bufrascatDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="bufrascat" />
<constructor-arg value="jms-dist:queue:Ingest.bufrascat" />
<constructor-arg value="jms-durable:queue:Ingest.bufrascat" />
</bean>
<bean id="bufrascatCamelRegistered" factory-bean="contextManager"

View file

@ -10,7 +10,7 @@
<bean id="bufrhdwDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="bufrhdwPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.bufrhdw"/>
<constructor-arg value="jms-durable:queue:Ingest.bufrhdw"/>
</bean>
<bean id="bufrhdwCamelRegistered" factory-bean="contextManager"

View file

@ -10,7 +10,7 @@
<bean id="bufrmthdwDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="bufrmthdwPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.bufrmthdw"/>
<constructor-arg value="jms-durable:queue:Ingest.bufrmthdw"/>
</bean>
<bean id="bufrmthdwCamelRegistered" factory-bean="contextManager"

View file

@ -9,7 +9,7 @@
<bean id="bufrncwfDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="bufrncwfPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.bufrncwf"/>
<constructor-arg value="jms-durable:queue:Ingest.bufrncwf"/>
</bean>
<bean id="bufrncwfCamelRegistered" factory-bean="contextManager"

View file

@ -16,7 +16,7 @@
<bean id="bufrquikscatDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="bufrquikscatPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.bufrquikscat"/>
<constructor-arg value="jms-durable:queue:Ingest.bufrquikscat"/>
</bean>
<bean id="bufrquikscatCamelRegistered" factory-bean="contextManager"

View file

@ -11,7 +11,7 @@
<bean id="bufrsigwxDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="bufrsigwxPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.bufrsigwx" />
<constructor-arg value="jms-durable:queue:Ingest.bufrsigwx" />
</bean>
<bean id="bufrsigwxCamelRegistered" factory-bean="contextManager"

View file

@ -16,7 +16,7 @@
<bean id="bufrssmiDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="bufrssmiPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.bufrssmi"/>
<constructor-arg value="jms-durable:queue:Ingest.bufrssmi"/>
</bean>
<bean id="bufrssmiCamelRegistered" factory-bean="contextManager"

View file

@ -10,7 +10,7 @@
<bean id="cwaDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="cwaPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.cwa" />
<constructor-arg value="jms-durable:queue:Ingest.cwa" />
</bean>
<bean id="cwaCamelRegistered" factory-bean="contextManager"

View file

@ -6,7 +6,7 @@
<bean factory-bean="distributionSrv" factory-method="register">
<constructor-arg ref="dataDeliveryRetrievalPluginName" />
<constructor-arg
value="jms-dist:queue:dataDeliveryRetrievalProcess?destinationResolver=#qpidDurableResolver" />
value="jms-durable:queue:dataDeliveryRetrievalProcess"/>
</bean>
</beans>
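Here the endpoint also drops its explicit destinationResolver=#qpidDurableResolver option along with the jms-dist prefix, which suggests the jms-durable component carries the durable-queue resolver itself. A sketch of what such a shared component definition could look like, using Camel's standard JmsComponent properties; the bean is not part of this diff and the connectionFactory reference is an assumption:

    <!-- assumption: a shared jms-durable component with the durable resolver baked in -->
    <bean id="jms-durable" class="org.apache.camel.component.jms.JmsComponent">
        <property name="connectionFactory" ref="jmsConnectionFactory"/>
        <property name="destinationResolver" ref="qpidDurableResolver"/>
    </bean>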

View file

@ -15,7 +15,7 @@
<bean id="ldadmesonetDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="ldadmesonet" />
<constructor-arg value="jms-dist:queue:Ingest.ldadmesonet"/>
<constructor-arg value="jms-durable:queue:Ingest.ldadmesonet"/>
</bean>
<bean id="ldadmesonetPointData" class="com.raytheon.uf.common.dataplugin.ldadmesonet.LdadmesonetPointDataTransform"/>

View file

@ -24,7 +24,7 @@
<bean id="loctablesDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="loctablesPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.loctables" />
<constructor-arg value="jms-durable:queue:Ingest.loctables" />
</bean>
<camelContext id="loctables-camel"

View file

@ -10,7 +10,7 @@
<bean id="lsrDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="lclstrmrptPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.lsr" />
<constructor-arg value="jms-durable:queue:Ingest.lsr" />
</bean>
<bean id="lclstrmrptCamelRegistered" factory-bean="contextManager"

View file

@ -7,21 +7,6 @@
<constructor-arg ref="madisPluginName" />
</bean>
<bean id="jmsIngestMadisConfig" class="org.apache.camel.component.jms.JmsConfiguration"
factory-bean="jmsConfig" factory-method="copy">
</bean>
<bean id="madisThreadPool"
class="com.raytheon.uf.edex.esb.camel.spring.JmsThreadPoolTaskExecutor">
<property name="corePoolSize" value="2" />
<property name="maxPoolSize" value="2" />
</bean>
<bean id="jms-madis" class="org.apache.camel.component.jms.JmsComponent">
<constructor-arg ref="jmsIngestMadisConfig" />
<property name="taskExecutor" ref="madisThreadPool" />
</bean>
<bean id="madisPointData"
class="com.raytheon.uf.edex.plugin.madis.MadisPointDataTransform"
depends-on="registerMadisPlugin" />
@ -29,8 +14,7 @@
<bean id="madisDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="madis" />
<constructor-arg
value="jms-dist:queue:Ingest.madis" />
<constructor-arg value="jms-durable:queue:Ingest.madis" />
</bean>
<bean id="madisCamelRegistered" factory-bean="contextManager"
@ -41,7 +25,7 @@
<bean id="madisSeparator" class="com.raytheon.uf.edex.plugin.madis.MadisSeparator"
depends-on="jmsIngestMadisConfig, jms-madis, madisThreadPool">
<constructor-arg
value="jms-madis:queue:Ingest.madisSeparator?destinationResolver=#qpidDurableResolver" />
value="jms-durable:queue:Ingest.madisSeparator" />
<!-- time in hours for orphan purging -->
<constructor-arg value="1" />
</bean>
@ -95,13 +79,12 @@
<setHeader headerName="pluginName">
<constant>madis</constant>
</setHeader>
<to uri="jms-generic:queue:Ingest.madis" />
<to uri="jms-durable:queue:Ingest.madis" />
</route>
<!-- Separates MADIS files into manageable chunks -->
<route id="madisSeperatorRoute">
<from
uri="jms-generic:queue:Ingest.madis?destinationResolver=#qpidDurableResolver" />
<from uri="jms-durable:queue:Ingest.madis" />
<setHeader headerName="pluginName">
<constant>madis</constant>
</setHeader>
@ -122,7 +105,7 @@
<!-- Begin MADIS production route -->
<route id="madisIngestRoute">
<from
uri="jms-madis:queue:Ingest.madisSeparator?destinationResolver=#qpidDurableResolver" />
uri="jms-durable:queue:Ingest.madisSeparator" />
<setHeader headerName="pluginName">
<constant>madis</constant>
</setHeader>
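Taken together, the madis hunks remove the plugin's private JmsConfiguration copy, the two-thread task executor, and the dedicated jms-madis component, and point every producer and consumer at jms-durable queues instead. A condensed sketch of the two resulting routes as the hunks imply them; route ids, URIs, comments, and headers are taken from the diff, while the elided route bodies are trimmed to placeholders:

    <!-- Separates MADIS files into manageable chunks -->
    <route id="madisSeperatorRoute">
        <from uri="jms-durable:queue:Ingest.madis"/>
        <setHeader headerName="pluginName">
            <constant>madis</constant>
        </setHeader>
        <!-- ... separation logic unchanged by this commit ... -->
    </route>

    <!-- MADIS production route -->
    <route id="madisIngestRoute">
        <from uri="jms-durable:queue:Ingest.madisSeparator"/>
        <setHeader headerName="pluginName">
            <constant>madis</constant>
        </setHeader>
        <!-- ... production/ingest processing ... -->
    </route>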

View file

@ -22,7 +22,7 @@
<bean id="mdlsndgDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg ref="modelsoundingPluginName" />
<constructor-arg value="jms-dist:queue:Ingest.modelsounding"/>
<constructor-arg value="jms-durable:queue:Ingest.modelsounding"/>
</bean>
<bean id="modelsoundingCamelRegistered" factory-bean="contextManager"

Some files were not shown because too many files have changed in this diff.