14.1.1-10 baseline
This commit is contained in:
parent 7977f24d77
commit 2c49ed51ac

24 changed files with 1418 additions and 1279 deletions
@@ -24,6 +24,10 @@ import java.util.Calendar;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.Status;
import org.eclipse.core.runtime.jobs.Job;
import org.eclipse.jface.dialogs.MessageDialog;
import org.eclipse.swt.SWT;
import org.eclipse.swt.events.SelectionAdapter;

@@ -71,6 +75,7 @@ import com.raytheon.viz.ui.dialogs.CaveSWTDialog;
* Jul 24, 2013 2220 rferrel Changes to queue size request for all data.
* Aug 01, 2013 2221 rferrel Changes for select configuration.
* Aug 06, 2013 2222 rferrel Changes to display all selected data.
* Nov 14, 2013 2549 rferrel Get category data moved off the UI thread.
* </pre>
*
* @author bgonzale
@@ -452,25 +457,57 @@ public abstract class AbstractArchiveDlg extends CaveSWTDialog implements
* adjust sizes on the display table.
*/
protected void populateTableComp() {
String archiveName = getSelectedArchiveName();
String categoryName = getSelectedCategoryName();
final String archiveName = getSelectedArchiveName();
final String categoryName = getSelectedCategoryName();

setCursorBusy(true);

try {
setShowingSelected(false);
setShowingSelected(false);
tableComp.populateTable(archiveName, categoryName,
new ArrayList<DisplayData>(0));
tableComp.refresh();

List<DisplayData> displayDatas = sizeJob.changeDisplay(archiveName,
categoryName);
if (displayDatas != null) {
tableComp
.populateTable(archiveName, categoryName, displayDatas);
} else {
tableComp.refresh();
Job job = new Job("populate category table") {

@Override
protected IStatus run(IProgressMonitor monitor) {
getCategoryTableData(archiveName, categoryName);
return Status.OK_STATUS;
}
} finally {
setCursorBusy(false);
}
};
job.schedule();
}

/**
* This gets the desired categories data. Assumed called from non-UI thread
* since it is possible getting the data may take time which would hang up
* the UI thread.
*
* @param archiveName
* @param categoryName
*/
private void getCategoryTableData(final String archiveName,
final String categoryName) {

final List<DisplayData> displayDatas = sizeJob.changeDisplay(
archiveName, categoryName);

VizApp.runAsync(new Runnable() {

@Override
public void run() {
try {
if (displayDatas != null) {
tableComp.populateTable(archiveName, categoryName,
displayDatas);
} else {
tableComp.refresh();
}
} finally {
setCursorBusy(false);
}
}
});
}

/**
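Editor's note: the hunk above is the core of DR 2549. The potentially slow sizeJob.changeDisplay() call is moved into an Eclipse Job and its result is handed back to the UI thread through VizApp.runAsync(). Below is a minimal, self-contained sketch of that same hand-off using plain JDK executors; the names BackgroundLoadSketch, CategoryTable and fetchDisplayData are illustrative stand-ins and are not part of the AWIPS code base.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class BackgroundLoadSketch {

    private final ExecutorService worker = Executors.newSingleThreadExecutor();
    private final ExecutorService uiThread = Executors.newSingleThreadExecutor();

    /** Stand-in for tableComp; the real widget lives in the CAVE UI. */
    static class CategoryTable {
        void populate(List<String> rows) {
            System.out.println("populated " + rows.size() + " row(s)");
        }
    }

    private final CategoryTable table = new CategoryTable();

    /** Equivalent of populateTableComp(): returns immediately, never blocks the caller. */
    public void populateAsync(String archive, String category) {
        worker.submit(() -> {
            // slow call, the equivalent of sizeJob.changeDisplay(...)
            List<String> rows = fetchDisplayData(archive, category);
            // hand the result back to the "UI" thread, as VizApp.runAsync does
            uiThread.submit(() -> table.populate(rows));
        });
    }

    /** Hypothetical slow fetch; the real code may scan the file system. */
    private List<String> fetchDisplayData(String archive, String category) {
        return List.of(archive + "/" + category);
    }

    public static void main(String[] args) throws InterruptedException {
        BackgroundLoadSketch sketch = new BackgroundLoadSketch();
        sketch.populateAsync("Processed", "radar");
        sketch.worker.shutdown();
        sketch.worker.awaitTermination(5, TimeUnit.SECONDS);
        sketch.uiThread.shutdown();
        sketch.uiThread.awaitTermination(5, TimeUnit.SECONDS);
    }
}

The property that matters is the same as in the diff: the caller returns at once, and only the final table update runs on the UI thread.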
@@ -312,9 +312,24 @@ public class CurrentWarnings {
public AbstractWarningRecord getNewestByTracking(String etn, String phensig) {
AbstractWarningRecord rval = null;
synchronized (officeId) {
List<AbstractWarningRecord> warnings = warningMap.get(toKey(
List<AbstractWarningRecord> keyWarnings = warningMap.get(toKey(
phensig, etn));
if (warnings != null) {
if (keyWarnings != null) {
// filter out "future" warnings.
List<AbstractWarningRecord> warnings = null;
if (SimulatedTime.getSystemTime().isRealTime()) {
warnings = keyWarnings;
} else {
warnings = new ArrayList<AbstractWarningRecord>(
keyWarnings.size());
long currentTime = TimeUtil.newCalendar().getTimeInMillis();
for (AbstractWarningRecord warning : keyWarnings) {
if (warning.getIssueTime().getTimeInMillis() <= currentTime) {
warnings.add(warning);
}
}
}

// See if we have a NEW warning
for (AbstractWarningRecord warning : warnings) {
if (getAction(warning.getAct()) == WarningAction.NEW) {
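Editor's note: the rewritten getNewestByTracking() above only filters when SimulatedTime is not real time, so live operations pay no extra cost. A small sketch of that filter in isolation, with Warning standing in for AbstractWarningRecord:

import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;

public class FutureWarningFilterSketch {

    record Warning(String etn, Calendar issueTime) { }

    /** Drop any warning issued after the current (possibly simulated) time. */
    static List<Warning> dropFutureWarnings(List<Warning> warnings, long currentTimeMillis) {
        List<Warning> past = new ArrayList<>(warnings.size());
        for (Warning w : warnings) {
            if (w.issueTime().getTimeInMillis() <= currentTimeMillis) {
                past.add(w);
            }
        }
        return past;
    }

    public static void main(String[] args) {
        Calendar now = Calendar.getInstance();
        Calendar later = (Calendar) now.clone();
        later.add(Calendar.MINUTE, 20);

        List<Warning> all = List.of(new Warning("0001", now), new Warning("0002", later));
        // only ETN 0001 survives; 0002 is "in the future" relative to now
        System.out.println(dropFutureWarnings(all, now.getTimeInMillis()));
    }
}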
@@ -399,8 +414,7 @@ public class CurrentWarnings {
if (warnings != null) {
Calendar c = TimeUtil.newCalendar();
c.add(Calendar.MINUTE, -10);
TimeRange t = new TimeRange(c.getTime(), SimulatedTime
.getSystemTime().getTime());
TimeRange t = new TimeRange(c.getTime(), TimeUtil.newDate());

for (AbstractWarningRecord warning : warnings) {
if (t.contains(warning.getIssueTime().getTime())) {

@@ -438,8 +452,7 @@ public class CurrentWarnings {
ArrayList<AbstractWarningRecord> conProds = new ArrayList<AbstractWarningRecord>();
Calendar c = TimeUtil.newCalendar();
c.add(Calendar.MINUTE, -10);
TimeRange t = new TimeRange(c.getTime(), SimulatedTime
.getSystemTime().getTime());
TimeRange t = new TimeRange(c.getTime(), TimeUtil.newDate());
for (AbstractWarningRecord warning : warnings) {
WarningAction action = getAction(warning.getAct());
if (t.contains(warning.getIssueTime().getTime())
@@ -545,12 +558,20 @@ public class CurrentWarnings {
List<AbstractWarningRecord> records = new ArrayList<AbstractWarningRecord>(
recordsMap.values());

// Sort by insert time
// Sort by issue time when null fall back to insert time.
Collections.sort(records, new Comparator<AbstractWarningRecord>() {
@Override
public int compare(AbstractWarningRecord o1,
AbstractWarningRecord o2) {
return o1.getInsertTime().compareTo(o2.getInsertTime());
Calendar c1 = o1.getIssueTime();
if (c1 == null) {
c1 = o1.getInsertTime();
}
Calendar c2 = o2.getIssueTime();
if (c2 == null) {
c2 = o2.getInsertTime();
}
return c1.compareTo(c2);
}
});
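Editor's note: the comparator change above orders warnings by issue time and falls back to insert time when issue time is null. A compact sketch of the same ordering rule using Comparator.comparing; Record here is a stand-in for AbstractWarningRecord:

import java.util.ArrayList;
import java.util.Calendar;
import java.util.Comparator;
import java.util.List;

public class IssueTimeSortSketch {

    static class Record {
        Calendar issueTime;   // may be null
        Calendar insertTime;  // assumed non-null, as the original fallback requires

        Record(Calendar issueTime, Calendar insertTime) {
            this.issueTime = issueTime;
            this.insertTime = insertTime;
        }

        /** Issue time when present, otherwise insert time. */
        Calendar effectiveTime() {
            return issueTime != null ? issueTime : insertTime;
        }
    }

    public static void main(String[] args) {
        Calendar early = Calendar.getInstance();
        Calendar late = (Calendar) early.clone();
        late.add(Calendar.MINUTE, 30);

        List<Record> records = new ArrayList<>();
        records.add(new Record(late, early));
        records.add(new Record(null, early));   // sorts by its insert time instead

        // Same ordering rule as the anonymous Comparator in the diff.
        records.sort(Comparator.comparing(Record::effectiveTime));

        records.forEach(r -> System.out.println(r.effectiveTime().getTime()));
    }
}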
@@ -602,7 +623,10 @@ public class CurrentWarnings {

Map<String, List<AbstractWarningRecord>> recordMap = new HashMap<String, List<AbstractWarningRecord>>();
for (AbstractWarningRecord rec : newRecords) {
List<AbstractWarningRecord> recs = recordMap.get(rec.getOfficeid());
// This used the key rec.getOfficeid() which can be null; which
// can drop alerts when more than one new Record.
// Changed to use the same key as the put.
List<AbstractWarningRecord> recs = recordMap.get(rec.getXxxid());
if (recs == null) {
recs = new ArrayList<AbstractWarningRecord>();
recordMap.put(rec.getXxxid(), recs);
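Editor's note: the fix above makes the map lookup use the same key as the put (rec.getXxxid()), so records are no longer dropped when getOfficeid() is null. A small sketch of the grouping pattern with a guaranteed-consistent key; Rec and xxxId are illustrative stand-ins:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupByKeySketch {

    record Rec(String xxxId, String phensig) { }

    public static void main(String[] args) {
        List<Rec> newRecords = List.of(
                new Rec("OAX", "SV.W"), new Rec("OAX", "TO.W"), new Rec("DMX", "SV.W"));

        Map<String, List<Rec>> byOffice = new HashMap<>();
        for (Rec rec : newRecords) {
            // computeIfAbsent guarantees the get and the put use the identical key
            byOffice.computeIfAbsent(rec.xxxId(), k -> new ArrayList<>()).add(rec);
        }
        System.out.println(byOffice); // {OAX=[...], DMX=[...]}
    }
}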
@@ -26,6 +26,7 @@ import com.raytheon.uf.common.status.UFStatus;
import com.raytheon.uf.common.time.DataTime;
import com.raytheon.uf.common.time.SimulatedTime;
import com.raytheon.uf.common.time.TimeRange;
import com.raytheon.uf.common.time.util.TimeUtil;
import com.raytheon.uf.viz.core.DrawableString;
import com.raytheon.uf.viz.core.IGraphicsTarget;
import com.raytheon.uf.viz.core.IGraphicsTarget.HorizontalAlignment;

@@ -82,6 +83,7 @@ import com.vividsolutions.jts.geom.prep.PreparedGeometryFactory;
* Check if geometry is null when inspecting.
* Jul 22, 2013 2176 jsanchez Updated the wire frame and text for EMERGENCY warnings.
* Sep 4, 2013 2176 jsanchez Made the polygon line width thicker and made regular text not bold.
* Nov 11, 2013 2439 rferrel Changes to prevent getting future warning when in DRT mode.
* </pre>
*
* @author jsanchez

@@ -128,7 +130,7 @@ public abstract class AbstractWWAResource extends
protected static PreparedGeometryFactory pgf = new PreparedGeometryFactory();

/** one hour ahead, entirely arbitrary/magic **/
private static final long LAST_FRAME_ADJ = (60 * 60 * 1000);
private static final long LAST_FRAME_ADJ = TimeUtil.MILLIS_PER_HOUR;

protected String resourceName;

@@ -465,13 +467,20 @@ public abstract class AbstractWWAResource extends

if (lastFrame) {
// use current system time to determine what to display
Date timeToDisplay = SimulatedTime.getSystemTime().getTime();
Date timeToDisplay = TimeUtil.newDate();
// change frame time
frameTime = timeToDisplay;
// point paint time to different time
paintTime = new DataTime(timeToDisplay);
// point framePeriod to new frame
framePeriod = new TimeRange(frameTime, LAST_FRAME_ADJ);
if (SimulatedTime.getSystemTime().isRealTime()) {
framePeriod = new TimeRange(frameTime, LAST_FRAME_ADJ);
} else {
// Prevent getting "future" records by keeping interval in the
// same minute.
framePeriod = new TimeRange(frameTime,
30 * TimeUtil.MILLIS_PER_SECOND);
}
}

// check if the warning is cancelled
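Editor's note: in the last-frame hunk above, the frame period stays one hour in real time but shrinks to 30 seconds under simulated time so the query window cannot include "future" records. A sketch of that decision in isolation; TimeRange here is a trivial stand-in for com.raytheon.uf.common.time.TimeRange:

import java.util.Date;
import java.util.concurrent.TimeUnit;

public class LastFramePeriodSketch {

    record TimeRange(Date start, long durationMillis) { }

    static final long LAST_FRAME_ADJ = TimeUnit.HOURS.toMillis(1);

    static TimeRange lastFramePeriod(Date frameTime, boolean realTime) {
        if (realTime) {
            // real time: an hour ahead, as before
            return new TimeRange(frameTime, LAST_FRAME_ADJ);
        }
        // DRT: keep the interval short so it stays within the current minute
        return new TimeRange(frameTime, TimeUnit.SECONDS.toMillis(30));
    }

    public static void main(String[] args) {
        System.out.println(lastFramePeriod(new Date(), true));
        System.out.println(lastFramePeriod(new Date(), false));
    }
}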
@@ -147,4 +147,4 @@ if [ $DEBUG_FLAG == "on" ]; then
echo "To Debug ... Connect to Port: ${EDEX_DEBUG_PORT}."
fi

java -jar ${EDEX_HOME}/bin/yajsw/wrapper.jar -c ${EDEX_HOME}/conf/${CONF_FILE} ${WRAPPER_ARGS}
java -Xmx32m -XX:MaxPermSize=12m -XX:ReservedCodeCacheSize=4m -jar ${EDEX_HOME}/bin/yajsw/wrapper.jar -c ${EDEX_HOME}/conf/${CONF_FILE} ${WRAPPER_ARGS}
@ -5,79 +5,87 @@
|
|||
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
|
||||
http://camel.apache.org/schema/spring http://camel.apache.org/schema/spring/camel-spring.xsd
|
||||
http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-3.1.xsd">
|
||||
|
||||
|
||||
<bean id="uriAggregator" class="com.raytheon.uf.edex.esb.camel.DataUriAggregator" />
|
||||
<bean id="toDataURI" class="com.raytheon.uf.edex.esb.camel.ToDataURI" />
|
||||
|
||||
<bean id="persist" class="com.raytheon.edex.services.PersistSrv" factory-method="getInstance"/>
|
||||
<bean id="index" class="com.raytheon.edex.services.IndexSrv"/>
|
||||
|
||||
<bean id="persistCamelRegistered" factory-bean="contextManager"
|
||||
factory-method="register">
|
||||
<constructor-arg ref="persist-camel"/>
|
||||
</bean>
|
||||
|
||||
<camelContext id="persist-camel" xmlns="http://camel.apache.org/schema/spring" errorHandlerRef="errorHandler">
|
||||
|
||||
<!-- Generic persist and indexing
|
||||
Intended for routes that need persisting to HDF5,
|
||||
Indexing but no alert processing
|
||||
-->
|
||||
<route id="persistIndex">
|
||||
<from uri="direct-vm:persistIndex"/>
|
||||
<bean ref="persist" method="persist"/>
|
||||
<bean ref="index" method="index"/>
|
||||
<bean ref="processUtil" method="log"/>
|
||||
|
||||
|
||||
<bean id="uriAggregator" class="com.raytheon.uf.edex.esb.camel.DataUriAggregator" />
|
||||
<bean id="toDataURI" class="com.raytheon.uf.edex.esb.camel.ToDataURI" />
|
||||
|
||||
<bean id="dupElim" class="com.raytheon.edex.ingestsrv.DupElimSrv"/>
|
||||
<bean id="persist" class="com.raytheon.edex.services.PersistSrv" factory-method="getInstance" />
|
||||
<bean id="index" class="com.raytheon.edex.services.IndexSrv" />
|
||||
|
||||
<bean id="persistCamelRegistered" factory-bean="contextManager" factory-method="register">
|
||||
<constructor-arg ref="persist-camel" />
|
||||
</bean>
|
||||
|
||||
<camelContext id="persist-camel" xmlns="http://camel.apache.org/schema/spring"
|
||||
errorHandlerRef="errorHandler">
|
||||
|
||||
<!-- Generic persist and indexing
|
||||
Intended for routes that need persisting to HDF5,
|
||||
Indexing but no alert processing -->
|
||||
<route id="persistIndex">
|
||||
<from uri="direct-vm:persistIndex" />
|
||||
<doTry>
|
||||
<bean ref="persist" method="persist" />
|
||||
<bean ref="index" method="index" />
|
||||
<bean ref="processUtil" method="log" />
|
||||
<doCatch>
|
||||
<exception>java.lang.Throwable</exception>
|
||||
<to uri="log:persist?level=ERROR" />
|
||||
</doCatch>
|
||||
</doTry>
|
||||
</route>
|
||||
|
||||
<!-- Generic persist, index and alert route
|
||||
Intended for routes that need persisting to HDF5,
|
||||
Indexing and Alerting
|
||||
|
||||
<!-- Generic persist, index and alert route
|
||||
Intended for routes that need persisting to HDF5,
|
||||
Indexing and Alerting
|
||||
-->
|
||||
<route id="persistIndexAlert">
|
||||
<from uri="direct-vm:persistIndexAlert"/>
|
||||
<bean ref="persist" method="persist"/>
|
||||
<bean ref="index" method="index"/>
|
||||
<bean ref="processUtil" method="log"/>
|
||||
<bean ref="toDataURI" method="toDataURI"/>
|
||||
<to uri="vm:stageNotification"/>
|
||||
<from uri="direct-vm:persistIndexAlert" />
|
||||
<doTry>
|
||||
<bean ref="persist" method="persist" />
|
||||
<bean ref="index" method="index" />
|
||||
<bean ref="processUtil" method="log" />
|
||||
<bean ref="toDataURI" method="toDataURI" />
|
||||
<to uri="vm:stageNotification" />
|
||||
<doCatch>
|
||||
<exception>java.lang.Throwable</exception>
|
||||
<to uri="log:persist?level=ERROR" />
|
||||
</doCatch>
|
||||
</doTry>
|
||||
</route>
|
||||
|
||||
<!-- Generic index and alert route
|
||||
Intended for routes that need Indexing and Alerting
|
||||
|
||||
<!-- Generic index and alert route
|
||||
Intended for routes that need Indexing and Alerting
|
||||
-->
|
||||
<route id="indexAlert">
|
||||
<from uri="direct-vm:indexAlert"/>
|
||||
<bean ref="index" method="index"/>
|
||||
<bean ref="processUtil" method="log"/>
|
||||
<bean ref="toDataURI" method="toDataURI"/>
|
||||
<to uri="vm:stageNotification"/>
|
||||
<from uri="direct-vm:indexAlert" />
|
||||
<doTry>
|
||||
<bean ref="index" method="index" />
|
||||
<bean ref="processUtil" method="log" />
|
||||
<bean ref="toDataURI" method="toDataURI" />
|
||||
<to uri="vm:stageNotification" />
|
||||
<doCatch>
|
||||
<exception>java.lang.Throwable</exception>
|
||||
<to uri="log:persist?level=ERROR" />
|
||||
</doCatch>
|
||||
</doTry>
|
||||
</route>
|
||||
|
||||
|
||||
<route id="notificationAggregation">
|
||||
<from uri="vm:stageNotification"/>
|
||||
<bean ref="uriAggregator" method="addDataUris" />
|
||||
<!--
|
||||
<multicast>
|
||||
<pipeline>
|
||||
<bean ref="uriAggregator" method="addDataUris" />
|
||||
</pipeline>
|
||||
<pipeline>
|
||||
<to uri="jms-generic:queue:subscriptions" />
|
||||
</pipeline>
|
||||
</multicast>
|
||||
-->
|
||||
<from uri="vm:stageNotification" />
|
||||
<bean ref="uriAggregator" method="addDataUris" />
|
||||
</route>
|
||||
|
||||
|
||||
<route id="notificationTimer">
|
||||
<from uri="timer://notificationTimer?fixedRate=true&period=5000" />
|
||||
<filter>
|
||||
<method bean="uriAggregator" method="hasUris" />
|
||||
<bean ref="uriAggregator" method="sendQueuedUris" />
|
||||
<bean ref="serializationUtil" method="transformToThrift" />
|
||||
<to uri="jms-generic:topic:edex.alerts?timeToLive=60000"/>
|
||||
</filter>
|
||||
<from uri="timer://notificationTimer?fixedRate=true&period=5000" />
|
||||
<filter>
|
||||
<method bean="uriAggregator" method="hasUris" />
|
||||
<bean ref="uriAggregator" method="sendQueuedUris" />
|
||||
<bean ref="serializationUtil" method="transformToThrift" />
|
||||
<to uri="jms-generic:topic:edex.alerts?timeToLive=60000" />
|
||||
</filter>
|
||||
</route>
|
||||
</camelContext>
|
||||
</beans>
|
||||
</beans>
|
|
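Editor's note: the persist routes above were wrapped in doTry/doCatch so a persist or index failure is logged rather than propagated. For reference, roughly the same wrapping in Camel's Java DSL; this is a sketch only, assuming camel-core is on the classpath and beans named persist, index and processUtil are in the registry:

import org.apache.camel.builder.RouteBuilder;

public class PersistIndexRouteSketch extends RouteBuilder {

    @Override
    public void configure() {
        from("direct-vm:persistIndex")
            .doTry()
                .to("bean:persist?method=persist")
                .to("bean:index?method=index")
                .to("bean:processUtil?method=log")
            .doCatch(Throwable.class)
                // mirror the XML: log the failure instead of failing the exchange
                .to("log:persist?level=ERROR")
            .end();
    }
}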
@ -0,0 +1,131 @@
|
|||
/**
|
||||
* This software was developed and / or modified by Raytheon Company,
|
||||
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
|
||||
*
|
||||
* U.S. EXPORT CONTROLLED TECHNICAL DATA
|
||||
* This software product contains export-restricted data whose
|
||||
* export/transfer/disclosure is restricted by U.S. law. Dissemination
|
||||
* to non-U.S. persons whether in the United States or abroad requires
|
||||
* an export license or other authorization.
|
||||
*
|
||||
* Contractor Name: Raytheon Company
|
||||
* Contractor Address: 6825 Pine Street, Suite 340
|
||||
* Mail Stop B8
|
||||
* Omaha, NE 68106
|
||||
* 402.291.0100
|
||||
*
|
||||
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
|
||||
* further licensing information.
|
||||
**/
|
||||
package com.raytheon.edex.ingestsrv;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import com.raytheon.uf.common.dataplugin.PluginDataObject;
|
||||
import com.raytheon.uf.common.dataplugin.annotations.DataURIUtil;
|
||||
import com.raytheon.uf.common.status.IPerformanceStatusHandler;
|
||||
import com.raytheon.uf.common.status.IUFStatusHandler;
|
||||
import com.raytheon.uf.common.status.PerformanceStatus;
|
||||
import com.raytheon.uf.common.status.UFStatus;
|
||||
import com.raytheon.uf.common.time.util.ITimer;
|
||||
import com.raytheon.uf.common.time.util.TimeUtil;
|
||||
import com.raytheon.uf.common.util.CollectionUtil;
|
||||
import com.raytheon.uf.edex.database.plugin.PluginDao;
|
||||
import com.raytheon.uf.edex.database.plugin.PluginFactory;
|
||||
import com.raytheon.uf.edex.database.query.DatabaseQuery;
|
||||
|
||||
/**
|
||||
* Checks database for duplicates of data. Does not account for clustering.
|
||||
*
|
||||
* <pre>
|
||||
*
|
||||
* SOFTWARE HISTORY
|
||||
*
|
||||
* Date Ticket# Engineer Description
|
||||
* ------------ ---------- ----------- --------------------------
|
||||
* Nov 11, 2013 2478 rjpeter Initial creation
|
||||
* </pre>
|
||||
*
|
||||
* @author rjpeter
|
||||
* @version 1.0
|
||||
*/
|
||||
|
||||
public class DupElimSrv {
|
||||
private static final IUFStatusHandler statusHandler = UFStatus
|
||||
.getHandler(DupElimSrv.class);
|
||||
|
||||
private final IPerformanceStatusHandler perfLog = PerformanceStatus
|
||||
.getHandler("DupElim:");
|
||||
|
||||
/**
|
||||
* Checks the passed pdos against the database for existence. If duplicates are
* found, returns a new array containing only the new plugin data objects. If
* an error occurs, the original pdos array will be returned.
|
||||
*
|
||||
* @param pluginName
|
||||
* @param pdos
|
||||
* @return
|
||||
*/
|
||||
public PluginDataObject[] dupElim(PluginDataObject[] pdos) {
|
||||
if ((pdos == null) || (pdos.length == 0)) {
|
||||
return new PluginDataObject[0];
|
||||
}
|
||||
|
||||
ITimer dupCheckTimer = TimeUtil.getTimer();
|
||||
dupCheckTimer.start();
|
||||
|
||||
int numBefore = pdos.length;
|
||||
String pluginName = pdos[0].getPluginName();
|
||||
|
||||
try {
|
||||
|
||||
PluginDao dao = PluginFactory.getInstance()
|
||||
.getPluginDao(pluginName);
|
||||
List<PluginDataObject> newPdos = new ArrayList<PluginDataObject>(
|
||||
pdos.length);
|
||||
|
||||
// TODO: Bulk querying, groups of 100 using IN lists?
|
||||
for (PluginDataObject pdo : pdos) {
|
||||
DatabaseQuery dbQuery = new DatabaseQuery(pdo.getClass());
|
||||
Map<String, Object> dataUriFields = DataURIUtil
|
||||
.createDataURIMap(pdo);
|
||||
for (Map.Entry<String, Object> field : dataUriFields.entrySet()) {
|
||||
String fieldName = field.getKey();
|
||||
// ignore pluginName
|
||||
if (!DataURIUtil.PLUGIN_NAME_KEY.equals(fieldName)) {
|
||||
dbQuery.addQueryParam(field.getKey(), field.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
List<PluginDataObject> dbPdos = (List<PluginDataObject>) dao
|
||||
.queryByCriteria(dbQuery);
|
||||
if (CollectionUtil.isNullOrEmpty(dbPdos)) {
|
||||
newPdos.add(pdo);
|
||||
} else {
|
||||
// shouldn't be more than 1
|
||||
PluginDataObject dbPdo = dbPdos.get(1);
|
||||
if ((dbPdo == null)
|
||||
|| !pdo.getDataURI().equals(dbPdo.getDataURI())) {
|
||||
newPdos.add(pdo);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (pdos.length != newPdos.size()) {
|
||||
pdos = newPdos.toArray(new PluginDataObject[newPdos.size()]);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
statusHandler
|
||||
.error("Error occurred during duplicate elimination processing",
|
||||
e);
|
||||
}
|
||||
dupCheckTimer.stop();
|
||||
|
||||
perfLog.logDuration(pluginName + ": Eliminated "
|
||||
+ (numBefore - pdos.length) + " of " + numBefore
|
||||
+ " record(s): Time to process", dupCheckTimer.getElapsedTime());
|
||||
return pdos;
|
||||
}
|
||||
}
|
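Editor's note: DupElimSrv above decides, per record, whether an identical dataURI is already stored and keeps only the new ones. A simplified sketch of that batch filter, with a Set of known URIs standing in for the per-record DatabaseQuery lookups:

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

public class DupElimSketch {

    /** Keep only the URIs not already stored; return the original array when nothing was dropped. */
    static String[] dupElim(String[] incomingUris, Set<String> storedUris) {
        List<String> fresh = new ArrayList<>(incomingUris.length);
        for (String uri : incomingUris) {
            if (!storedUris.contains(uri)) {
                fresh.add(uri);
            }
        }
        return fresh.size() == incomingUris.length
                ? incomingUris
                : fresh.toArray(new String[0]);
    }

    public static void main(String[] args) {
        Set<String> stored = Set.of("/obs/2013-11-11_00:00/KOMA");
        String[] batch = { "/obs/2013-11-11_00:00/KOMA", "/obs/2013-11-11_00:05/KOMA" };
        System.out.println(String.join(", ", dupElim(batch, stored)));
    }
}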
|
@ -96,6 +96,7 @@ import com.raytheon.uf.edex.database.query.DatabaseQuery;
|
|||
* 08/08/13 DR16485 ryu Remove call to getDatabaseId() from getMaxInsertTimeByDbId()
|
||||
* so new GFE databases aren't accidentally created.
|
||||
* 09/30/2013 #2147 rferrel Changes to archive hdf5 files.
|
||||
* 11/13/2013 #2517 randerso Added ORDER BY clause to getOverlappingTimes
|
||||
* </pre>
|
||||
*
|
||||
* @author bphillip
|
||||
|
@ -814,7 +815,8 @@ public class GFEDao extends DefaultPluginDao {
|
|||
.find("SELECT dataTime.validPeriod"
|
||||
+ " FROM GFERecord WHERE parmId = ?"
|
||||
+ " AND dataTime.validPeriod.start < ?"
|
||||
+ " AND dataTime.validPeriod.end > ?",
|
||||
+ " AND dataTime.validPeriod.end > ?"
|
||||
+ " ORDER BY dataTime.validPeriod.start",
|
||||
new Object[] { parmId, tr.getEnd(),
|
||||
tr.getStart() });
|
||||
return rval;
|
||||
|
|
|
@ -83,6 +83,10 @@ import com.raytheon.uf.common.util.Pair;
|
|||
* Scalar/VectorGridSlices, refactor
|
||||
* Discrete/WeatherGridSlices builders.
|
||||
* Jun 05, 2013 #2063 dgilling Port history() from A1.
|
||||
* Nov 11, 2013 #2517 randerso Changed put() to support multiple discontiguous saves
|
||||
* Added getKeys(tr) to get grid times overlapping a time range
|
||||
* Removed caching of inventory as it was not being updated when
|
||||
* grids were updated/deleted
|
||||
*
|
||||
* </pre>
|
||||
*
|
||||
|
@ -103,8 +107,6 @@ public class IFPWE {
|
|||
|
||||
private final GridParmInfo gpi;
|
||||
|
||||
private List<TimeRange> availableTimes;
|
||||
|
||||
private final WsId wsId;
|
||||
|
||||
/**
|
||||
|
@ -126,23 +128,41 @@ public class IFPWE {
|
|||
}
|
||||
|
||||
/**
|
||||
* Returns the available times of data for the parm
|
||||
* Returns all available times of data for the parm
|
||||
*
|
||||
* @return
|
||||
* @return the time ranges of all available data for the parm
|
||||
*/
|
||||
public List<TimeRange> getKeys() {
|
||||
if (availableTimes == null) {
|
||||
availableTimes = new ArrayList<TimeRange>();
|
||||
List<TimeRange> times = GridParmManager.getGridInventory(parmId)
|
||||
.getPayload();
|
||||
if (times != null) {
|
||||
Collections.sort(times);
|
||||
availableTimes.addAll(times);
|
||||
}
|
||||
List<TimeRange> availableTimes;
|
||||
ServerResponse<List<TimeRange>> sr = GridParmManager
|
||||
.getGridInventory(parmId);
|
||||
if (sr.isOkay()) {
|
||||
availableTimes = sr.getPayload();
|
||||
} else {
|
||||
availableTimes = Collections.emptyList();
|
||||
}
|
||||
return availableTimes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns available times of data for the parm that overlap a time range
|
||||
*
|
||||
* @param tr
|
||||
* the desired time range
|
||||
* @return the time ranges of data that overlap the desired time range
|
||||
*/
|
||||
public List<TimeRange> getKeys(TimeRange tr) {
|
||||
List<TimeRange> overlappingTimes;
|
||||
ServerResponse<List<TimeRange>> sr = GridParmManager.getGridInventory(
|
||||
parmId, tr);
|
||||
if (sr.isOkay()) {
|
||||
overlappingTimes = sr.getPayload();
|
||||
} else {
|
||||
overlappingTimes = Collections.emptyList();
|
||||
}
|
||||
return overlappingTimes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the grid parm info
|
||||
*
|
||||
|
@ -245,67 +265,71 @@ public class IFPWE {
|
|||
* storage.
|
||||
*
|
||||
* @param inventory
|
||||
* A Map of TimeRanges to IGridSlices to be saved. Time is the
|
||||
* slice's valid time.
|
||||
* @param timeRangeSpan
|
||||
* The replacement time range of grids to be saved. Must cover
|
||||
* each individual TimeRange in inventory.
|
||||
* A Map of TimeRanges to List of IGridSlices. TimeRange is the
|
||||
* replacement time range
|
||||
* @throws GfeException
|
||||
* If an error occurs while trying to obtain a lock on the
|
||||
* destination database.
|
||||
*/
|
||||
public void put(LinkedHashMap<TimeRange, IGridSlice> inventory,
|
||||
TimeRange timeRangeSpan) throws GfeException {
|
||||
statusHandler.debug("Getting lock for ParmID: " + parmId + " TR: "
|
||||
+ timeRangeSpan);
|
||||
ServerResponse<List<LockTable>> lockResponse = LockManager
|
||||
.getInstance().requestLockChange(
|
||||
new LockRequest(parmId, timeRangeSpan, LockMode.LOCK),
|
||||
wsId, siteId);
|
||||
if (lockResponse.isOkay()) {
|
||||
statusHandler.debug("LOCKING: Lock granted for: " + wsId
|
||||
+ " for time range: " + timeRangeSpan);
|
||||
} else {
|
||||
statusHandler.error("Could not lock TimeRange " + timeRangeSpan
|
||||
+ " for parm [" + parmId + "]: " + lockResponse.message());
|
||||
throw new GfeException("Request lock failed. "
|
||||
+ lockResponse.message());
|
||||
}
|
||||
public void put(LinkedHashMap<TimeRange, List<IGridSlice>> inventory)
|
||||
throws GfeException {
|
||||
|
||||
List<GFERecord> records = new ArrayList<GFERecord>(inventory.size());
|
||||
for (Entry<TimeRange, IGridSlice> entry : inventory.entrySet()) {
|
||||
GFERecord rec = new GFERecord(parmId, entry.getKey());
|
||||
rec.setGridHistory(entry.getValue().getHistory());
|
||||
rec.setMessageData(entry.getValue());
|
||||
records.add(rec);
|
||||
}
|
||||
SaveGridRequest sgr = new SaveGridRequest(parmId, timeRangeSpan,
|
||||
records);
|
||||
|
||||
try {
|
||||
ServerResponse<?> sr = GridParmManager.saveGridData(
|
||||
Arrays.asList(sgr), wsId, siteId);
|
||||
if (sr.isOkay()) {
|
||||
SendNotifications.send(sr.getNotifications());
|
||||
} else {
|
||||
statusHandler.error("Unable to save grids for parm [" + parmId
|
||||
+ "] over time range " + timeRangeSpan + ": "
|
||||
+ sr.message());
|
||||
}
|
||||
} finally {
|
||||
ServerResponse<List<LockTable>> unLockResponse = LockManager
|
||||
for (Entry<TimeRange, List<IGridSlice>> entry : inventory.entrySet()) {
|
||||
TimeRange timeRangeSpan = entry.getKey();
|
||||
statusHandler.debug("Getting lock for ParmID: " + parmId + " TR: "
|
||||
+ timeRangeSpan);
|
||||
ServerResponse<List<LockTable>> lockResponse = LockManager
|
||||
.getInstance().requestLockChange(
|
||||
new LockRequest(parmId, timeRangeSpan,
|
||||
LockMode.UNLOCK), wsId, siteId);
|
||||
if (unLockResponse.isOkay()) {
|
||||
statusHandler.debug("LOCKING: Unlocked for: " + wsId + " TR: "
|
||||
+ timeRangeSpan);
|
||||
LockMode.LOCK), wsId, siteId);
|
||||
if (lockResponse.isOkay()) {
|
||||
statusHandler.debug("LOCKING: Lock granted for: " + wsId
|
||||
+ " for time range: " + timeRangeSpan);
|
||||
} else {
|
||||
statusHandler.error("Could not unlock TimeRange "
|
||||
+ timeRangeSpan + " for parm [" + parmId + "]: "
|
||||
statusHandler.error("Could not lock TimeRange " + timeRangeSpan
|
||||
+ " for parm [" + parmId + "]: "
|
||||
+ lockResponse.message());
|
||||
throw new GfeException("Request unlock failed. "
|
||||
+ unLockResponse.message());
|
||||
throw new GfeException("Request lock failed. "
|
||||
+ lockResponse.message());
|
||||
}
|
||||
|
||||
List<IGridSlice> gridSlices = entry.getValue();
|
||||
List<GFERecord> records = new ArrayList<GFERecord>(
|
||||
gridSlices.size());
|
||||
for (IGridSlice slice : gridSlices) {
|
||||
GFERecord rec = new GFERecord(parmId, slice.getValidTime());
|
||||
rec.setGridHistory(slice.getHistory());
|
||||
rec.setMessageData(slice);
|
||||
records.add(rec);
|
||||
}
|
||||
SaveGridRequest sgr = new SaveGridRequest(parmId, timeRangeSpan,
|
||||
records);
|
||||
|
||||
try {
|
||||
ServerResponse<?> sr = GridParmManager.saveGridData(
|
||||
Arrays.asList(sgr), wsId, siteId);
|
||||
if (sr.isOkay()) {
|
||||
SendNotifications.send(sr.getNotifications());
|
||||
} else {
|
||||
statusHandler.error("Unable to save grids for parm ["
|
||||
+ parmId + "] over time range " + timeRangeSpan
|
||||
+ ": " + sr.message());
|
||||
}
|
||||
} finally {
|
||||
ServerResponse<List<LockTable>> unLockResponse = LockManager
|
||||
.getInstance().requestLockChange(
|
||||
new LockRequest(parmId, timeRangeSpan,
|
||||
LockMode.UNLOCK), wsId, siteId);
|
||||
if (unLockResponse.isOkay()) {
|
||||
statusHandler.debug("LOCKING: Unlocked for: " + wsId
|
||||
+ " TR: " + timeRangeSpan);
|
||||
} else {
|
||||
statusHandler.error("Could not unlock TimeRange "
|
||||
+ timeRangeSpan + " for parm [" + parmId + "]: "
|
||||
+ lockResponse.message());
|
||||
throw new GfeException("Request unlock failed. "
|
||||
+ unLockResponse.message());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
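Editor's note: the IFPWE.put() rewrite above processes each replacement time range as lock, save, unlock, with the unlock in a finally block. A minimal sketch of that control flow, using a JDK lock as a stand-in for LockManager and a print as a stand-in for GridParmManager.saveGridData():

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.locks.ReentrantLock;

public class LockSaveUnlockSketch {

    private final Map<String, ReentrantLock> locks = new LinkedHashMap<>();

    public void put(Map<String, List<String>> inventory) {
        for (Map.Entry<String, List<String>> entry : inventory.entrySet()) {
            String timeRange = entry.getKey();
            ReentrantLock lock = locks.computeIfAbsent(timeRange, tr -> new ReentrantLock());
            lock.lock();                      // requestLockChange(LOCK)
            try {
                saveGrids(timeRange, entry.getValue());
            } finally {
                lock.unlock();                // requestLockChange(UNLOCK), always runs
            }
        }
    }

    private void saveGrids(String timeRange, List<String> slices) {
        System.out.println("saved " + slices.size() + " slice(s) for " + timeRange);
    }

    public static void main(String[] args) {
        Map<String, List<String>> inventory = new LinkedHashMap<>();
        inventory.put("20131111/00-01Z", List.of("T", "Td"));
        new LockSaveUnlockSketch().put(inventory);
    }
}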
File diff suppressed because it is too large
|
@ -1,30 +1,30 @@
|
|||
##
|
||||
# This software was developed and / or modified by Raytheon Company,
|
||||
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
|
||||
#
|
||||
#
|
||||
# U.S. EXPORT CONTROLLED TECHNICAL DATA
|
||||
# This software product contains export-restricted data whose
|
||||
# export/transfer/disclosure is restricted by U.S. law. Dissemination
|
||||
# to non-U.S. persons whether in the United States or abroad requires
|
||||
# an export license or other authorization.
|
||||
#
|
||||
#
|
||||
# Contractor Name: Raytheon Company
|
||||
# Contractor Address: 6825 Pine Street, Suite 340
|
||||
# Mail Stop B8
|
||||
# Omaha, NE 68106
|
||||
# 402.291.0100
|
||||
#
|
||||
#
|
||||
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
|
||||
# further licensing information.
|
||||
##
|
||||
|
||||
import string, IrtAccess, JUtil
|
||||
import string, IrtAccess, JUtil, logging
|
||||
import xml, pickle, tempfile, os
|
||||
from xml.etree import ElementTree
|
||||
from xml.etree.ElementTree import Element, SubElement
|
||||
import LogStream
|
||||
from datetime import datetime
|
||||
from time import gmtime,strftime
|
||||
from time import gmtime, strftime
|
||||
from java.io import File
|
||||
from com.raytheon.uf.common.time import TimeRange
|
||||
from com.raytheon.uf.common.dataplugin.gfe.db.objects import GridLocation
|
||||
|
@ -34,68 +34,69 @@ from com.raytheon.edex.plugin.gfe.config import IFPServerConfigManager
|
|||
from com.raytheon.uf.common.serialization import SerializationUtil
|
||||
from com.raytheon.uf.common.localization import LocalizationFile
|
||||
from com.raytheon.uf.common.localization import PathManagerFactory
|
||||
from com.raytheon.uf.common.localization import LocalizationContext
|
||||
from com.raytheon.uf.common.localization import LocalizationContext
|
||||
from com.raytheon.uf.common.localization import LocalizationContext_LocalizationType as LocalizationType
|
||||
from com.raytheon.uf.common.localization import LocalizationContext_LocalizationLevel as LocalizationLevel
|
||||
|
||||
#
|
||||
# Utility module of isc functions
|
||||
#
|
||||
#
|
||||
# SOFTWARE HISTORY
|
||||
#
|
||||
#
|
||||
# Date Ticket# Engineer Description
|
||||
# ------------ ---------- ----------- --------------------------
|
||||
# 07/06/09 1995 bphillip Initial Creation.
|
||||
# 02/19/13 1637 randerso Removed unused import
|
||||
# 03/11/13 1759 dgilling Move siteConfig import into
|
||||
# methods where it's needed.
|
||||
#
|
||||
#
|
||||
# 11/07/13 2517 randerso Allow getLogger to override logLevel
|
||||
#
|
||||
#
|
||||
#
|
||||
|
||||
def getEditArea(name, siteID):
|
||||
|
||||
def getEditArea(name, siteID):
|
||||
|
||||
pathMgr = PathManagerFactory.getPathManager();
|
||||
|
||||
|
||||
commonStaticConfig = pathMgr.getContext(LocalizationType.COMMON_STATIC, LocalizationLevel.SITE)
|
||||
commonStaticConfig.setContextName(siteID)
|
||||
file = pathMgr.getFile(commonStaticConfig,"gfe/editAreas"+ File.separator + name + ".xml")
|
||||
|
||||
file = pathMgr.getFile(commonStaticConfig, "gfe/editAreas" + File.separator + name + ".xml")
|
||||
|
||||
if not os.path.exists(file.getPath()):
|
||||
commonStaticConfig = pathMgr.getContext(LocalizationType.COMMON_STATIC, LocalizationLevel.CONFIGURED)
|
||||
commonStaticConfig.setContextName(siteID)
|
||||
file = pathMgr.getFile(commonStaticConfig,"gfe/editAreas"+ File.separator + name + ".xml")
|
||||
|
||||
refData = None
|
||||
|
||||
file = pathMgr.getFile(commonStaticConfig, "gfe/editAreas" + File.separator + name + ".xml")
|
||||
|
||||
refData = None
|
||||
|
||||
try:
|
||||
if os.path.exists(file.getPath()):
|
||||
refData = SerializationUtil.jaxbUnmarshalFromXmlFile(file.getPath());
|
||||
else:
|
||||
LogStream.logProblem("EDIT AREA NOT FOUND: ",name," for site ",siteID)
|
||||
LogStream.logProblem("EDIT AREA NOT FOUND: ", name, " for site ", siteID)
|
||||
except:
|
||||
LogStream.logProblem("Unable to unmarshal " + name + " in iscExtract")
|
||||
|
||||
|
||||
return refData
|
||||
|
||||
def saveEditAreaGrid(maskName, iscMask, siteID):
|
||||
iscMask.getPolygons(CoordinateType.LATLON);
|
||||
|
||||
|
||||
pathMgr = PathManagerFactory.getPathManager();
|
||||
commonStaticConfig = pathMgr.getContext(LocalizationType.COMMON_STATIC, LocalizationLevel.CONFIGURED)
|
||||
commonStaticConfig.setContextName(siteID)
|
||||
sitePath = pathMgr.getFile(commonStaticConfig,"gfe/editAreas").getPath()
|
||||
sitePath = pathMgr.getFile(commonStaticConfig, "gfe/editAreas").getPath()
|
||||
editAreaPath = str(sitePath) + "/" + maskName + ".xml"
|
||||
SerializationUtil.jaxbMarshalToXmlFile(iscMask, editAreaPath)
|
||||
|
||||
def deleteEditArea(name, siteID):
|
||||
def deleteEditArea(name, siteID):
|
||||
pathMgr = PathManagerFactory.getPathManager()
|
||||
commonStaticConfig = pathMgr.getContext(LocalizationType.COMMON_STATIC, LocalizationLevel.CONFIGURED)
|
||||
commonStaticConfig.setContextName(siteID)
|
||||
file = pathMgr.getFile(commonStaticConfig,"gfe/editAreas"+ File.separator + name + ".xml")
|
||||
file = pathMgr.getFile(commonStaticConfig, "gfe/editAreas" + File.separator + name + ".xml")
|
||||
file.delete()
|
||||
|
||||
def transformTime(tr):
|
||||
|
||||
def transformTime(tr):
|
||||
return (int(tr.getStart().getTime() / 1000), int(tr.getEnd().getTime() / 1000))
|
||||
|
||||
def toJavaTimeRange(tr):
|
||||
|
@ -106,7 +107,7 @@ def swapCoord(coord):
|
|||
coord.y = coord.x
|
||||
coord.x = temp
|
||||
return coord
|
||||
|
||||
|
||||
def serverBoxText(server):
|
||||
#returns text based on the server dictionary that should be placed
|
||||
#into the dialog
|
||||
|
@ -117,13 +118,13 @@ def serverBoxText(server):
|
|||
hostport = server['host'] + "-primary"
|
||||
elif server['port'] == "98000001":
|
||||
hostport = server['host'] + "-svcbu"
|
||||
|
||||
|
||||
if hostport is None:
|
||||
hostport = server['host'] + "/" + server['port']
|
||||
|
||||
return server['site'] + "-> " + hostport + "@" + \
|
||||
server['mhsid'].lower()
|
||||
|
||||
|
||||
def sortServers(a, b):
|
||||
# sort function for the list of servers. Sorts in priority order for
|
||||
# most likely to have the data. Order is:
|
||||
|
@ -135,15 +136,15 @@ def sortServers(a, b):
|
|||
sameSiteA = (a['mhsid'] == a['site'])
|
||||
sameSiteB = (b['mhsid'] == b['site'])
|
||||
if sameSiteA and not sameSiteB:
|
||||
return - 1
|
||||
return -1
|
||||
elif not sameSiteA and sameSiteB:
|
||||
return 1
|
||||
return 1
|
||||
#both are same sites, check for host next
|
||||
else:
|
||||
regHostA = (a['host'][0:3] in ['dx4', 'px3'])
|
||||
regHostB = (b['host'][0:3] in ['dx4', 'px3'])
|
||||
if regHostA and not regHostB:
|
||||
return - 1
|
||||
return -1
|
||||
elif not regHostA and regHostB:
|
||||
return 1
|
||||
# same host, but not preferred host
|
||||
|
@ -151,11 +152,11 @@ def sortServers(a, b):
|
|||
regPortA = (a['port'] == "98000000")
|
||||
regPortB = (b['port'] == "98000000")
|
||||
if regPortA and not regPortB:
|
||||
return - 1
|
||||
return -1
|
||||
elif not regPortA and regPortB:
|
||||
return 1
|
||||
return 1 #must be non-standard, put at end of list
|
||||
|
||||
|
||||
def createDomainDict(xml):
|
||||
irt = IrtAccess.IrtAccess("")
|
||||
#decodes the packet of information from the ISC_REQUEST_QUERY call
|
||||
|
@ -171,7 +172,7 @@ def createDomainDict(xml):
|
|||
return None
|
||||
if serversE.tag != "servers":
|
||||
LogStream.logEvent('servers tag not found in createDomainDict')
|
||||
return None #invalid xml
|
||||
return None #invalid xml
|
||||
|
||||
#decode XML and create dictionary and parms list
|
||||
domains = {}
|
||||
|
@ -185,7 +186,7 @@ def createDomainDict(xml):
|
|||
if name == "site":
|
||||
site = value
|
||||
break
|
||||
if site is None:
|
||||
if site is None:
|
||||
LogStream.logProblem('Malformed domain site XML')
|
||||
continue
|
||||
for addressE in domainE.getchildren():
|
||||
|
@ -196,62 +197,62 @@ def createDomainDict(xml):
|
|||
list.append(info)
|
||||
guiText = serverBoxText(info)
|
||||
serverDictT2S[guiText] = info
|
||||
serverDictS2T[str(info)] = guiText
|
||||
serverDictS2T[str(info)] = guiText
|
||||
list.sort(sortServers)
|
||||
domains[site] = list
|
||||
|
||||
elif domainE.tag == "welist":
|
||||
elif domainE.tag == "welist":
|
||||
for parmE in domainE.getchildren():
|
||||
welist.append(parmE.text)
|
||||
welist.append(parmE.text)
|
||||
welist.sort()
|
||||
|
||||
|
||||
retVal = {}
|
||||
retVal['serverDictS2T'] = serverDictS2T
|
||||
retVal['serverDictT2S'] = serverDictT2S
|
||||
retVal['domains'] = domains
|
||||
|
||||
tempfile.tempdir = "/tmp/"
|
||||
|
||||
tempfile.tempdir = "/tmp/"
|
||||
fname = tempfile.mktemp(".bin")
|
||||
FILE = open(fname, "w")
|
||||
pickle.dump(retVal, FILE)
|
||||
FILE.close()
|
||||
|
||||
|
||||
FILE = open(fname, "r")
|
||||
lines = FILE.readlines()
|
||||
FILE.close()
|
||||
os.remove(fname)
|
||||
|
||||
|
||||
pickledFile = ""
|
||||
for line in lines:
|
||||
pickledFile += line
|
||||
|
||||
|
||||
return pickledFile
|
||||
|
||||
|
||||
def unPickle(str):
|
||||
import pickle,tempfile,os,JUtil
|
||||
tempfile.tempdir = "/tmp/"
|
||||
import pickle, tempfile, os, JUtil
|
||||
tempfile.tempdir = "/tmp/"
|
||||
fname = tempfile.mktemp(".bin")
|
||||
FILE = open(fname,"w")
|
||||
FILE = open(fname, "w")
|
||||
FILE.write(str)
|
||||
FILE.close()
|
||||
|
||||
FILE = open(fname,"r")
|
||||
|
||||
FILE = open(fname, "r")
|
||||
retVal = pickle.load(FILE)
|
||||
FILE.close()
|
||||
return retVal
|
||||
|
||||
|
||||
def getRequestXML(xml,selectedServers, selectedWEList):
|
||||
|
||||
def getRequestXML(xml, selectedServers, selectedWEList):
|
||||
irt = IrtAccess.IrtAccess("")
|
||||
selectedServers = JUtil.javaStringListToPylist(selectedServers)
|
||||
selectedWElist = JUtil.javaStringListToPylist(selectedWEList)
|
||||
|
||||
response = unPickle(createDomainDict(xml))
|
||||
|
||||
response = unPickle(createDomainDict(xml))
|
||||
serverDictT2S = response['serverDictT2S']
|
||||
domainDict = response['domains']
|
||||
|
||||
|
||||
iscReqE = Element('iscrequest')
|
||||
servers = []
|
||||
servers = []
|
||||
for serverT in selectedServers:
|
||||
server = serverDictT2S[serverT]
|
||||
servers.append(server)
|
||||
|
@ -275,46 +276,44 @@ def getRequestXML(xml,selectedServers, selectedWEList):
|
|||
|
||||
# send to ifpServer
|
||||
xmlreq = ElementTree.tostring(iscReqE)
|
||||
|
||||
|
||||
return xmlreq;
|
||||
|
||||
def getLogger(scriptName, logName=None):
|
||||
import logging
|
||||
# by relocating this import here we allow
|
||||
def getLogger(scriptName, logName=None, logLevel=logging.INFO):
|
||||
# by relocating this import here we allow
|
||||
# com.raytheon.edex.plugin.gfe.isc.IscScript to dynamically
|
||||
# modify its include path with the proper siteConfig just before
|
||||
# execution time
|
||||
import siteConfig
|
||||
|
||||
|
||||
if logName is None:
|
||||
logPath=siteConfig.GFESUITE_LOGDIR+"/"+strftime("%Y%m%d", gmtime())
|
||||
logName=scriptName+".log"
|
||||
logPath = siteConfig.GFESUITE_LOGDIR + "/" + strftime("%Y%m%d", gmtime())
|
||||
logName = scriptName + ".log"
|
||||
else:
|
||||
logPath=os.path.dirname(logName)
|
||||
if len(logPath)==0:
|
||||
logPath=siteConfig.GFESUITE_LOGDIR+"/"+strftime("%Y%m%d", gmtime())
|
||||
logName=os.path.basename(logName)
|
||||
|
||||
logFile=logPath+"/"+logName
|
||||
logPath = os.path.dirname(logName)
|
||||
if len(logPath) == 0:
|
||||
logPath = siteConfig.GFESUITE_LOGDIR + "/" + strftime("%Y%m%d", gmtime())
|
||||
logName = os.path.basename(logName)
|
||||
|
||||
logFile = logPath + "/" + logName
|
||||
|
||||
if not os.path.exists(logPath):
|
||||
os.makedirs(logPath)
|
||||
|
||||
|
||||
theLog = logging.getLogger(scriptName)
|
||||
theLog.setLevel(logging.INFO)
|
||||
theLog.setLevel(logLevel)
|
||||
ch = logging.FileHandler(logFile)
|
||||
|
||||
ch.setLevel(logging.INFO)
|
||||
|
||||
ch.setLevel(logLevel)
|
||||
formatter = logging.Formatter("%(levelname)s %(asctime)s [%(process)d:%(thread)d] %(filename)s: %(message)s")
|
||||
ch.setFormatter(formatter)
|
||||
for h in theLog.handlers:
|
||||
theLog.removeHandler(h)
|
||||
theLog.addHandler(ch)
|
||||
return theLog
|
||||
|
||||
|
||||
def tupleToString(*msg):
|
||||
concatMsg=""
|
||||
concatMsg = ""
|
||||
for m in msg:
|
||||
concatMsg=concatMsg+" "+str(m)
|
||||
concatMsg = concatMsg + " " + str(m)
|
||||
return concatMsg
|
||||
|
|
@ -1,19 +1,19 @@
|
|||
##
|
||||
# This software was developed and / or modified by Raytheon Company,
|
||||
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
|
||||
#
|
||||
#
|
||||
# U.S. EXPORT CONTROLLED TECHNICAL DATA
|
||||
# This software product contains export-restricted data whose
|
||||
# export/transfer/disclosure is restricted by U.S. law. Dissemination
|
||||
# to non-U.S. persons whether in the United States or abroad requires
|
||||
# an export license or other authorization.
|
||||
#
|
||||
#
|
||||
# Contractor Name: Raytheon Company
|
||||
# Contractor Address: 6825 Pine Street, Suite 340
|
||||
# Mail Stop B8
|
||||
# Omaha, NE 68106
|
||||
# 402.291.0100
|
||||
#
|
||||
#
|
||||
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
|
||||
# further licensing information.
|
||||
##
|
||||
|
@ -30,14 +30,15 @@ import LogStream, fcntl
|
|||
# Vector: ((magGrid, dirGrid), history)
|
||||
# Weather: ((byteGrid, key), history)
|
||||
# Discrete: ((byteGrid, key), history)
|
||||
#
|
||||
#
|
||||
# SOFTWARE HISTORY
|
||||
#
|
||||
#
|
||||
# Date Ticket# Engineer Description
|
||||
# ------------ ---------- ----------- --------------------------
|
||||
# 07/06/09 1995 bphillip Initial Creation.
|
||||
#
|
||||
#
|
||||
# 11/05/13 2517 randerso Improve memory utilization
|
||||
#
|
||||
#
|
||||
#
|
||||
|
||||
|
||||
|
@ -54,7 +55,7 @@ class MergeGrid:
|
|||
# gridType = 'SCALAR', 'VECTOR', 'WEATHER', 'DISCRETE'
|
||||
#---------------------------------------------------------------------
|
||||
def __init__(self, creationTime, siteID, inFillValue, outFillValue,
|
||||
areaMask, gridType, discreteKeys = None):
|
||||
areaMask, gridType, discreteKeys=None):
|
||||
self.__creationTime = creationTime
|
||||
self.__siteID = siteID
|
||||
self.__inFillV = inFillValue
|
||||
|
@ -91,13 +92,13 @@ class MergeGrid:
|
|||
gridB = wxB[0]
|
||||
key = wxA[1]
|
||||
newGrid = numpy.zeros_like(gridB)
|
||||
|
||||
|
||||
for k in range(len(wxB[1])):
|
||||
index = self.__findKey(wxB[1][k], key)
|
||||
newGrid = numpy.where(gridB == k, index, newGrid)
|
||||
|
||||
newGrid[gridB == k] = index
|
||||
|
||||
return (key, wxA[0], newGrid)
|
||||
|
||||
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# update history strings
|
||||
|
@ -107,17 +108,17 @@ class MergeGrid:
|
|||
# returns None if no history is present.
|
||||
#---------------------------------------------------------------------
|
||||
def __updateHistoryStrings(self, historyA, historyB):
|
||||
|
||||
|
||||
out = []
|
||||
|
||||
# removal any old entry
|
||||
|
||||
# removal any old entry
|
||||
if historyB is not None:
|
||||
for h in historyB:
|
||||
index = string.find(h, ":"+ self.__siteID + "_GRID")
|
||||
index = string.find(h, ":" + self.__siteID + "_GRID")
|
||||
if index == -1:
|
||||
out.append(h)
|
||||
|
||||
# if add mode, add in new entries
|
||||
out.append(h)
|
||||
|
||||
# if add mode, add in new entries
|
||||
if historyA is not None:
|
||||
for h in historyA:
|
||||
out.append(h)
|
||||
|
@ -125,33 +126,33 @@ class MergeGrid:
|
|||
if len(out) > 0:
|
||||
return out
|
||||
else:
|
||||
return None
|
||||
return None
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# merge scalar grid
|
||||
# Note: gridA can be None, which indicates that the data
|
||||
# is to be blanked out, i.e., made invalid. gridB can also be
|
||||
# none, which indicates that there is no destination grid and one must
|
||||
# be created.
|
||||
# be created.
|
||||
#---------------------------------------------------------------------
|
||||
def __mergeScalarGrid(self, gridA, gridB):
|
||||
if gridA is None and gridB is None:
|
||||
if gridA is None and gridB is None:
|
||||
return None
|
||||
|
||||
|
||||
# merge the grids
|
||||
if gridA is not None:
|
||||
inMask = numpy.not_equal(gridA, self.__inFillV)
|
||||
mask = numpy.logical_and(inMask, self.__areaMask)
|
||||
|
||||
mask = numpy.not_equal(gridA, self.__inFillV)
|
||||
numpy.logical_and(mask, self.__areaMask, mask)
|
||||
|
||||
if gridB is None:
|
||||
gridB = numpy.zeros(gridA.shape) + self.__outFillV
|
||||
return numpy.where(mask, gridA, gridB)
|
||||
|
||||
return numpy.where(mask, gridA, self.__outFillV)
|
||||
else:
|
||||
return numpy.where(mask, gridA, gridB)
|
||||
|
||||
# blank out the data
|
||||
else:
|
||||
blankGrid = numpy.zeros(gridB.shape) + self.__outFillV
|
||||
return numpy.where(self.__areaMask, blankGrid, gridB)
|
||||
|
||||
return numpy.where(self.__areaMask, self.__outFillV, gridB)
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# merge vector grid
|
||||
# Note: gridA can be None, which indicates that the data
|
||||
|
@ -159,50 +160,47 @@ class MergeGrid:
|
|||
# none, which indicates that there is no destination grid and one must
|
||||
# be created.
|
||||
#---------------------------------------------------------------------
|
||||
def __mergeVectorGrid(self, gridA, gridB):
|
||||
def __mergeVectorGrid(self, gridA, gridB):
|
||||
if gridA is None and gridB is None:
|
||||
return None
|
||||
|
||||
|
||||
# merge the grids
|
||||
if gridA is not None:
|
||||
inMask = numpy.not_equal(gridA[0], self.__inFillV)
|
||||
mask = numpy.logical_and(inMask, self.__areaMask)
|
||||
|
||||
mask = numpy.not_equal(gridA[0], self.__inFillV)
|
||||
numpy.logical_and(mask, self.__areaMask, mask)
|
||||
|
||||
if gridB is None:
|
||||
gridSize = gridA[0].shape
|
||||
gridB = (numpy.zeros(gridSize) + self.__outFillV,
|
||||
numpy.zeros(gridSize) + 0.0)
|
||||
|
||||
magGrid = numpy.where(mask, gridA[0], gridB[0])
|
||||
dirGrid = numpy.where(mask, gridA[1], gridB[1])
|
||||
magGrid = numpy.where(mask, gridA[0], self.__outFillV)
|
||||
dirGrid = numpy.where(mask, gridA[1], 0.0)
|
||||
else:
|
||||
magGrid = numpy.where(mask, gridA[0], gridB[0])
|
||||
dirGrid = numpy.where(mask, gridA[1], gridB[1])
|
||||
return (magGrid, dirGrid)
|
||||
|
||||
|
||||
# blank out the data
|
||||
else:
|
||||
blankGrid = numpy.zeros(gridB[0].shape) + self.__outFillV
|
||||
blankDirGrid = numpy.zeros_like(gridB[1])
|
||||
magGrid = numpy.where(self.__areaMask, blankGrid, gridB[0])
|
||||
dirGrid = numpy.where(self.__areaMask, blankDirGrid, gridB[1])
|
||||
magGrid = numpy.where(self.__areaMask, self.__outFillV, gridB[0])
|
||||
dirGrid = numpy.where(self.__areaMask, 0.0, gridB[1])
|
||||
return (magGrid, dirGrid)
|
||||
|
||||
|
||||
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# merge weather grid
|
||||
#
|
||||
# Note the outFillV is ignored for now, all out-of-bounds points will
|
||||
# get the <NoWx> value.
|
||||
#---------------------------------------------------------------------
|
||||
def __mergeWeatherGrid(self, gridA, gridB):
|
||||
|
||||
if gridA is None and gridB is None:
|
||||
def __mergeWeatherGrid(self, gridA, gridB):
|
||||
|
||||
if gridA is None and gridB is None:
|
||||
return None
|
||||
|
||||
|
||||
noWx = "<NoCov>:<NoWx>:<NoInten>:<NoVis>:"
|
||||
# merge the grids
|
||||
if gridA is not None:
|
||||
inMask = numpy.not_equal(gridA[0], self.__inFillV)
|
||||
mask = numpy.logical_and(inMask, self.__areaMask)
|
||||
|
||||
mask = numpy.not_equal(gridA[0], self.__inFillV)
|
||||
numpy.logical_and(mask, self.__areaMask, mask)
|
||||
|
||||
if gridB is None: #make an empty grid
|
||||
noWxKeys = []
|
||||
noWxGrid = numpy.empty_like(gridA[0])
|
||||
|
@ -211,15 +209,15 @@ class MergeGrid:
|
|||
(commonkey, remapG, dbG) = self.__commonizeKey(gridA, gridB)
|
||||
mergedGrid = numpy.where(mask, remapG, dbG)
|
||||
return (mergedGrid, commonkey)
|
||||
|
||||
|
||||
# blank out the data
|
||||
else:
|
||||
else:
|
||||
blankGrid = numpy.empty_like(gridB[0])
|
||||
blankGrid.fill(self.__findKey(noWx, gridB[1]))
|
||||
key = gridB[1]
|
||||
grid = numpy.where(self.__areaMask, blankGrid, gridB[0])
|
||||
return (grid, key)
|
||||
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# merge discrete grid
|
||||
#
|
||||
|
@ -231,23 +229,23 @@ class MergeGrid:
|
|||
return None
|
||||
|
||||
noKey = self.__discreteKeys[0]
|
||||
|
||||
|
||||
# merge the grids
|
||||
if gridA is not None:
|
||||
inMask = numpy.not_equal(gridA[0], self.__inFillV)
|
||||
mask = numpy.logical_and(inMask, self.__areaMask)
|
||||
|
||||
mask = numpy.not_equal(gridA[0], self.__inFillV)
|
||||
numpy.logical_and(mask, self.__areaMask)
|
||||
|
||||
if gridB is None: #make an empty grid
|
||||
noKeys = []
|
||||
noGrid = numpy.empty_like(gridA[0])
|
||||
noGrid.fill(self.__findKey(noKey, noKeys))
|
||||
gridB = (noGrid, noKeys)
|
||||
|
||||
|
||||
(commonkey, remapG, dbG) = \
|
||||
self.__commonizeKey(gridA, gridB)
|
||||
mergedGrid = numpy.where(mask, remapG, dbG)
|
||||
return (mergedGrid, commonkey)
|
||||
|
||||
|
||||
# blank out the data
|
||||
else:
|
||||
blankGrid = numpy.empty_like(gridB[0])
|
||||
|
@ -255,7 +253,7 @@ class MergeGrid:
|
|||
key = gridB[1]
|
||||
grid = numpy.where(self.__areaMask, blankGrid, gridB[0])
|
||||
return (grid, key)
|
||||
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# mergeGrid
|
||||
# Merges the grid
|
||||
|
@ -270,8 +268,8 @@ class MergeGrid:
|
|||
# none, which indicates that there is no destination grid and one must
|
||||
# be created.
|
||||
#---------------------------------------------------------------------
|
||||
def mergeGrid(self, gridAIn, gridBIn):
|
||||
# merge the grids
|
||||
def mergeGrid(self, gridAIn, gridBIn):
|
||||
# merge the grids
|
||||
if gridAIn is not None:
|
||||
gridA = gridAIn[0]
|
||||
historyA = gridAIn[1]
|
||||
|
@ -279,28 +277,28 @@ class MergeGrid:
|
|||
gridA = None
|
||||
historyA = None
|
||||
if gridBIn is not None:
|
||||
gridB = gridBIn[0]
|
||||
gridB = gridBIn[0]
|
||||
historyB = gridBIn[1]
|
||||
else:
|
||||
gridB = None
|
||||
historyB = None
|
||||
|
||||
if self.__gridType == 'SCALAR':
|
||||
mergedGrid = self.__mergeScalarGrid(gridA, gridB)
|
||||
|
||||
if self.__gridType == 'SCALAR':
|
||||
mergedGrid = self.__mergeScalarGrid(gridA, gridB)
|
||||
|
||||
elif self.__gridType == 'VECTOR':
|
||||
mergedGrid = self.__mergeVectorGrid(gridA, gridB)
|
||||
|
||||
|
||||
elif self.__gridType == 'WEATHER':
|
||||
mergedGrid = self.__mergeWeatherGrid(gridA, gridB)
|
||||
|
||||
|
||||
elif self.__gridType == 'DISCRETE':
|
||||
mergedGrid = self.__mergeDiscreteGrid(gridA, gridB)
|
||||
|
||||
|
||||
else:
|
||||
mergedGrid = None
|
||||
|
||||
# merge History
|
||||
history = self.__updateHistoryStrings(historyA, historyB)
|
||||
|
||||
|
||||
return (mergedGrid, history)
|
||||
|
|
|
@ -1,67 +1,53 @@
|
|||
<beans xmlns="http://www.springframework.org/schema/beans"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
|
||||
<beans
|
||||
xmlns="http://www.springframework.org/schema/beans"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
|
||||
http://camel.apache.org/schema/spring http://camel.apache.org/schema/spring/camel-spring.xsd">
|
||||
|
||||
<bean id="obsDecoder" class="com.raytheon.edex.plugin.obs.ObsDecoder"/>
|
||||
|
||||
<bean id="metarPointData" class="com.raytheon.edex.plugin.obs.metar.MetarPointDataTransform"/>
|
||||
|
||||
<bean id="obsSeparator" class="com.raytheon.edex.plugin.obs.metar.MetarSeparator" />
|
||||
<bean id="obsDecoder" class="com.raytheon.edex.plugin.obs.ObsDecoder" />
|
||||
|
||||
<bean id="obsDistRegistry" factory-bean="distributionSrv"
|
||||
factory-method="register">
|
||||
<constructor-arg value="obs" />
|
||||
<constructor-arg value="jms-dist:queue:Ingest.obs"/>
|
||||
</bean>
|
||||
<bean id="metarPointData" class="com.raytheon.edex.plugin.obs.metar.MetarPointDataTransform" />
|
||||
|
||||
<bean id="obsCamelRegistered" factory-bean="contextManager"
|
||||
factory-method="register"
|
||||
depends-on="persistCamelRegistered,
|
||||
<bean id="obsSeparator" class="com.raytheon.edex.plugin.obs.metar.MetarSeparator" />
|
||||
|
||||
<bean id="obsDistRegistry" factory-bean="distributionSrv" factory-method="register">
|
||||
<constructor-arg value="obs" />
|
||||
<constructor-arg value="jms-dist:queue:Ingest.obs" />
|
||||
</bean>
|
||||
|
||||
<bean id="obsCamelRegistered" factory-bean="contextManager" factory-method="register"
|
||||
depends-on="persistCamelRegistered,
|
||||
shefCamelRegistered,
|
||||
metarToHMDBCamelRegistered">
|
||||
<constructor-arg ref="obs-camel"/>
|
||||
</bean>
|
||||
<constructor-arg ref="obs-camel" />
|
||||
</bean>
|
||||
|
||||
<camelContext id="obs-camel"
|
||||
xmlns="http://camel.apache.org/schema/spring"
|
||||
errorHandlerRef="errorHandler"
|
||||
autoStartup="false">
|
||||
<!--
|
||||
<endpoint id="metarFileEndpoint" uri="file:${edex.home}/data/sbn/metar?noop=true&idempotent=false" />
|
||||
|
||||
<route id="metarFileConsumerRoute">
|
||||
<from ref="metarFileEndpoint" />
|
||||
<bean ref="fileToString" />
|
||||
<setHeader headerName="pluginName">
|
||||
<constant>obs</constant>
|
||||
</setHeader>
|
||||
<to uri="jms-durable:queue:Ingest.obs" />
|
||||
</route>
|
||||
-->
|
||||
|
||||
<!-- Begin METAR routes -->
|
||||
<route id="metarIngestRoute">
|
||||
<from uri="jms-durable:queue:Ingest.obs"/>
|
||||
<setHeader headerName="pluginName">
|
||||
<constant>obs</constant>
|
||||
</setHeader>
|
||||
<doTry>
|
||||
<pipeline>
|
||||
<bean ref="stringToFile" />
|
||||
<bean ref="obsDecoder" method="decode" />
|
||||
<bean ref="metarPointData" method="toPointData" />
|
||||
<multicast>
|
||||
<to uri="direct-vm:persistIndexAlert" />
|
||||
<to uri="direct-vm:metarToShef" />
|
||||
<to uri="direct-vm:metarToHMDB"/>
|
||||
</multicast>
|
||||
</pipeline>
|
||||
<doCatch>
|
||||
<exception>java.lang.Throwable</exception>
|
||||
<to uri="log:metar?level=ERROR"/>
|
||||
</doCatch>
|
||||
</doTry>
|
||||
</route>
|
||||
</camelContext>
|
||||
<camelContext id="obs-camel" xmlns="http://camel.apache.org/schema/spring"
|
||||
errorHandlerRef="errorHandler" autoStartup="false">
|
||||
|
||||
<!-- Begin METAR routes -->
|
||||
<route id="metarIngestRoute">
|
||||
<from uri="jms-durable:queue:Ingest.obs" />
|
||||
<setHeader headerName="pluginName">
|
||||
<constant>obs</constant>
|
||||
</setHeader>
|
||||
<doTry>
|
||||
<pipeline>
|
||||
<bean ref="stringToFile" />
|
||||
<bean ref="obsDecoder" method="decode" />
|
||||
<bean ref="dupElim" />
|
||||
<bean ref="metarPointData" method="toPointData" />
|
||||
<multicast>
|
||||
<to uri="direct-vm:persistIndexAlert" />
|
||||
<to uri="direct-vm:metarToShef" />
|
||||
<to uri="direct-vm:metarToHMDB" />
|
||||
</multicast>
|
||||
</pipeline>
|
||||
<doCatch>
|
||||
<exception>java.lang.Throwable</exception>
|
||||
<to uri="log:metar?level=ERROR" />
|
||||
</doCatch>
|
||||
</doTry>
|
||||
</route>
|
||||
</camelContext>
|
||||
</beans>
|
|
@@ -1,68 +1,52 @@
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
<beans
xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
http://camel.apache.org/schema/spring http://camel.apache.org/schema/spring/camel-spring.xsd">

<bean id="sfcobsDecoder" class="com.raytheon.edex.plugin.sfcobs.SfcObsDecoder" />
<bean id="sfcobsSeparator" class="com.raytheon.edex.plugin.sfcobs.SfcObsSeparator" />
<bean id="sfcobsDecoder" class="com.raytheon.edex.plugin.sfcobs.SfcObsDecoder" />
<bean id="sfcobsSeparator" class="com.raytheon.edex.plugin.sfcobs.SfcObsSeparator" />

<bean id="sfcobsPointData" class="com.raytheon.uf.common.dataplugin.sfcobs.dao.SfcObsPointDataTransform">
<constructor-arg ref="sfcobsPluginName" />
</bean>

<bean id="sfcobsDistRegistry" factory-bean="distributionSrv"
factory-method="register">
<constructor-arg value="sfcobs" />
<constructor-arg value="jms-dist:queue:Ingest.sfcobs"/>
</bean>

<bean id="sfcobsCamelRegistered" factory-bean="contextManager"
factory-method="register"
depends-on="persistCamelRegistered,
<bean id="sfcobsDistRegistry" factory-bean="distributionSrv" factory-method="register">
<constructor-arg value="sfcobs" />
<constructor-arg value="jms-dist:queue:Ingest.sfcobs" />
</bean>

<bean id="sfcobsCamelRegistered" factory-bean="contextManager" factory-method="register"
depends-on="persistCamelRegistered,
shefCamelRegistered">
<constructor-arg ref="sfcobs-camel"/>
</bean>
<constructor-arg ref="sfcobs-camel" />
</bean>

<camelContext id="sfcobs-camel"
xmlns="http://camel.apache.org/schema/spring"
errorHandlerRef="errorHandler"
autoStartup="false">
<!--
<endpoint id="sfcobsFileEndpoint"
uri="file:${edex.home}/data/sbn/sfcobs?noop=true&idempotent=false" />
<camelContext id="sfcobs-camel" xmlns="http://camel.apache.org/schema/spring"
errorHandlerRef="errorHandler" autoStartup="false">

<route id="sfcobsFileConsumerRoute">
<from ref="sfcobsFileEndpoint" />
<bean ref="fileToString" />
<setHeader headerName="pluginName">
<constant>sfcobs</constant>
</setHeader>
<to uri="jms-durable:queue:Ingest.sfcobs" />
</route>
-->

<!-- Begin sfcobs routes -->
<route id="sfcobsIngestRoute">
<from uri="jms-durable:queue:Ingest.sfcobs"/>
<setHeader headerName="pluginName">
<constant>sfcobs</constant>
</setHeader>
<bean ref="stringToFile" />
<doTry>
<pipeline>
<bean ref="sfcobsDecoder" method="decode" />
<!-- Begin sfcobs routes -->
<route id="sfcobsIngestRoute">
<from uri="jms-durable:queue:Ingest.sfcobs" />
<setHeader headerName="pluginName">
<constant>sfcobs</constant>
</setHeader>
<bean ref="stringToFile" />
<doTry>
<pipeline>
<bean ref="sfcobsDecoder" method="decode" />
<bean ref="dupElim" />
<bean ref="sfcobsPointData" method="toPointData" />
<multicast>
<to uri="direct-vm:persistIndexAlert" />
<to uri="direct-vm:synopticToShef"/>
<to uri="direct-vm:persistIndexAlert" />
<to uri="direct-vm:synopticToShef" />
</multicast>
</pipeline>
<doCatch>
<exception>java.lang.Throwable</exception>
<to uri="log:sfcobs?level=ERROR"/>
</doCatch>
</doTry>
</route>

</camelContext>
</pipeline>
<doCatch>
<exception>java.lang.Throwable</exception>
<to uri="log:sfcobs?level=ERROR" />
</doCatch>
</doTry>
</route>
</camelContext>
</beans>
@@ -63,7 +63,7 @@ import com.raytheon.uf.common.util.ConvertUtil;
*/
public class DataURIUtil {

private static final String PLUGIN_NAME_KEY = "pluginName";
public static final String PLUGIN_NAME_KEY = "pluginName";

private static final String FIELD_SEPARATOR = ".";
@@ -6,6 +6,8 @@ archive.cron=0+40+*+*+*+?
archive.purge.enable=true
# purge archives
archive.purge.cron=0+5+0/3+*+*+?
# compress database records
archive.compression.enable=true

# to disable a specific archive, use property archive.disable=pluginName,pluginName...
#archive.disable=grid,text,acars
@@ -19,6 +19,8 @@
**/
package com.raytheon.uf.edex.archive;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
@@ -80,7 +82,8 @@ import com.raytheon.uf.edex.database.plugin.PluginFactory;
* Jan 18, 2013 1469  bkowal   Removed the hdf5 data directory.
* Oct 23, 2013 2478  rferrel  Make date format thread safe.
*                             Add debug information.
* Nov 05, 2013 2499  rjpeter  Repackaged, removed config files, always compresses.
* Nov 05, 2013 2499  rjpeter  Repackaged, removed config files, always compresses hdf5.
* Nov 11, 2013 2478  rjpeter  Updated data store copy to always copy hdf5.
* </pre>
*
* @author rjpeter
@@ -114,12 +117,17 @@ public class DatabaseArchiver implements IPluginArchiver {
/** Cluster time out on lock. */
private static final int CLUSTER_LOCK_TIMEOUT = 60000;

/** Chunk size for I/O Buffering and Compression */
private static final int CHUNK_SIZE = 8192;

/** Mapping for plug-in formatters. */
private final Map<String, IPluginArchiveFileNameFormatter> pluginArchiveFormatters;

/** When true dump the pdos. */
private final boolean debugArchiver;

private final boolean compressDatabaseFiles;

/**
* The constructor.
*/
@@ -128,6 +136,8 @@ public class DatabaseArchiver implements IPluginArchiver {
pluginArchiveFormatters.put("default",
        new DefaultPluginArchiveFileNameFormatter());
debugArchiver = Boolean.getBoolean("archive.debug.enable");
compressDatabaseFiles = Boolean
        .getBoolean("archive.compression.enable");
}

@Override
@@ -259,12 +269,9 @@ public class DatabaseArchiver implements IPluginArchiver {
        .join(archivePath, pluginName, dataStoreFile));

try {
    // data must be older than 30 minutes, and no older than
    // hours to keep hours need to lookup plugin and see if
    // compression matches, or embed in configuration the
    // compression level on archive, but would still need to
    // lookup plugin
    ds.copy(outputDir, compRequired, "lastArchived", 0, 0);
    // copy the changed hdf5 file, does repack if
    // compRequired, otherwise pure file copy
    ds.copy(outputDir, compRequired, null, 0, 0);
} catch (StorageException e) {
    statusHandler.handle(Priority.PROBLEM,
            e.getLocalizedMessage());
@@ -325,7 +332,11 @@ public class DatabaseArchiver implements IPluginArchiver {
    path.setLength(path.length() - 3);
}
int pathDebugLength = path.length();
path.append(".bin.gz");
if (compressDatabaseFiles) {
    path.append(".bin.gz");
} else {
    path.append(".bin");
}

File file = new File(path.toString());
List<PersistableDataObject> pdosToSerialize = entry.getValue();
@@ -338,7 +349,13 @@ public class DatabaseArchiver implements IPluginArchiver {
try {

    // created gzip'd stream
    is = new GZIPInputStream(new FileInputStream(file), 8192);
    if (compressDatabaseFiles) {
        is = new GZIPInputStream(new FileInputStream(file),
                CHUNK_SIZE);
    } else {
        is = new BufferedInputStream(new FileInputStream(file),
                CHUNK_SIZE);
    }

    // transform back for list append
    @SuppressWarnings("unchecked")
@@ -400,7 +417,12 @@ public class DatabaseArchiver implements IPluginArchiver {
}

// created gzip'd stream
os = new GZIPOutputStream(new FileOutputStream(file), 8192);
if (compressDatabaseFiles) {
    os = new GZIPOutputStream(new FileOutputStream(file), CHUNK_SIZE);
} else {
    os = new BufferedOutputStream(new FileOutputStream(file),
            CHUNK_SIZE);
}

// Thrift serialize pdo list
SerializationUtil.transformToThriftUsingStream(pdosToSerialize,
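Editor's note: the two hunks above follow the same pattern on the read and write sides: wrap the file stream in a GZIP stream only when archive.compression.enable is set, otherwise fall back to a plain buffered stream with the same chunk size. A minimal, self-contained sketch of that pattern follows; the class and file names here are made up for illustration and are not the actual AWIPS classes.

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.zip.GZIPOutputStream;

public class ArchiveStreamSketch {
    // mirrors the CHUNK_SIZE constant introduced in DatabaseArchiver
    private static final int CHUNK_SIZE = 8192;

    // choose a gzip or plain buffered stream based on the compression flag
    static OutputStream openArchiveStream(File file, boolean compress) throws IOException {
        FileOutputStream fos = new FileOutputStream(file);
        return compress ? new GZIPOutputStream(fos, CHUNK_SIZE)
                        : new BufferedOutputStream(fos, CHUNK_SIZE);
    }

    public static void main(String[] args) throws IOException {
        // same system property the archiver reads in its constructor
        boolean compress = Boolean.getBoolean("archive.compression.enable");
        File out = new File(compress ? "pdos.bin.gz" : "pdos.bin");
        try (OutputStream os = openArchiveStream(out, compress)) {
            os.write("example payload".getBytes());
        }
    }
}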
@@ -27,6 +27,7 @@
* Aug 05, 2013 2224 rferrel Changes to add dataSet tags.
* Oct 01, 2013 2147 rferrel Date time stamp no longer requires an hour field.
* Nov 05, 2013 2497 rferrel Change root directory.
* Nov 13, 2013 2549 rferrel Changes to GFE and modelsounding.
*
* @author rferrel
* @version 1.0
@@ -151,7 +152,7 @@
<dateGroupIndices>3,4,5,6</dateGroupIndices>
</dataSet>
<dataSet>
<dirPattern>gfe/(.*)/(Fcst|Official)</dirPattern>
<dirPattern>gfe/(.*)/(.*)</dirPattern>
<filePattern>.*_(\d{4})(\d{2})(\d{2})_.*</filePattern>
<displayLabel>{1} - {2}</displayLabel>
<dateGroupIndices>3,4,5</dateGroupIndices>
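Editor's note: the only change in this dataSet is the relaxed <dirPattern>, so any GFE database directory now matches, not just Fcst and Official. A quick illustrative check of that behavior (the directory path below is made up):

import java.util.regex.Pattern;

public class GfeDirPatternCheck {
    public static void main(String[] args) {
        Pattern before = Pattern.compile("gfe/(.*)/(Fcst|Official)");
        Pattern after = Pattern.compile("gfe/(.*)/(.*)");
        String dir = "gfe/OAX/Practice"; // hypothetical site/database directory
        System.out.println(before.matcher(dir).matches()); // false
        System.out.println(after.matcher(dir).matches());  // true
    }
}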
@@ -177,11 +178,11 @@
<filePattern>.*-(\d{4})-(\d{2})-(\d{2})-(\d{2})-.*</filePattern>
</dataSet>
<dataSet>
<dirPattern>(modelsounding)/(.*)</dirPattern>
<dirPattern>(modelsounding)/(.*)/.*</dirPattern>
<dirPattern>(bufrmos)(.*)</dirPattern>
<displayLabel>{1} - {2}</displayLabel>
<dateGroupIndices>3,4,5,6</dateGroupIndices>
<filePattern>.*(\d{4})-(\d{2})-(\d{2})[-_](\d{2}).*</filePattern>
<filePattern>.*(\d{4})-(\d{2})-(\d{2})-(\d{2}).*</filePattern>
</dataSet>
</category>
<category>
@@ -219,6 +219,20 @@
<dateGroupIndices>1,2,3,4</dateGroupIndices>
</dataSet>
</category>
<category>
<name>Radar (Local)</name>
<extRetentionHours>168</extRetentionHours>
<dataSet>
<dirPattern>radar/([k|t|e|f]\w{3})/.*</dirPattern> <!-- one level like GSM or HI --> <!-- e and f are for FAA ASR and ARSR radars -->
<dirPattern>radar/(k...|t...|e...|f...)/.*/.*</dirPattern> <!-- two levels like ML -->
<dirPattern>radar/(k...|t...|e...|f...)/.*/.*/.*</dirPattern> <!-- three levels like ML -->
<dirPattern>radar/(k...|t...|e...|f...)/.*/.*/.*/.*</dirPattern> <!-- four levels like Z -->
<dirPattern>radar/(k...|t...|e...|f...)/.*/.*/.*/.*/.*</dirPattern> <!-- five levels like Z (superres) -->
<filePattern>(\w{4}).(\d*).(\d{4})(\d{2})(\d{2})_(\d{2})(\d{2})(.*)</filePattern>
<displayLabel>{1}</displayLabel>
<dateGroupIndices>4,5,6,7</dateGroupIndices>
</dataSet>
</category>
<category>
<name>Satellite</name>
<extRetentionHours>168</extRetentionHours>
@@ -1,126 +1,126 @@
#!/bin/bash
if [ ${#AWIPS_HOME} = 0 ]
then
path_to_script=`readlink -f $0`
export AWIPS_HOME=$(dirname $(dirname $(dirname $(dirname $path_to_script))))
fi

. ${AWIPS_HOME}/GFESuite/ServiceBackup/configuration/svcbu.env

# Create the log file
logdir=${IFPS_LOG}/`date +%Y%m%d`
logfil=svcbu_receive_grids_from_bksite`date +%H%M`
logfile=${logdir}/${logfil}
[ ! -d ${logdir} ] && (umask 000;mkdir ${logdir})
touch ${logdir}/${logfil}
exec 1>${logdir}/${logfil} 2>&1

# Check the status of the lock file to see if we are OK to proceed
if [ -f ${LOCK_DIR}/importBkSiteGrids ];
then
log_msg Cannot process grids.
exit 1
fi

touch ${LOCK_DIR}/importBkSiteGrids

log_msg 0

# Retrieve the name of the site from the tar file.
import_grd_file=${1}
if [ -a ${import_grd_file} ]
then

log_msg "Import Grids file is ${import_grd_file}"
mv ${import_grd_file} ${GFESUITE_HOME}/Grd
cd ${GFESUITE_HOME}

tar xf Grd
if [ $? -eq 0 ]; then
SITE=`cat siteID.txt | tr '[a-z]' '[A-Z]'`
site=`echo $SITE | tr '[A-Z]' '[a-z]'`
gunzip -f ${site}Grd.netcdf.gz
else
# move the file to appropriate directory.
mv -f Grd Grd.netcdf.gz
chmod 777 Grd.netcdf.gz

log_msg "Gunzipping ${GFESUITE_HOME}/Grd.netcdf.gz"
gunzip -f Grd.netcdf.gz
if [ $? -ne 0 ];
then
log_msg "ERROR: Could not gunzip ${GFESUITE_HOME}/Grd.netcdf.gz"
rm -f ${LOCK_DIR}/importBkSiteGrids
log_msg 100
exit 1
fi
log_msg "Done Gunzipping!"

log_msg "Finding site-id using ncdump method."
hdr=`mktemp`
ncdump -h ${GFESUITE_HOME}/Grd.netcdf > $hdr
SITE=`grep siteID $hdr | head -1 | cut -d'"' -f2`
site=`echo $SITE | tr '[A-Z]' '[a-z]'`
if [ -z $site ]
then
log_msg "ERROR: Could not find out the site from ncdump method..."
rm -f $hdr
rm -f ${LOCK_DIR}/importBkSiteGrids
log_msg 100
exit 1
fi

mv -f ${GFESUITE_HOME}/Grd.netcdf ${GFESUITE_HOME}/${site}Grd.netcdf
rm -f $hdr
fi
log_msg "site is $site"

else
log_msg "Unable to locate the gridded data of the site, ${import_grd_file}"
log_msg "You will need to request your backup site to send grids again."
log_msg 100
rm -f ${LOCK_DIR}/importBkSiteGrids
exit 1
fi

log_msg 50
# Check if import file was supplied correctly by msg_send.
import_file=${GFESUITE_HOME}/${site}Grd.netcdf
log_msg "import_file=${import_file}"
SITE=`echo ${SITE} | tr '[a-z]' '[A-Z]'`
if [ -a ${import_file} ]
then
#use iscMosaic to load grids into databases
log_msg "Running iscMosaic to unpack griddded data..."
${GFESUITE_BIN}/iscMosaic -h $SVCBU_HOST -r $CDSPORT -d ${SITE}_GRID__Restore_00000000_0000 -f ${import_file} -n -x
if [ $? -ne 0 ];
then
log_msg "ERROR: iscMosaic failed to run correctly. Please re-run iscMosaic manually."
log_msg 100
rm -f ${LOCK_DIR}/importBkSiteGrids
exit 1
fi
log_msg "Finished running iscMosaic..."
# Generate a GFE message saying new Grids have arrived in Restore database.
cd ${GFESUITE_BIN}
sendGfeMessage -h ${SVCBU_HOST} -p ${CDSPORT} -u -m "Restore database has been populated with new grids."
else
log_msg "Unable to locate the gridded data of the site,${import_file} You will need to request your backup site to send grids again."
log_msg 100
rm -f ${LOCK_DIR}/importBkSiteGrids
exit 1
fi

#clean-up.
rm -f ${GFESUITE_HOME}/Grd
rm -f ${GFESUITE_HOME}/${SITE}Grd* siteID.txt
log_msg 100
rm -f ${LOCK_DIR}/importBkSiteGrids
#ALL well, send a msg and get out of here
log_msg "Importing Grids from backup site is completed. You may start your GFE now."
log_msg "Grids received from backup site are stored in ${SITE}_GRID__Restore_00000000_0000 database."
exit 0

#!/bin/bash
if [ ${#AWIPS_HOME} = 0 ]
then
path_to_script=`readlink -f $0`
export AWIPS_HOME=$(dirname $(dirname $(dirname $(dirname $path_to_script))))
fi

. ${AWIPS_HOME}/GFESuite/ServiceBackup/configuration/svcbu.env

# Create the log file
logdir=${IFPS_LOG}/`date +%Y%m%d`
logfil=svcbu_receive_grids_from_bksite`date +%H%M`
logfile=${logdir}/${logfil}
[ ! -d ${logdir} ] && (umask 000;mkdir ${logdir})
touch ${logdir}/${logfil}
exec 1>${logdir}/${logfil} 2>&1

# Check the status of the lock file to see if we are OK to proceed
if [ -f ${LOCK_DIR}/importBkSiteGrids ];
then
log_msg Cannot process grids.
exit 1
fi

touch ${LOCK_DIR}/importBkSiteGrids

log_msg 0

# Retrieve the name of the site from the tar file.
import_grd_file=${1}
if [ -a ${import_grd_file} ]
then

log_msg "Import Grids file is ${import_grd_file}"
mv ${import_grd_file} ${GFESUITE_HOME}/Grd
cd ${GFESUITE_HOME}

tar xf Grd
if [ $? -eq 0 ]; then
SITE=`cat siteID.txt | tr '[a-z]' '[A-Z]'`
site=`echo $SITE | tr '[A-Z]' '[a-z]'`
gunzip -f ${site}Grd.netcdf.gz
else
# move the file to appropriate directory.
mv -f Grd Grd.netcdf.gz
chmod 777 Grd.netcdf.gz

log_msg "Gunzipping ${GFESUITE_HOME}/Grd.netcdf.gz"
gunzip -f Grd.netcdf.gz
if [ $? -ne 0 ];
then
log_msg "ERROR: Could not gunzip ${GFESUITE_HOME}/Grd.netcdf.gz"
rm -f ${LOCK_DIR}/importBkSiteGrids
log_msg 100
exit 1
fi
log_msg "Done Gunzipping!"

log_msg "Finding site-id using ncdump method."
hdr=`mktemp`
ncdump -h ${GFESUITE_HOME}/Grd.netcdf > $hdr
SITE=`grep siteID $hdr | head -1 | cut -d'"' -f2`
site=`echo $SITE | tr '[A-Z]' '[a-z]'`
if [ -z $site ]
then
log_msg "ERROR: Could not find out the site from ncdump method..."
rm -f $hdr
rm -f ${LOCK_DIR}/importBkSiteGrids
log_msg 100
exit 1
fi

mv -f ${GFESUITE_HOME}/Grd.netcdf ${GFESUITE_HOME}/${site}Grd.netcdf
rm -f $hdr
fi
log_msg "site is $site"

else
log_msg "Unable to locate the gridded data of the site, ${import_grd_file}"
log_msg "You will need to request your backup site to send grids again."
log_msg 100
rm -f ${LOCK_DIR}/importBkSiteGrids
exit 1
fi

log_msg 50
# Check if import file was supplied correctly by msg_send.
import_file=${GFESUITE_HOME}/${site}Grd.netcdf
log_msg "import_file=${import_file}"
SITE=`echo ${SITE} | tr '[a-z]' '[A-Z]'`
if [ -a ${import_file} ]
then
#use iscMosaic to load grids into databases
log_msg "Running iscMosaic to unpack griddded data..."
${GFESUITE_BIN}/iscMosaic -h $SVCBU_HOST -r $CDSPORT -d ${SITE}_GRID__Restore_00000000_0000 -f ${import_file} -n -x
if [ $? -ne 0 ];
then
log_msg "ERROR: iscMosaic failed to run correctly. Please re-run iscMosaic manually."
log_msg 100
rm -f ${LOCK_DIR}/importBkSiteGrids
exit 1
fi
log_msg "Finished running iscMosaic..."
# Generate a GFE message saying new Grids have arrived in Restore database.
cd ${GFESUITE_BIN}
./sendGfeMessage -h ${SVCBU_HOST} -p ${CDSPORT} -u -m "Restore database has been populated with new grids."
else
log_msg "Unable to locate the gridded data of the site,${import_file} You will need to request your backup site to send grids again."
log_msg 100
rm -f ${LOCK_DIR}/importBkSiteGrids
exit 1
fi

#clean-up.
rm -f ${GFESUITE_HOME}/Grd
rm -f ${GFESUITE_HOME}/${SITE}Grd* siteID.txt
log_msg 100
rm -f ${LOCK_DIR}/importBkSiteGrids
#ALL well, send a msg and get out of here
log_msg "Importing Grids from backup site is completed. You may start your GFE now."
log_msg "Grids received from backup site are stored in ${SITE}_GRID__Restore_00000000_0000 database."
exit 0
@@ -1,25 +0,0 @@
Copyright (c) 2009, Swiss AviationSoftware Ltd. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the Swiss AviationSoftware Ltd. nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
@@ -1,178 +0,0 @@
#
# AWIPS II Eclipse Spec File
#

# --define arguments:
# %{_uframe_eclipse}
# %{_build_root}
# %{_baseline_workspace}

Name: awips2-eclipse
Summary: AWIPS II Eclipse Distribution
Version: 3.6.1
Release: 1
Group: AWIPSII
BuildRoot: %{_build_root}
URL: N/A
License: N/A
Distribution: N/A
Vendor: Raytheon
Packager: Bryan Kowal

AutoReq: no
provides: awips2-eclipse

%description
AWIPS II Eclipse Distribution - Contains the AWIPS II Eclipse Distribution.

# Turn off the brp-python-bytecompile script
%global __os_install_post %(echo '%{__os_install_post}' | sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g')
%global __os_install_post %(echo '%{__os_install_post}' | sed -e 's!/usr/lib[^[:space:]]*/brp-java-repack-jars[[:space:]].*$!!g')

%prep
# Verify That The User Has Specified A BuildRoot.
if [ "%{_build_root}" = "/tmp" ]
then
echo "An Actual BuildRoot Must Be Specified. Use The --buildroot Parameter."
echo "Unable To Continue ... Terminating"
exit 1
fi

if [ -d %{_build_root} ]; then
rm -rf %{_build_root}
fi
mkdir -p %{_build_root}/awips2/eclipse

%build

%install
mkdir -p %{_build_root}/awips2/eclipse
# The location of the awips2 eclipse source directory will be
# specified as a command line argument. Fail if the specified
# directory cannot be found.
if [ ! -d %{_uframe_eclipse} ]; then
echo "ERROR: Unable To Find The AWIPS II Eclipse Distribution."
echo "Unable To Continue ... Terminating"
exit 1
fi

# Copy the uframe eclipse distribution.
cp -r %{_uframe_eclipse}/* %{_build_root}/awips2/eclipse

# Copy eclipse.sh to our build-directory.
cp %{_baseline_workspace}/rpms/awips2.ade/Installer.eclipse/scripts/* \
%{_build_root}/awips2/eclipse

# delete the basemaps and etc links
rm -f %{_build_root}/awips2/eclipse/basemaps
rm -f %{_build_root}/awips2/eclipse/etc

%pre
JAVA_INSTALL="<Not Installed>"
PYTHON_INSTALL="<Not Installed>"
ANT_INSTALL="<Not Installed>"

INSTALL_PATH="/awips2/java"
if [ -d ${INSTALL_PATH} ]; then
JAVA_INSTALL=${INSTALL_PATH}
fi

INSTALL_PATH="/awips2/python"
if [ -d ${INSTALL_PATH} ]; then
PYTHON_INSTALL=${INSTALL_PATH}
fi

INSTALL_PATH="/awips2/ant"
if [ -d ${INSTALL_PATH} ]; then
ANT_INSTALL=${INSTALL_PATH}
fi

echo -e "\e[1;34m--------------------------------------------------------------------------------\e[m"
echo -e "\e[1;34m\| Installing the AWIPS II Eclipse Distribution...\e[m"
echo -e "\e[1;34m--------------------------------------------------------------------------------\e[m"
echo -e "\e[1;34m Java Detected At: ${JAVA_INSTALL}\e[m"
echo -e "\e[1;34m Python Detected At: ${PYTHON_INSTALL}\e[m"
echo -e "\e[1;34m Ant Detected At: ${ANT_INSTALL}\e[m"

%post
echo -e "\e[1;34m--------------------------------------------------------------------------------\e[m"
echo -e "\e[1;34m\| Creating ADE Eclipse Desktop Shortcut...\e[m"
echo -e "\e[1;34m--------------------------------------------------------------------------------\e[m"
ADE_ECLIPSE_SHORTCUT="ade-eclipse"
SHORTCUT_OWNER="${USER}"
CREATE_SHORTCUT="true"
if [ ! "${SUDO_USER}" = "" ]; then
SHORTCUT_OWNER="${SUDO_USER}"
fi
echo -e "\e[1;34m Creating Shortcut For User: ${SHORTCUT_OWNER}\e[m"

USER_HOME_DIR="~${SHORTCUT_OWNER}"
if [ ! -d ${USER_HOME_DIR} ]; then
USER_HOME_DIR="/home/${SHORTCUT_OWNER}"
echo " (Assuming User Home Directory Is Under '/home')"
fi

if [ ! -d ${USER_HOME_DIR}/Desktop ]; then
echo -e "\e[1;31m ERROR: Unable To Find The User's Desktop!!!"
CREATE_SHORTCUT="false"
fi

if [ "${CREATE_SHORTCUT}" = "true" ]; then
SHORTCUT_TMP="${USER_HOME_DIR}/Desktop/${ADE_ECLIPSE_SHORTCUT}.tmp"
SHORTCUT="${USER_HOME_DIR}/Desktop/${ADE_ECLIPSE_SHORTCUT}.desktop"

if [ -f ${SHORTCUT} ]; then
echo -n " Attempting To Remove The Existing Shortcut ... "
sudo -u ${SHORTCUT_OWNER} rm -f ${SHORTCUT}
if [ ! -f ${SHORTCUT} ]; then
echo -n "SUCCESS"
else
echo -n "FAILURE"
fi
echo ""
fi
sudo -u ${SHORTCUT_OWNER} touch ${SHORTCUT_TMP}
sudo -u ${SHORTCUT_OWNER} chmod 666 ${SHORTCUT_TMP}

echo "[Desktop Entry]" >> ${SHORTCUT_TMP}
echo "Version=1.0" >> ${SHORTCUT_TMP}
echo "Encoding=UTF-8" >> ${SHORTCUT_TMP}
echo "Name=ADE Eclipse" >> ${SHORTCUT_TMP}
echo "GenericName=Eclipse" >> ${SHORTCUT_TMP}
echo "Comment=IDE" >> ${SHORTCUT_TMP}
echo "Exec=/bin/bash -i -c \"xterm -title 'AWIPS II ADE Eclipse' -e '/awips2/eclipse/eclipseShortcutWrap.sh'\"" >> ${SHORTCUT_TMP}
echo "Icon=/awips2/eclipse/icon.xpm" >> ${SHORTCUT_TMP}
echo "Terminal=false" >> ${SHORTCUT_TMP}
echo "Type=Application" >> ${SHORTCUT_TMP}
echo "Categories=Development;IDE;" >> ${SHORTCUT_TMP}

sudo -u ${SHORTCUT_OWNER} mv ${SHORTCUT_TMP} ${SHORTCUT}
sudo -u ${SHORTCUT_OWNER} chmod 644 ${SHORTCUT}
fi

echo -e "\e[1;32m--------------------------------------------------------------------------------\e[m"
echo -e "\e[1;32m\| AWIPS II Eclipse Distribution Installation - COMPLETE\e[m"
echo -e "\e[1;32m--------------------------------------------------------------------------------\e[m"

%preun

%postun

%clean
rm -rf ${RPM_BUILD_ROOT}

%files
%defattr(644,awips,fxalpha,755)
%dir /awips2/eclipse
/awips2/eclipse/*
%defattr(755,awips,fxalpha,755)
/awips2/eclipse/about.html
/awips2/eclipse/artifacts.xml
/awips2/eclipse/eclipse
/awips2/eclipse/eclipse.ini
/awips2/eclipse/eclipse.sh
/awips2/eclipse/eclipseShortcutWrap.sh
/awips2/eclipse/epl-v10.html
/awips2/eclipse/icon.xpm
/awips2/eclipse/libcairo-swt.so
/awips2/eclipse/notice.html
@@ -48,4 +48,4 @@ if [ ! -f ${JAVA} ]; then
exit 1
fi

$JAVA -jar ${QPID_HOME}/bin/yajsw/wrapper.jar -c ${QPID_HOME}/conf/${CONF_FILE}
$JAVA -Xmx32m -XX:MaxPermSize=12m -XX:ReservedCodeCacheSize=4m -jar ${QPID_HOME}/bin/yajsw/wrapper.jar -c ${QPID_HOME}/conf/${CONF_FILE}
@@ -14,7 +14,7 @@ diff -crB a/qpid-java.spec b/qpid-java.spec
!
! Name: awips2-qpid-java
Version: 0.18
! Release: 3%{?dist}
! Release: 4%{?dist}
Summary: Java implementation of Apache Qpid
License: Apache Software License
Group: Development/Java
@@ -400,14 +400,16 @@ if [ "${1}" = "-viz" ]; then
buildRPM "awips2"
buildRPM "awips2-common-base"
#buildRPM "awips2-python-dynamicserialize"
buildRPM "awips2-python"
buildRPM "awips2-adapt-native"
buildRPM "awips2-gfesuite-client"
buildRPM "awips2-gfesuite-server"
#buildRPM "awips2-python"
#buildRPM "awips2-adapt-native"
#unpackHttpdPypies
#if [ $? -ne 0 ]; then
#   exit 1
#fi
#buildRPM "awips2-httpd-pypies"
buildRPM "awips2-hydroapps-shared"
#buildRPM "awips2-hydroapps-shared"
#buildRPM "awips2-rcm"
#buildRPM "awips2-tools"
#buildRPM "awips2-cli"