diff --git a/cave/com.raytheon.uf.viz.archive/src/com/raytheon/uf/viz/archive/ui/AbstractArchiveDlg.java b/cave/com.raytheon.uf.viz.archive/src/com/raytheon/uf/viz/archive/ui/AbstractArchiveDlg.java
index 4f6e103485..3bc0e0706d 100644
--- a/cave/com.raytheon.uf.viz.archive/src/com/raytheon/uf/viz/archive/ui/AbstractArchiveDlg.java
+++ b/cave/com.raytheon.uf.viz.archive/src/com/raytheon/uf/viz/archive/ui/AbstractArchiveDlg.java
@@ -24,6 +24,10 @@ import java.util.Calendar;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
 import org.eclipse.jface.dialogs.MessageDialog;
 import org.eclipse.swt.SWT;
 import org.eclipse.swt.events.SelectionAdapter;
@@ -71,6 +75,7 @@ import com.raytheon.viz.ui.dialogs.CaveSWTDialog;
  * Jul 24, 2013 2220       rferrel     Changes to queue size request for all data.
  * Aug 01, 2013 2221       rferrel     Changes for select configuration.
  * Aug 06, 2013 2222       rferrel     Changes to display all selected data.
+ * Nov 14, 2013 2549       rferrel     Get category data moved off the UI thread.
  * </pre>
  * 
  * @author bgonzale
@@ -452,25 +457,57 @@ public abstract class AbstractArchiveDlg extends CaveSWTDialog implements
      * adjust sizes on the display table.
      */
    protected void populateTableComp() {
-        String archiveName = getSelectedArchiveName();
-        String categoryName = getSelectedCategoryName();
+        final String archiveName = getSelectedArchiveName();
+        final String categoryName = getSelectedCategoryName();
 
         setCursorBusy(true);
 
-        try {
-            setShowingSelected(false);
+        setShowingSelected(false);
+        tableComp.populateTable(archiveName, categoryName,
+                new ArrayList<DisplayData>(0));
+        tableComp.refresh();
 
-            List<DisplayData> displayDatas = sizeJob.changeDisplay(archiveName,
-                    categoryName);
-            if (displayDatas != null) {
-                tableComp
-                        .populateTable(archiveName, categoryName, displayDatas);
-            } else {
-                tableComp.refresh();
+        Job job = new Job("populate category table") {
+
+            @Override
+            protected IStatus run(IProgressMonitor monitor) {
+                getCategoryTableData(archiveName, categoryName);
+                return Status.OK_STATUS;
             }
-        } finally {
-            setCursorBusy(false);
-        }
+        };
+        job.schedule();
+    }
+
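
The change above moves the slow sizeJob query onto an Eclipse Job and leaves only the
widget update for the display thread. A minimal self-contained sketch of the same
pattern, using plain SWT Display.asyncExec in place of the AWIPS VizApp.runAsync
helper (fetchData and updateTable are hypothetical stand-ins for the dialog's methods):

    import org.eclipse.core.runtime.IProgressMonitor;
    import org.eclipse.core.runtime.IStatus;
    import org.eclipse.core.runtime.Status;
    import org.eclipse.core.runtime.jobs.Job;
    import org.eclipse.swt.widgets.Display;

    public class BackgroundLoad {
        public static void load(final Display display) {
            Job job = new Job("populate category table") {
                @Override
                protected IStatus run(IProgressMonitor monitor) {
                    // Slow work stays off the UI thread.
                    final String result = fetchData();
                    // Only the widget update is posted back to the UI thread.
                    display.asyncExec(new Runnable() {
                        @Override
                        public void run() {
                            updateTable(result);
                        }
                    });
                    return Status.OK_STATUS;
                }
            };
            job.schedule();
        }

        private static String fetchData() { return "..."; } // stands in for sizeJob.changeDisplay()
        private static void updateTable(String data) { }    // stands in for tableComp.populateTable()
    }
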
+    /**
+     * Gets the selected category's data. Assumed to be called from a non-UI
+     * thread, since getting the data may take long enough to hang up the UI
+     * thread.
+     * 
+     * @param archiveName
+     * @param categoryName
+     */
+    private void getCategoryTableData(final String archiveName,
+            final String categoryName) {
+
+        final List<DisplayData> displayDatas = sizeJob.changeDisplay(
+                archiveName, categoryName);
+
+        VizApp.runAsync(new Runnable() {
+
+            @Override
+            public void run() {
+                try {
+                    if (displayDatas != null) {
+                        tableComp.populateTable(archiveName, categoryName,
+                                displayDatas);
+                    } else {
+                        tableComp.refresh();
+                    }
+                } finally {
+                    setCursorBusy(false);
+                }
+            }
+        });
     }
 
     /**
diff --git a/cave/com.raytheon.viz.warngen/src/com/raytheon/viz/warngen/util/CurrentWarnings.java b/cave/com.raytheon.viz.warngen/src/com/raytheon/viz/warngen/util/CurrentWarnings.java
index 264f20eb60..599a55c3e3 100644
--- a/cave/com.raytheon.viz.warngen/src/com/raytheon/viz/warngen/util/CurrentWarnings.java
+++ b/cave/com.raytheon.viz.warngen/src/com/raytheon/viz/warngen/util/CurrentWarnings.java
@@ -312,9 +312,24 @@ public class CurrentWarnings {
     public AbstractWarningRecord getNewestByTracking(String etn, String phensig) {
         AbstractWarningRecord rval = null;
         synchronized (officeId) {
-            List<AbstractWarningRecord> warnings = warningMap.get(toKey(
+            List<AbstractWarningRecord> keyWarnings = warningMap.get(toKey(
                     phensig, etn));
-            if (warnings != null) {
+            if (keyWarnings != null) {
+                // filter out "future" warnings.
+                List<AbstractWarningRecord> warnings = null;
+                if (SimulatedTime.getSystemTime().isRealTime()) {
+                    warnings = keyWarnings;
+                } else {
+                    warnings = new ArrayList<AbstractWarningRecord>(
+                            keyWarnings.size());
+                    long currentTime = TimeUtil.newCalendar().getTimeInMillis();
+                    for (AbstractWarningRecord warning : keyWarnings) {
+                        if (warning.getIssueTime().getTimeInMillis() <= currentTime) {
+                            warnings.add(warning);
+                        }
+                    }
+                }
+
                 // See if we have a NEW warning
                 for (AbstractWarningRecord warning : warnings) {
                     if (getAction(warning.getAct()) == WarningAction.NEW) {
@@ -399,8 +414,7 @@
         if (warnings != null) {
             Calendar c = TimeUtil.newCalendar();
             c.add(Calendar.MINUTE, -10);
-            TimeRange t = new TimeRange(c.getTime(), SimulatedTime
-                    .getSystemTime().getTime());
+            TimeRange t = new TimeRange(c.getTime(), TimeUtil.newDate());
 
             for (AbstractWarningRecord warning : warnings) {
                 if (t.contains(warning.getIssueTime().getTime())) {
@@ -438,8 +452,7 @@
         ArrayList<AbstractWarningRecord> conProds = new ArrayList<AbstractWarningRecord>();
         Calendar c = TimeUtil.newCalendar();
         c.add(Calendar.MINUTE, -10);
-        TimeRange t = new TimeRange(c.getTime(), SimulatedTime
-                .getSystemTime().getTime());
+        TimeRange t = new TimeRange(c.getTime(), TimeUtil.newDate());
         for (AbstractWarningRecord warning : warnings) {
             WarningAction action = getAction(warning.getAct());
             if (t.contains(warning.getIssueTime().getTime())
@@ -545,12 +558,20 @@
         List<AbstractWarningRecord> records = new ArrayList<AbstractWarningRecord>(
                 recordsMap.values());
 
-        // Sort by insert time
+        // Sort by issue time; when null fall back to insert time.
         Collections.sort(records, new Comparator<AbstractWarningRecord>() {
 
             @Override
             public int compare(AbstractWarningRecord o1,
                     AbstractWarningRecord o2) {
-                return o1.getInsertTime().compareTo(o2.getInsertTime());
+                Calendar c1 = o1.getIssueTime();
+                if (c1 == null) {
+                    c1 = o1.getInsertTime();
+                }
+                Calendar c2 = o2.getIssueTime();
+                if (c2 == null) {
+                    c2 = o2.getInsertTime();
+                }
+                return c1.compareTo(c2);
             }
         });
 
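
Under DRT (displaced real time) the workstation clock is simulated, so
getNewestByTracking now discards records whose issue time lies beyond "now" before
looking for a NEW action. A reduced sketch of that guard, with a hypothetical record
type standing in for AbstractWarningRecord:

    import java.util.ArrayList;
    import java.util.List;

    public class FutureFilter {
        /** Minimal stand-in for a record carrying an issue time in millis. */
        static class Record {
            final long issueTimeMillis;
            Record(long issueTimeMillis) { this.issueTimeMillis = issueTimeMillis; }
        }

        /**
         * Keep only records issued at or before "now". In real time the list is
         * returned unchanged; under a simulated clock, pass the simulated millis.
         */
        static List<Record> dropFuture(List<Record> in, boolean realTime, long nowMillis) {
            if (realTime) {
                return in;
            }
            List<Record> out = new ArrayList<Record>(in.size());
            for (Record r : in) {
                if (r.issueTimeMillis <= nowMillis) {
                    out.add(r);
                }
            }
            return out;
        }
    }
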
@@ -602,7 +623,10 @@
         Map<String, List<AbstractWarningRecord>> recordMap = new HashMap<String, List<AbstractWarningRecord>>();
         for (AbstractWarningRecord rec : newRecords) {
-            List<AbstractWarningRecord> recs = recordMap.get(rec.getOfficeid());
+            // This used the key rec.getOfficeid(), which can be null and can
+            // drop alerts when there is more than one new record.
+            // Changed to use the same key as the put.
+            List<AbstractWarningRecord> recs = recordMap.get(rec.getXxxid());
             if (recs == null) {
                 recs = new ArrayList<AbstractWarningRecord>();
                 recordMap.put(rec.getXxxid(), recs);
diff --git a/cave/com.raytheon.viz.warnings/src/com/raytheon/viz/warnings/rsc/AbstractWWAResource.java b/cave/com.raytheon.viz.warnings/src/com/raytheon/viz/warnings/rsc/AbstractWWAResource.java
index c796d18a46..92e31143b2 100644
--- a/cave/com.raytheon.viz.warnings/src/com/raytheon/viz/warnings/rsc/AbstractWWAResource.java
+++ b/cave/com.raytheon.viz.warnings/src/com/raytheon/viz/warnings/rsc/AbstractWWAResource.java
@@ -26,6 +26,7 @@ import com.raytheon.uf.common.status.UFStatus;
 import com.raytheon.uf.common.time.DataTime;
 import com.raytheon.uf.common.time.SimulatedTime;
 import com.raytheon.uf.common.time.TimeRange;
+import com.raytheon.uf.common.time.util.TimeUtil;
 import com.raytheon.uf.viz.core.DrawableString;
 import com.raytheon.uf.viz.core.IGraphicsTarget;
 import com.raytheon.uf.viz.core.IGraphicsTarget.HorizontalAlignment;
@@ -82,6 +83,7 @@ import com.vividsolutions.jts.geom.prep.PreparedGeometryFactory;
  *                                     Check if geometry is null when inspecting.
  * Jul 22, 2013 2176       jsanchez    Updated the wire frame and text for EMERGENCY warnings.
  * Sep  4, 2013 2176       jsanchez    Made the polygon line width thicker and made regular text not bold.
+ * Nov 11, 2013 2439       rferrel     Changes to prevent getting future warning when in DRT mode.
  * </pre>
  * 
  * @author jsanchez
@@ -128,7 +130,7 @@ public abstract class AbstractWWAResource extends
     protected static PreparedGeometryFactory pgf = new PreparedGeometryFactory();
 
     /** one hour ahead, entirely arbitrary/magic **/
-    private static final long LAST_FRAME_ADJ = (60 * 60 * 1000);
+    private static final long LAST_FRAME_ADJ = TimeUtil.MILLIS_PER_HOUR;
 
     protected String resourceName;
 
@@ -465,13 +467,20 @@
         if (lastFrame) {
             // use current system time to determine what to display
-            Date timeToDisplay = SimulatedTime.getSystemTime().getTime();
+            Date timeToDisplay = TimeUtil.newDate();
             // change frame time
             frameTime = timeToDisplay;
 
             // point paint time to different time
             paintTime = new DataTime(timeToDisplay);
             // point framePeriod to new frame
-            framePeriod = new TimeRange(frameTime, LAST_FRAME_ADJ);
+            if (SimulatedTime.getSystemTime().isRealTime()) {
+                framePeriod = new TimeRange(frameTime, LAST_FRAME_ADJ);
+            } else {
+                // Prevent getting "future" records by keeping interval in the
+                // same minute.
+                framePeriod = new TimeRange(frameTime,
+                        30 * TimeUtil.MILLIS_PER_SECOND);
+            }
         }
 
         // check if the warning is cancelled
diff --git a/edexOsgi/build.edex/esb/bin/start.sh b/edexOsgi/build.edex/esb/bin/start.sh
index a07b6277df..88096cfbab 100644
--- a/edexOsgi/build.edex/esb/bin/start.sh
+++ b/edexOsgi/build.edex/esb/bin/start.sh
@@ -147,4 +147,4 @@
 if [ $DEBUG_FLAG == "on" ]; then
   echo "To Debug ... Connect to Port: ${EDEX_DEBUG_PORT}."
 fi
-java -jar ${EDEX_HOME}/bin/yajsw/wrapper.jar -c ${EDEX_HOME}/conf/${CONF_FILE} ${WRAPPER_ARGS}
+java -Xmx32m -XX:MaxPermSize=12m -XX:ReservedCodeCacheSize=4m -jar ${EDEX_HOME}/bin/yajsw/wrapper.jar -c ${EDEX_HOME}/conf/${CONF_FILE} ${WRAPPER_ARGS}
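
In the resource above, the last frame's query window stays at one hour ahead in real
time but is clamped to half a minute in DRT, so a query anchored on the frame time
cannot pull in records issued after the simulated clock. The decision reduces to this
sketch (constants inlined; Range is a stand-in for AWIPS' TimeRange):

    public final class FramePeriods {
        private static final long MILLIS_PER_HOUR = 60L * 60L * 1000L;
        private static final long MILLIS_PER_SECOND = 1000L;

        /** Start/end pair in epoch millis; stands in for AWIPS' TimeRange. */
        static final class Range {
            final long start, end;
            Range(long start, long durationMillis) {
                this.start = start;
                this.end = start + durationMillis;
            }
        }

        static Range lastFramePeriod(long frameTimeMillis, boolean isRealTime) {
            if (isRealTime) {
                // Real time: look a full (arbitrary) hour ahead.
                return new Range(frameTimeMillis, MILLIS_PER_HOUR);
            }
            // DRT: keep the window inside the same minute so "future" records
            // relative to the simulated clock are never fetched.
            return new Range(frameTimeMillis, 30 * MILLIS_PER_SECOND);
        }
    }
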
diff --git a/edexOsgi/com.raytheon.edex.ingestsrv/res/spring/persist-ingest.xml b/edexOsgi/com.raytheon.edex.ingestsrv/res/spring/persist-ingest.xml
index 23ffbb0dcc..6958949ab7 100644
--- a/edexOsgi/com.raytheon.edex.ingestsrv/res/spring/persist-ingest.xml
+++ b/edexOsgi/com.raytheon.edex.ingestsrv/res/spring/persist-ingest.xml
@@ -5,79 +5,87 @@
     xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
     http://camel.apache.org/schema/spring http://camel.apache.org/schema/spring/camel-spring.xsd
     http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-3.1.xsd">
 <!-- The XML element markup in this hunk did not survive extraction; only its
      text content remains. The hunk reindents the ingest route definitions and
      adds a java.lang.Throwable exception handler to each of the three routes. -->
\ No newline at end of file
diff --git a/edexOsgi/com.raytheon.edex.ingestsrv/src/com/raytheon/edex/ingestsrv/DupElimSrv.java b/edexOsgi/com.raytheon.edex.ingestsrv/src/com/raytheon/edex/ingestsrv/DupElimSrv.java
new file mode 100644
index 0000000000..a14c97fdef
--- /dev/null
+++ b/edexOsgi/com.raytheon.edex.ingestsrv/src/com/raytheon/edex/ingestsrv/DupElimSrv.java
@@ -0,0 +1,131 @@
+/**
+ * This software was developed and / or modified by Raytheon Company,
+ * pursuant to Contract DG133W-05-CQ-1067 with the US Government.
+ *
+ * U.S. EXPORT CONTROLLED TECHNICAL DATA
+ * This software product contains export-restricted data whose
+ * export/transfer/disclosure is restricted by U.S. law. Dissemination
+ * to non-U.S. persons whether in the United States or abroad requires
+ * an export license or other authorization.
+ *
+ * Contractor Name:        Raytheon Company
+ * Contractor Address:     6825 Pine Street, Suite 340
+ *                         Mail Stop B8
+ *                         Omaha, NE 68106
+ *                         402.291.0100
+ *
+ * See the AWIPS II Master Rights File ("Master Rights File.pdf") for
+ * further licensing information.
+ **/
+package com.raytheon.edex.ingestsrv;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import com.raytheon.uf.common.dataplugin.PluginDataObject;
+import com.raytheon.uf.common.dataplugin.annotations.DataURIUtil;
+import com.raytheon.uf.common.status.IPerformanceStatusHandler;
+import com.raytheon.uf.common.status.IUFStatusHandler;
+import com.raytheon.uf.common.status.PerformanceStatus;
+import com.raytheon.uf.common.status.UFStatus;
+import com.raytheon.uf.common.time.util.ITimer;
+import com.raytheon.uf.common.time.util.TimeUtil;
+import com.raytheon.uf.common.util.CollectionUtil;
+import com.raytheon.uf.edex.database.plugin.PluginDao;
+import com.raytheon.uf.edex.database.plugin.PluginFactory;
+import com.raytheon.uf.edex.database.query.DatabaseQuery;
+
+/**
+ * Checks database for duplicates of data. Does not account for clustering.
+ *
+ * <pre>
+ * 
+ * SOFTWARE HISTORY
+ * 
+ * Date         Ticket#    Engineer    Description
+ * ------------ ---------- ----------- --------------------------
+ * Nov 11, 2013 2478       rjpeter     Initial creation
+ * 
+ * </pre>
+ *
+ * @author rjpeter
+ * @version 1.0
+ */
+
+public class DupElimSrv {
+    private static final IUFStatusHandler statusHandler = UFStatus
+            .getHandler(DupElimSrv.class);
+
+    private final IPerformanceStatusHandler perfLog = PerformanceStatus
+            .getHandler("DupElim:");
+
+    /**
+     * Checks the passed pdos against the database for existence. If duplicates
+     * are found, returns a new array containing only the new plugin data
+     * objects. If an error occurs, the original pdos array is returned.
+     *
+     * @param pdos
+     * @return the plugin data objects not already in the database
+     */
+    public PluginDataObject[] dupElim(PluginDataObject[] pdos) {
+        if ((pdos == null) || (pdos.length == 0)) {
+            return new PluginDataObject[0];
+        }
+
+        ITimer dupCheckTimer = TimeUtil.getTimer();
+        dupCheckTimer.start();
+
+        int numBefore = pdos.length;
+        String pluginName = pdos[0].getPluginName();
+
+        try {
+
+            PluginDao dao = PluginFactory.getInstance()
+                    .getPluginDao(pluginName);
+            List<PluginDataObject> newPdos = new ArrayList<PluginDataObject>(
+                    pdos.length);
+
+            // TODO: Bulk querying, groups of 100 using IN lists?
+            for (PluginDataObject pdo : pdos) {
+                DatabaseQuery dbQuery = new DatabaseQuery(pdo.getClass());
+                Map<String, Object> dataUriFields = DataURIUtil
+                        .createDataURIMap(pdo);
+                for (Map.Entry<String, Object> field : dataUriFields.entrySet()) {
+                    String fieldName = field.getKey();
+                    // ignore pluginName
+                    if (!DataURIUtil.PLUGIN_NAME_KEY.equals(fieldName)) {
+                        dbQuery.addQueryParam(field.getKey(), field.getValue());
+                    }
+                }
+
+                @SuppressWarnings("unchecked")
+                List<PluginDataObject> dbPdos = (List<PluginDataObject>) dao
+                        .queryByCriteria(dbQuery);
+                if (CollectionUtil.isNullOrEmpty(dbPdos)) {
+                    newPdos.add(pdo);
+                } else {
+                    // shouldn't be more than 1
+                    PluginDataObject dbPdo = dbPdos.get(0);
+                    if ((dbPdo == null)
+                            || !pdo.getDataURI().equals(dbPdo.getDataURI())) {
+                        newPdos.add(pdo);
+                    }
+                }
+            }
+            if (pdos.length != newPdos.size()) {
+                pdos = newPdos.toArray(new PluginDataObject[newPdos.size()]);
+            }
+        } catch (Exception e) {
+            statusHandler
+                    .error("Error occurred during duplicate elimination processing",
+                            e);
+        }
+        dupCheckTimer.stop();
+
+        perfLog.logDuration(pluginName + ": Eliminated "
+                + (numBefore - pdos.length) + " of " + numBefore
+                + " record(s): Time to process", dupCheckTimer.getElapsedTime());
+        return pdos;
+    }
+}
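
DupElimSrv treats two records as duplicates when every dataURI field matches, which is
what the per-PDO DatabaseQuery above encodes. The idea in isolation, with a plain
in-memory collection standing in for the database round trip:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class DupElimSketch {
        /** Stand-in for the persisted index of dataURI field maps. */
        private final List<Map<String, Object>> stored = new ArrayList<Map<String, Object>>();

        /** Returns only the records whose dataURI fields are not stored yet. */
        public List<Map<String, Object>> dupElim(List<Map<String, Object>> incoming) {
            List<Map<String, Object>> fresh = new ArrayList<Map<String, Object>>(incoming.size());
            for (Map<String, Object> rec : incoming) {
                if (!stored.contains(rec)) { // map equality == all dataURI fields equal
                    fresh.add(rec);
                    stored.add(new HashMap<String, Object>(rec));
                }
            }
            return fresh;
        }
    }
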
+ + " ORDER BY dataTime.validPeriod.start", new Object[] { parmId, tr.getEnd(), tr.getStart() }); return rval; diff --git a/edexOsgi/com.raytheon.edex.plugin.gfe/src/com/raytheon/edex/plugin/gfe/smartinit/IFPWE.java b/edexOsgi/com.raytheon.edex.plugin.gfe/src/com/raytheon/edex/plugin/gfe/smartinit/IFPWE.java index d9e33c964b..a093e1ddf5 100644 --- a/edexOsgi/com.raytheon.edex.plugin.gfe/src/com/raytheon/edex/plugin/gfe/smartinit/IFPWE.java +++ b/edexOsgi/com.raytheon.edex.plugin.gfe/src/com/raytheon/edex/plugin/gfe/smartinit/IFPWE.java @@ -83,6 +83,10 @@ import com.raytheon.uf.common.util.Pair; * Scalar/VectorGridSlices, refactor * Discrete/WeatherGridSlices builders. * Jun 05, 2013 #2063 dgilling Port history() from A1. + * Nov 11, 2013 #2517 randerso Changed put() to support multiple discontiguous saves + * Added getKeys(tr) to get grid times overlapping a time range + * Removed caching of inventory as it was not being updated when + * grids were updated/deleted * * * @@ -103,8 +107,6 @@ public class IFPWE { private final GridParmInfo gpi; - private List availableTimes; - private final WsId wsId; /** @@ -126,23 +128,41 @@ public class IFPWE { } /** - * Returns the available times of data for the parm + * Returns all available times of data for the parm * - * @return + * @return the time ranges of all available data for the parm */ public List getKeys() { - if (availableTimes == null) { - availableTimes = new ArrayList(); - List times = GridParmManager.getGridInventory(parmId) - .getPayload(); - if (times != null) { - Collections.sort(times); - availableTimes.addAll(times); - } + List availableTimes; + ServerResponse> sr = GridParmManager + .getGridInventory(parmId); + if (sr.isOkay()) { + availableTimes = sr.getPayload(); + } else { + availableTimes = Collections.emptyList(); } return availableTimes; } + /** + * Returns available times of data for the parm that overlap a time range + * + * @param tr + * the desired time range + * @return the time ranges of data that overlap the desired time range + */ + public List getKeys(TimeRange tr) { + List overlappingTimes; + ServerResponse> sr = GridParmManager.getGridInventory( + parmId, tr); + if (sr.isOkay()) { + overlappingTimes = sr.getPayload(); + } else { + overlappingTimes = Collections.emptyList(); + } + return overlappingTimes; + } + /** * Returns the grid parm info * @@ -245,67 +265,71 @@ public class IFPWE { * storage. * * @param inventory - * A Map of TimeRanges to IGridSlices to be saved. Time is the - * slice's valid time. - * @param timeRangeSpan - * The replacement time range of grids to be saved. Must cover - * each individual TimeRange in inventory. + * A Map of TimeRanges to List of IGridSlices. TimeRange is the + * replacement time range * @throws GfeException * If an error occurs while trying to obtain a lock on the * destination database. */ - public void put(LinkedHashMap inventory, - TimeRange timeRangeSpan) throws GfeException { - statusHandler.debug("Getting lock for ParmID: " + parmId + " TR: " - + timeRangeSpan); - ServerResponse> lockResponse = LockManager - .getInstance().requestLockChange( - new LockRequest(parmId, timeRangeSpan, LockMode.LOCK), - wsId, siteId); - if (lockResponse.isOkay()) { - statusHandler.debug("LOCKING: Lock granted for: " + wsId - + " for time range: " + timeRangeSpan); - } else { - statusHandler.error("Could not lock TimeRange " + timeRangeSpan - + " for parm [" + parmId + "]: " + lockResponse.message()); - throw new GfeException("Request lock failed. 
" - + lockResponse.message()); - } + public void put(LinkedHashMap> inventory) + throws GfeException { - List records = new ArrayList(inventory.size()); - for (Entry entry : inventory.entrySet()) { - GFERecord rec = new GFERecord(parmId, entry.getKey()); - rec.setGridHistory(entry.getValue().getHistory()); - rec.setMessageData(entry.getValue()); - records.add(rec); - } - SaveGridRequest sgr = new SaveGridRequest(parmId, timeRangeSpan, - records); - - try { - ServerResponse sr = GridParmManager.saveGridData( - Arrays.asList(sgr), wsId, siteId); - if (sr.isOkay()) { - SendNotifications.send(sr.getNotifications()); - } else { - statusHandler.error("Unable to save grids for parm [" + parmId - + "] over time range " + timeRangeSpan + ": " - + sr.message()); - } - } finally { - ServerResponse> unLockResponse = LockManager + for (Entry> entry : inventory.entrySet()) { + TimeRange timeRangeSpan = entry.getKey(); + statusHandler.debug("Getting lock for ParmID: " + parmId + " TR: " + + timeRangeSpan); + ServerResponse> lockResponse = LockManager .getInstance().requestLockChange( new LockRequest(parmId, timeRangeSpan, - LockMode.UNLOCK), wsId, siteId); - if (unLockResponse.isOkay()) { - statusHandler.debug("LOCKING: Unlocked for: " + wsId + " TR: " - + timeRangeSpan); + LockMode.LOCK), wsId, siteId); + if (lockResponse.isOkay()) { + statusHandler.debug("LOCKING: Lock granted for: " + wsId + + " for time range: " + timeRangeSpan); } else { - statusHandler.error("Could not unlock TimeRange " - + timeRangeSpan + " for parm [" + parmId + "]: " + statusHandler.error("Could not lock TimeRange " + timeRangeSpan + + " for parm [" + parmId + "]: " + lockResponse.message()); - throw new GfeException("Request unlock failed. " - + unLockResponse.message()); + throw new GfeException("Request lock failed. " + + lockResponse.message()); + } + + List gridSlices = entry.getValue(); + List records = new ArrayList( + gridSlices.size()); + for (IGridSlice slice : gridSlices) { + GFERecord rec = new GFERecord(parmId, slice.getValidTime()); + rec.setGridHistory(slice.getHistory()); + rec.setMessageData(slice); + records.add(rec); + } + SaveGridRequest sgr = new SaveGridRequest(parmId, timeRangeSpan, + records); + + try { + ServerResponse sr = GridParmManager.saveGridData( + Arrays.asList(sgr), wsId, siteId); + if (sr.isOkay()) { + SendNotifications.send(sr.getNotifications()); + } else { + statusHandler.error("Unable to save grids for parm [" + + parmId + "] over time range " + timeRangeSpan + + ": " + sr.message()); + } + } finally { + ServerResponse> unLockResponse = LockManager + .getInstance().requestLockChange( + new LockRequest(parmId, timeRangeSpan, + LockMode.UNLOCK), wsId, siteId); + if (unLockResponse.isOkay()) { + statusHandler.debug("LOCKING: Unlocked for: " + wsId + + " TR: " + timeRangeSpan); + } else { + statusHandler.error("Could not unlock TimeRange " + + timeRangeSpan + " for parm [" + parmId + "]: " + + lockResponse.message()); + throw new GfeException("Request unlock failed. 
" + + unLockResponse.message()); + } } } } diff --git a/edexOsgi/com.raytheon.edex.plugin.gfe/utility/edex_static/base/gfe/isc/iscMosaic.py b/edexOsgi/com.raytheon.edex.plugin.gfe/utility/edex_static/base/gfe/isc/iscMosaic.py index d7e9660e1d..05c6233d13 100644 --- a/edexOsgi/com.raytheon.edex.plugin.gfe/utility/edex_static/base/gfe/isc/iscMosaic.py +++ b/edexOsgi/com.raytheon.edex.plugin.gfe/utility/edex_static/base/gfe/isc/iscMosaic.py @@ -1,27 +1,26 @@ ## # This software was developed and / or modified by Raytheon Company, # pursuant to Contract DG133W-05-CQ-1067 with the US Government. -# +# # U.S. EXPORT CONTROLLED TECHNICAL DATA # This software product contains export-restricted data whose # export/transfer/disclosure is restricted by U.S. law. Dissemination # to non-U.S. persons whether in the United States or abroad requires # an export license or other authorization. -# +# # Contractor Name: Raytheon Company # Contractor Address: 6825 Pine Street, Suite 340 # Mail Stop B8 # Omaha, NE 68106 # 402.291.0100 -# +# # See the AWIPS II Master Rights File ("Master Rights File.pdf") for # further licensing information. ## -import os, stat, time, string, bisect, getopt, sys, traceback -import LogStream, iscTime, iscUtil, mergeGrid -#import pupynere as NetCDF +import os, stat, time, string, sys +import iscTime, iscUtil, mergeGrid try: # dev environment from Scientific.IO import NetCDF @@ -48,7 +47,7 @@ from com.raytheon.uf.common.dataplugin.gfe import GridDataHistory_OriginType as from com.raytheon.uf.common.dataplugin.gfe.config import ProjectionData from com.raytheon.uf.common.dataplugin.gfe.config import ProjectionData_ProjectionType as ProjectionType from com.raytheon.uf.common.dataplugin.gfe.db.objects import DatabaseID -from com.raytheon.uf.common.dataplugin.gfe.db.objects import GridLocation +from com.raytheon.uf.common.dataplugin.gfe.db.objects import GridLocation from com.raytheon.uf.common.dataplugin.gfe.slice import DiscreteGridSlice from com.raytheon.uf.common.dataplugin.gfe.slice import ScalarGridSlice from com.raytheon.uf.common.dataplugin.gfe.slice import VectorGridSlice @@ -67,9 +66,9 @@ from com.raytheon.uf.edex.database.cluster import ClusterTask # # Port of iscMosaic.py # -# +# # SOFTWARE HISTORY -# +# # Date Ticket# Engineer Description # ------------ ---------- ----------- -------------------------- # 07/06/09 1995 bphillip Initial Creation. @@ -82,72 +81,289 @@ from com.raytheon.uf.edex.database.cluster import ClusterTask # 05/23/13 1759 dgilling Remove unnecessary imports. # 06/05/13 2063 dgilling Change __siteInDbGrid() to # call IFPWE.history() like A1. 
-#
-#
+# 11/05/13      2517     randerso  Restructured logging so it could be used by WECache
+#                                  Changed WECache to limit the number of cached grids kept in memory
+#
+#
 
-BATCH_WRITE_COUNT = 20
 BATCH_DELAY = 0.0
-ISC_USER="isc"
+MAX_CACHE_BYTES = 64 * 1024 * 1024  # 64 MB
 
-class WECache(object):
+ISC_USER = "isc"
+
+logger = None
+
+## Logging methods ##
+def initLogger(logName):
+    import logging
+    global logger
+    logger = iscUtil.getLogger("iscMosaic", logName=logName, logLevel=logging.INFO)
+
+def printTR(tr):
+    """
+    Format time range for printing (yymmdd_hhmm,yymmdd_hhmm)
+    Works with list or tuple
+
+    Args:
+        tr: the time range to format
+
+    Returns:
+        the formatted time range string
+    """
+    if tr is not None:
+        format = "%Y%m%d_%H%M"
+        s = '(' + time.strftime(format, time.gmtime(tr[0])) + ',' + \
+            time.strftime(format, time.gmtime(tr[1])) + ')'
+        return s
+    else:
+        return "None"
+
+def printShortTR(tr):
+    """
+    Format time range for printing (dd/hh->dd/hh)
+    Works with list or tuple
+
+    Args:
+        tr: the time range to format
+
+    Returns:
+        the formatted time range string
+    """
+    if tr is not None:
+        format = "%d/%H"
+        s = '(' + time.strftime(format, time.gmtime(tr[0])) + '->' + \
+            time.strftime(format, time.gmtime(tr[1])) + ')'
+        return s
+    else:
+        return "None"
+
+class WECache(object):
+    """
+    Cache representing the grids for a weather element that overlap a time range.
+    The cache will keep a limited number of grids in memory. This limit is determined
+    at initialization to be the number of grids that will fit in MAX_CACHE_BYTES (or a minimum of 2).
+
+    This is not a general purpose cache. Its behavior is designed to match the access patterns of iscMosaic.
+    """
     def __init__(self, we, tr=None):
         self._we = we
-        self._inv = {}
-        self._invCache = None
-
-        javaInv = self._we.getKeys()
+        logger.debug("WECache creating: %s", str(self._we.getParmid().toString()))
+
+        gridType = self._we.getGridType()
+        if gridType == "SCALAR":
+            bytesPerCell = 4
+        elif gridType == "VECTOR":
+            bytesPerCell = 8
+        elif gridType == "WEATHER":
+            bytesPerCell = 1
+        elif gridType == "DISCRETE":
+            bytesPerCell = 1
+
+        gloc = self._we.getGpi().getGridLoc()
+        gridBytes = gloc.getNx().intValue() * gloc.getNy().intValue() * bytesPerCell
+
+        self._maxCacheSize = max(2, MAX_CACHE_BYTES / gridBytes)
+        self._batchSize = self._maxCacheSize / 2
+        logger.debug("WECache max grids: %d, batch grids: %d", self._maxCacheSize, self._batchSize)
+
+        self._inv = {}          # Map containing keys for all inventory
+        self._invCache = None   # Cache of inventory sorted by start time
+        self._dirty = set()     # Cache written to but not flushed to disk
+        self._populated = set() # Grid is currently in the cache
+        self._loaded = set()    # Grid has been loaded into cache at least once
+
+        # get only keys that overlap tr
+        javaInv = self._we.getKeys(iscUtil.toJavaTimeRange(tr))
         pyInv = []
         for i in xrange(javaInv.size()):
             pyInv.append(iscUtil.transformTime(javaInv.get(i)))
 
-        # Dont get grids outside of the passed in timerange.
-        if tr:
-            tokill = []
-            for i, t in enumerate(pyInv):
-                if not self.overlaps(tr, t):
-                    tokill.append(i)
-            tokill.reverse()
-            for i in tokill:
-                del pyInv[i]
 
+        # create unpopulated entries for the entire inventory
+        for invTr in pyInv:
+            self._inv[invTr] = None
 
-        lst = list(pyInv)
-        while len(lst):
-            i = lst[:BATCH_WRITE_COUNT]
-            javaTRs = ArrayList()
-            for tr in i:
-                javaTRs.add(iscUtil.toJavaTimeRange(tr))
-            gridsAndHist = self._we.get(javaTRs, True)
-            for idx, tr in enumerate(i):
-                pair = gridsAndHist.get(idx)
-                g = self.__encodeGridSlice(pair.getFirst())
-                h = self.__encodeGridHistory(pair.getSecond())
-                self._inv[tr] = (g, h)
-            lst = lst[BATCH_WRITE_COUNT:]
-            time.sleep(BATCH_DELAY)
+        # populate the first _batchSize grids
+        if len(pyInv) > self._batchSize:
+            trList = pyInv[:self._batchSize - 1]
+            # add on the last tr since it is used by __splitGridsOnProcBoundary
+            trList.append(pyInv[-1])
+
+        else:
+            trList = pyInv
+
+        self.__loadGrids(trList)
+
+    def __loadGrids(self, trList):
+        javaTRs = ArrayList(len(trList))
+        for tr in trList:
+            javaTRs.add(iscUtil.toJavaTimeRange(tr))
+
+        gridsAndHist = self._we.get(javaTRs, True)
+        for idx in range(gridsAndHist.size()):
+            pair = gridsAndHist.get(idx)
+            tr = iscUtil.transformTime(pair.getFirst().getValidTime())
+
+            if tr in self._loaded:
+                logger.debug("WECache reloading: %s", printTR(tr))
+            else:
+                logger.debug("WECache loading: %s", printTR(tr))
+
+            g = self.__encodeGridSlice(pair.getFirst())
+            h = self.__encodeGridHistory(pair.getSecond())
+            self._inv[tr] = (g, h)
+            self._populated.add(tr)
+            self._loaded.add(tr)
 
     def keys(self):
         if not self._invCache:
             self._invCache = tuple(sorted(self._inv.keys(), key=lambda t: t[0]))
         return self._invCache
 
-    def __getitem__(self, key):
-        try:
-            return self._inv[key]
-        except KeyError:
-            grid = self._we.getItem(iscUtil.toJavaTimeRange(key))
-            pyGrid = self.__encodeGridSlice(grid)
-            history = grid.getGridDataHistory()
-            pyHist = self.__encodeGridHistory(history)
-            return (pyGrid, pyHist)
+    def __getitem__(self, tr):
+        logger.debug("WECache getting: %s", printTR(tr))
+        if tr in self._populated or tr in self._dirty:
+            return self._inv[tr]
+
+        if self._inv.has_key(tr):
+            self.__handleCacheMiss(tr)
+            return self._inv[tr]
+        else:
+            return None
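
WECache behaves as a bounded write-back cache: a miss loads the next batch of grids,
and when the budget is exhausted the earliest dirty entries are flushed before clean
ones are evicted. The policy's shape, reduced to a generic Java sketch (batch size and
eviction order simplified relative to the Python above):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;
    import java.util.TreeSet;

    public class WriteBackCache<K extends Comparable<K>, V> {
        interface Backing<K, V> { V load(K key); void save(K key, V value); }

        private final int maxSize;
        private final Backing<K, V> backing;
        private final Map<K, V> entries = new HashMap<K, V>();
        private final Set<K> dirty = new HashSet<K>();

        WriteBackCache(int maxSize, Backing<K, V> backing) {
            this.maxSize = maxSize;
            this.backing = backing;
        }

        V get(K key) {
            V v = entries.get(key);
            if (v == null) {             // miss: make room, then load
                if (entries.size() >= maxSize) {
                    evictOne();
                }
                v = backing.load(key);
                entries.put(key, v);
            }
            return v;
        }

        void put(K key, V value) {
            if (!entries.containsKey(key) && entries.size() >= maxSize) {
                evictOne();
            }
            entries.put(key, value);
            dirty.add(key);              // written but not yet flushed
        }

        private void evictOne() {
            // Prefer flushing the earliest dirty entry; fall back to a clean one.
            TreeSet<K> candidates = new TreeSet<K>(dirty.isEmpty() ? entries.keySet() : dirty);
            K victim = candidates.first();
            if (dirty.remove(victim)) {
                backing.save(victim, entries.get(victim));
            }
            entries.remove(victim);
        }
    }
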
+    def __handleCacheMiss(self, tr):
+        """
+        This function is called when a time range is requested that is not currently in the cache.
+        It will load the next batch of unpopulated grids in time range order starting with the time range passed in.
+
+        If the cache does not have room for a batch of grids to be loaded without exceeding the max cache size,
+        the earliest dirty grids (or clean grids if not enough dirty grids are found) are flushed to disk before
+        reading the next batch.
+
+        Args:
+            tr: the missing time range
+        """
+        logger.debug("WECache miss: %s", printTR(tr))
+        # determine next batch of keys to read
+        toRead = self.keys()
+        toRead = toRead[toRead.index(tr):]
+        toRead = sorted(set(toRead) - self._populated, key=lambda t: t[0])
+        toRead = toRead[:self._batchSize]
+
+        # if no room to read another batch
+        if len(self._populated) + self._batchSize > self._maxCacheSize:
+            toFlush = []
+            # first flush dirty grids
+            toFlush += self._populated & self._dirty
+
+            # then flush clean grids if necessary
+            toFlush += self._populated - self._dirty
+
+            # flush only enough to read a batch
+            toFlush = sorted(toFlush, key=lambda t: t[0])
+            toFlush = toFlush[:self._maxCacheSize - self._batchSize]
+            self.__flushGrids(toFlush)
+
+        self.__loadGrids(toRead)
+
+    def __flushGrids(self, trList):
+        """
+        Flush a list of time ranges from the cache.
+        Dirty time ranges will be written to disk.
+        Writes will be done in _batchSize groups
+
+        Args:
+            trList: the list of time ranges to be flushed
+        """
+        logger.debug("WECache flushing: %d grids", len(trList))
+
+        saveRequest = LinkedHashMap()
+        saveList = []   # python time ranges covered by this saveRequest
+        saveSize = 0    # number of grids in saveRequest
+
+        timeSpan = None # time span of this contiguous batch
+        gridsToSave = ArrayList(self._batchSize) # grids in this contiguous batch
+        saveBatch = False
+        for tr in sorted(trList, key=lambda t: t[0]):
+            dirty = tr in self._dirty
+            if dirty:
+                logger.debug("WECache storing: %s", printTR(tr))
+                saveList.append(tr)
+
+                pyGrid, pyHist = self._inv[tr]
+                if pyGrid is not None:
+                    javaGrid = self.__buildJavaGridSlice(tr, pyGrid, pyHist)
+                    gridsToSave.add(javaGrid)
+                if timeSpan is None:
+                    timeSpan = [tr[0], tr[1]]
+                else:
+                    timeSpan[1] = tr[1]
+                saveBatch = gridsToSave.size() >= self._batchSize
+
+            else: # clean grid
+                # save contiguous dirty blocks
+                saveBatch = timeSpan is not None
+
+                # if populated and clean just purge from cache
+                if tr in self._populated:
+                    logger.debug("WECache purging: %s", printTR(tr))
+                    self._inv[tr] = None
+                    self._populated.remove(tr)
+
+            if saveBatch:
+                # add this contiguous batch to saveRequest
+                logger.debug("WECache saving %d grids in %s", gridsToSave.size(), printTR(timeSpan))
+                gridSaveTR = iscUtil.toJavaTimeRange(timeSpan)
+                saveRequest.put(gridSaveTR, gridsToSave)
+                timeSpan = None
+                saveBatch = False
+                saveSize += gridsToSave.size()
+                gridsToSave = ArrayList(self._batchSize)
+
+            # if this saveRequest has reached the batchSize save it
+            if saveSize >= self._batchSize:
+                try:
+                    self._we.put(saveRequest)
+                except:
+                    raise
+                else: # no exceptions on save, clear saved grids from cache
+                    # depopulate saved grids
+                    for tr in saveList:
+                        self._inv[tr] = None
+                        self._populated.discard(tr)
+                        self._dirty.remove(tr)
+                    saveRequest.clear()
+                    saveList = []
+                    saveSize = 0
+
+        # save partial batch if necessary
+        if len(saveList):
+            if timeSpan is not None:
+                logger.debug("WECache saving %d grids in %s", gridsToSave.size(), printTR(timeSpan))
+                gridSaveTR = iscUtil.toJavaTimeRange(timeSpan)
+                saveRequest.put(gridSaveTR, gridsToSave)
+
+            try:
+                self._we.put(saveRequest)
+            except:
+                raise
+            else: # no exceptions on save, clear saved grids from cache
+                # depopulate saved grids
+                for tr in saveList:
+                    self._inv[tr] = None
+                    self._populated.discard(tr)
+                    self._dirty.remove(tr)
+
+        return
 
     def __setitem__(self, tr, value):
         if value is None:
+            logger.debug("WECache erasing: %s", printTR(tr))
             grid = hist = None
         else:
-            grid, hist = value
-
+            logger.debug("WECache setting: %s", printTR(tr))
+            grid, hist = value
+
         # Remove any overlapping grids
         tokill = []
         for itr in self._inv:
@@ -155,53 +371,44 @@
                 tokill.append(itr)
         for i in tokill:
             del self._inv[i]
+            self._dirty.discard(i)
+            self._populated.discard(i)
+            self._loaded.discard(i)
         self._invCache = None
 
-        # Now add the new grid if it exists
+        # if cache full flush some grids to disk
+        if len(self._populated) >= self._maxCacheSize:
+            toFlush = []
+            # first flush dirty grids
+            toFlush += self._populated & self._dirty
+
+            # then flush clean grids if necessary
+            toFlush += self._populated - self._dirty
+
+            # flush a full batch if possible
+            toFlush = sorted(toFlush, key=lambda t: t[0])
+            toFlush = toFlush[:self._batchSize]
+            self.__flushGrids(toFlush)
+
+        # Now add the new grid
+        self._inv[tr] = (grid, hist)
+        self._dirty.add(tr)
+        self._loaded.add(tr)
+        self._invCache = None
         if grid is not None:
-            self._inv[tr] = (grid, hist)
-            self._invCache = None
+            self._populated.add(tr)
 
     def flush(self):
-        """Actually writes the contents of the WECache to HDF5/DB"""
-        # get cache inventory in time range order
-        # we want to write to disk in contiguous time range blocks so we only
-        # overwrite what we have full sets of grids for.
-        inv = list(self.keys())
-        # Don't believe the grid slices need to be in time order when saving
-        # but leaving them that way just in case.
-        gridsToSave = LinkedHashMap()
-        while inv:
-            # retrieve the next BATCH of grids to persist
-            i = inv[:BATCH_WRITE_COUNT]
-            # pre-compute the replacement TR for the save requests generated by
-            # IFPWE.put().
-            # since the inventory is in order it's the start time of the
-            # first TR and the end time of the last TR.
-            gridSaveTR = iscUtil.toJavaTimeRange((i[0][0], i[-1][1]))
-            for tr in i:
-                javaTR = iscUtil.toJavaTimeRange(tr)
-                pyGrid, pyHist = self._inv[tr]
-                javaHist = self.__buildJavaGridHistory(pyHist)
-                javaGrid = self.__buildJavaGridSlice(javaTR, pyGrid, javaHist)
-                gridsToSave.put(javaTR, javaGrid)
-            self._we.put(gridsToSave, gridSaveTR)
-            # delete the persisted items from the cache and our copy of the
-            # inventory
-            gridsToSave.clear()
-            for tr in i:
-                del self._inv[tr]
-            self._invCache = None
-            inv = inv[BATCH_WRITE_COUNT:]
-            time.sleep(BATCH_DELAY)
-
+        """Writes the entire contents of the WECache to HDF5/DB"""
+        # flush entire inventory
+        self.__flushGrids(self.keys())
 
     def overlaps(self, tr1, tr2):
         if (tr1[0] >= tr2[0] and tr1[0] < tr2[1]) or \
-           (tr2[0] >= tr1[0] and tr2[0] < tr1[1]):
+            (tr2[0] >= tr1[0] and tr2[0] < tr1[1]):
             return True
         return False
 
     def __encodeGridSlice(self, grid):
         gridType = self._we.getGridType()
         if gridType == "SCALAR":
@@ -215,37 +422,40 @@
             for theKey in keys:
                 keyList.append(theKey.toString())
             return (grid.__numpy__[0], keyList)
-        elif gridType =="DISCRETE":
+        elif gridType == "DISCRETE":
             keys = grid.getKey()
             keyList = []
             for theKey in keys:
                 keyList.append(theKey.toString())
             return (grid.__numpy__[0], keyList)
 
     def __encodeGridHistory(self, histories):
         retVal = []
         for i in xrange(histories.size()):
             retVal.append(histories.get(i).getCodedString())
         return tuple(retVal)
 
     def __buildJavaGridSlice(self, tr, grid, history):
+        javaTR = iscUtil.toJavaTimeRange(tr)
+        javaHist = self.__buildJavaGridHistory(history)
+
         gridType = self._we.getGridType()
         if gridType == "SCALAR":
-            return self._we.buildScalarSlice(tr, grid.astype(numpy.float32), history)
+            return self._we.buildScalarSlice(javaTR, grid.astype(numpy.float32), javaHist)
         elif gridType == "VECTOR":
-            return self._we.buildVectorSlice(tr,
grid[0].astype(numpy.float32), grid[1].astype(numpy.float32), history) + return self._we.buildVectorSlice(javaTR, grid[0].astype(numpy.float32), grid[1].astype(numpy.float32), javaHist) elif gridType == "WEATHER": - return self._we.buildWeatherSlice(tr, grid[0].astype(numpy.byte), str(grid[1]), history) + return self._we.buildWeatherSlice(javaTR, grid[0].astype(numpy.byte), str(grid[1]), javaHist) elif gridType == "DISCRETE": - return self._we.buildDiscreteSlice(tr, grid[0].astype(numpy.byte), str(grid[1]), history) - + return self._we.buildDiscreteSlice(javaTR, grid[0].astype(numpy.byte), str(grid[1]), javaHist) + def __buildJavaGridHistory(self, histories): retVal = ArrayList() blankPubTime = "Fcst" in self._we.getParmid().getDbId().toString() for histEntry in histories: javaHist = GridDataHistory(histEntry) # strip out publish time to allow for publishing correctly - # when merging Fcst out of A1 + # when merging Fcst out of A1 if blankPubTime: javaHist.setPublishTime(None) retVal.add(javaHist) @@ -253,16 +463,10 @@ class WECache(object): class IscMosaic: - - ## Logging methods ## - def __initLogger(self): - self.__logger=iscUtil.getLogger("iscMosaic",self.__logFile) - def __init__(self, args): self.__mysite = args['siteID'] self.__userID = args['userID'] self.__db = None # ifpServer database object - self.__dbGrid = None self.__parmsToProcess = args['parmsToProcess'] self.__blankOtherPeriods = args['blankOtherPeriods'] self.__altMask = args['altMask'] @@ -273,7 +477,7 @@ class IscMosaic: self.__iscSends = args['iscSends'] if args['databaseID'] is not None: self.__databaseID = args['databaseID'] - else: + else: self.__databaseID = self.__mysite + "_GRID__ISC_00000000_0000" self.__inFiles = args['inFiles'] self.__ignoreMask = args['ignoreMask'] @@ -282,8 +486,8 @@ class IscMosaic: self.__parmsToIgnore = args['parmsToIgnore'] self.__gridDelay = args['gridDelay'] self.__logFile = args['logFileName'] - - startTime = 0 + + startTime = 0 if args['startTime'] is not None: startTime = self.__decodeTimeString(args['startTime']) endTime = int(2 ** 30 - 1 + 2 ** 30) @@ -291,63 +495,47 @@ class IscMosaic: endTime = self.__decodeTimeString(args['endTime']) self.__processTimePeriod = (startTime, endTime) - self.__initLogger() + initLogger(self.__logFile) - - def logEvent(self,*msg): - self.__logger.info(iscUtil.tupleToString(*msg)) - - def logProblem(self,*msg): - self.__logger.error(iscUtil.tupleToString(*msg)) - - def logException(self,*msg): - self.__logger.exception(iscUtil.tupleToString(*msg)) - - def logVerbose(self,*msg): - self.__logger.debug(iscUtil.tupleToString(*msg)) - - def logDebug(self,*msg): - self.logVerbose(iscUtil.tupleToString(*msg)) - def execute(self): - self.logEvent("iscMosaic Starting") - + logger.info("iscMosaic Starting") + # get the WxDefinition and DiscreteDefinition config = IFPServerConfigManager.getServerConfig(self.__mysite) self.__wxDef = config.getWxDefinition() self.__disDef = config.getDiscreteDefinition() - + self.__db = IFPDB(self.__databaseID) # parms in database - parmsInDb = self.__db.getKeys() - if len(self.__parmsToProcess) == 0: - for i in range(0, parmsInDb.size()): + if len(self.__parmsToProcess) == 0: + parmsInDb = self.__db.getKeys() + for i in range(0, parmsInDb.size()): self.__parmsToProcess.append(parmsInDb.get(i).toString()) - # get office type information + # get office type information self.__myOfficeType = IFPServerConfigManager.getServerConfig(DatabaseID(self.__databaseID).getSiteId()).officeType() - + #process each input file for file in 
self.__inFiles: self.__areaMask = None self.__processInputFile(file) - + if self.__deleteInput: os.remove(file) - self.logEvent("iscMosaic Finished") - + logger.info("iscMosaic Finished") + def __processInputFile(self, filename): - + a = os.times() cpu0 = a[0] + a[1] start = a[4] - self.logEvent("Processing file=", filename) + logger.info("Processing file=%s", filename) fsize = os.stat(filename)[stat.ST_SIZE] - self.logEvent("Input file size: ", fsize) - + logger.info("Input file size: %d", fsize) + gzipFile = None unzippedFile = None try: @@ -365,63 +553,53 @@ class IscMosaic: if unzippedFile is not None: unzippedFile.close() os.remove(unzippedFile.name) - + a = os.times() cpu = a[0] + a[1] stop1 = a[4] - if hasattr(NetCDF, "netcdf_file"): - # use this for pupynere - - # TODO: Remove False flag passed to constructor to resolve memory - # allocation error found in #7788. If AWIPS2 ever moves to 64-bit - # we'll probably have enough address space to allow the file to be - # memory-mapped. - file = NetCDF.netcdf_file(filename, "r", False) - else: - # use this for ScientificIO.NetCDF - file = NetCDF.NetCDFFile(filename, "r") - + file = NetCDF.NetCDFFile(filename, "r") + # check version fileV = getattr(file, 'fileFormatVersion') if fileV != "20010816" and fileV != "20030117": - self.logProblem("Incompatible file format found") + logger.error("Incompatible file format found") raise Exception, "Incompatible file format" - + # get creation time self.__creTime = getattr(file, 'creationTime') creTimeString = time.asctime(time.gmtime(self.__creTime)) - self.logEvent("CreationTime:" , creTimeString) - + logger.info("CreationTime: %s" , creTimeString) + # get file start/end processing times self.__modProcTime = self.__getFileProcessingTimes(file) if self.__modProcTime is None: return None - self.logEvent("Process TR: ", self.__printTR(self.__modProcTime)) - + logger.info("Process TR: %s", printTR(self.__modProcTime)) + # prepare for the notification message totalTimeRange = None totalGrids = 0 totalFails = 0 pParms = [] self.__adjDataMsg = [] - + # process each parm in the netCDF file # Only use one area mask for all parms. This will break # if we ever use parms with differing dims in a database. areaMask = None inFileVars = file.variables.keys() #parm names - + for parm in inFileVars: - tup = self.__getNetCDFInputVariables(file, parm) - + tup = self.__getNetCDFInputVariables(file, parm) + if tup is None: continue else: parmName = tup[0] vars = tup[1] - remapHistory = tup[2] - + remapHistory = tup[2] + # rename weather element if self.__renameWE: siteID = str(getattr(vars[0], "siteID")) @@ -430,23 +608,22 @@ class IscMosaic: idx = parmName.rfind("_") parmName = parmName[0:idx] + incomingOfficeType + \ parmName[idx:] - self.logEvent("Renamed to: " + parmName + \ - " data from " + siteID) - + logger.info("Renamed to: %s data from %s", parmName, siteID) + # ignore this parm? if parmName in self.__parmsToIgnore: - self.logEvent("Ignoring ", parmName) + logger.info("Ignoring %s", parmName) continue - - # match in ifp database? + + # match in ifp database? 
if not parmName in self.__parmsToProcess and \ len(self.__parmsToProcess) != 0: - self.logEvent("Skipping", parmName) + logger.info("Skipping %s", parmName) continue - + (pName, pTR, nGrids, nFail) = self.__processParm(parmName, vars, remapHistory, filename) - - # save info for the notification message + + # save info for the notification message pParms.append(pName) if pTR is not None: if totalTimeRange is None: @@ -457,199 +634,201 @@ class IscMosaic: if totalTimeRange[1] < pTR[1]: totalTimeRange = (totalTimeRange[0], pTR[1]) totalGrids = totalGrids + nGrids - totalFails = totalFails + nFail - + totalFails = totalFails + nFail + file.close() - + #announce storage if len(self.__announce) and totalGrids > 0: - msg = self.__announce + self.__siteID + ' ' + `pParms` + ' ' + self.__printShortTR(totalTimeRange) + ' #Grids=' + `totalGrids` + msg = self.__announce + self.__siteID + ' ' + `pParms` + ' ' + printShortTR(totalTimeRange) + ' #Grids=' + `totalGrids` if totalFails: - msg = msg + '[' + `totalFails` + ' FAILED]' - notification = UserMessageNotification(msg, Priority.CRITICAL, "ISC", self.__mysite) - else: - notification = UserMessageNotification(msg, Priority.EVENTA, "ISC", self.__mysite) - self.logEvent(msg) + msg = msg + '[' + `totalFails` + ' FAILED]' + notification = UserMessageNotification(msg, Priority.CRITICAL, "ISC", self.__mysite) + else: + notification = UserMessageNotification(msg, Priority.EVENTA, "ISC", self.__mysite) + logger.info(msg) SendNotifications.send(notification) - + # announce "modified/adjusted" data if len(self.__announce) and len(self.__adjDataMsg): msg = "ISC Data Modified:\n" + "\n".join(self.__adjDataMsg) notification = UserMessageNotification(msg, Priority.EVENTA, "ISC", self.__mysite) SendNotifications.send(notification) - + a = os.times() cpugz = a[0] + a[1] stop = a[4] - self.logEvent("Elapsed/CPU time: ", - "%-.2f" % (stop1 - start), "/", "%-.2f" % (cpu - cpu0), "decompress,", - "%-.2f" % (stop - stop1), "/", "%-.2f" % (cpugz - cpu), "processing,", - "%-.2f" % (stop - start), "/", "%-.2f" % (cpugz - cpu0), "total") - + logger.info("Elapsed/CPU time: " + "%-.2f / %-.2f decompress, " + "%-.2f / %-.2f processing, " + "%-.2f / %-.2f total", + stop1 - start, cpu - cpu0, + stop - stop1, cpugz - cpu, + stop - start, cpugz - cpu0) + + def __processParm(self, parmName, vars, history, filename): - + retries = 5 retryAttempt = 0 pName = string.replace(parmName, "_SFC", "") totalTimeRange = None inTimesProc = [] numFailed = 0 - - while retryAttempt != retries: - self.logDebug("iscMosaic: Attempting to acquire cluster lock for:",parmName) - startTime = time.time() - clusterLock = ClusterLockUtils.lock("ISC Write Lock",parmName , 120000, True) - elapsedTime = (time.time() - startTime)*1000 - self.logDebug("iscMosaic: Request for",parmName+" took",elapsedTime,"ms") - if str(clusterLock.getLockState()) == "SUCCESSFUL": - self.logDebug("iscMosaic: Successfully acquired cluster lock for:",parmName) - try: - # open up the ifpServer weather element - self.__dbwe = self.__db.getItem(parmName,ISC_USER) - self._wec = WECache(self.__dbwe, self.__modProcTime) - self.__rateParm = self.__dbwe.getGpi().isRateParm() - self.__parmName = parmName - - # get general info for the parm from the input file and output db - inGeoDict = self.__getInputGeoInfo(vars[0]) - inFillV = self.__determineFillValue(vars[0]) - - gridType = getattr(vars[0], "gridType") - minV = self.__dbwe.getGpi().getMinValue() - # compute the site mask - self.__siteID = str(getattr(vars[0], "siteID")) - if 
self.__areaMask is None: - self.__areaMask = self.__computeAreaMask().getGrid().__numpy__[0] + self.__siteID = str(getattr(vars[0], "siteID")) + inTimes = self.__getIncomingValidTimes(vars[0]) + logger.info("Processing %s #Grids=%d Site=%s", parmName, len(inTimes), self.__siteID) - # create the mergeGrid class - mGrid = mergeGrid.MergeGrid(self.__creTime, self.__siteID, inFillV, - minV, self.__areaMask, gridType, self.__dbwe.getDiscreteKeys()) - - # erase all existing grids first? - #self.__dbinv = self.__dbwe.keys() - self.__dbinv = self._wec.keys() - if self.__eraseFirst: - self.__eraseAllGrids(self.__modProcTime) - else: + if self.__eraseFirst or len(inTimes) > 0: + while retryAttempt != retries: + logger.debug("iscMosaic: Attempting to acquire cluster lock for: %s", parmName) + startTime = time.time() + clusterLock = ClusterLockUtils.lock("ISC Write Lock", parmName , 120000, True) + elapsedTime = (time.time() - startTime) * 1000 + logger.debug("iscMosaic: Request for %s took %d ms", parmName, elapsedTime) + if str(clusterLock.getLockState()) == "SUCCESSFUL": + logger.debug("iscMosaic: Successfully acquired cluster lock for: %s", parmName) + try: + # open up the ifpServer weather element + self.__dbwe = self.__db.getItem(parmName, ISC_USER) + self._wec = WECache(self.__dbwe, tr=self.__modProcTime) + self.__rateParm = self.__dbwe.getGpi().isRateParm() + self.__parmName = parmName + + # get general info for the parm from the input file and output db + inGeoDict = self.__getInputGeoInfo(vars[0]) + inFillV = self.__determineFillValue(vars[0]) + + gridType = getattr(vars[0], "gridType") + minV = self.__dbwe.getGpi().getMinValue() + # compute the site mask + + if self.__areaMask is None: + self.__areaMask = self.__computeAreaMask().getGrid().__numpy__[0] + + # create the mergeGrid class + mGrid = mergeGrid.MergeGrid(self.__creTime, self.__siteID, inFillV, + minV, self.__areaMask, gridType, self.__dbwe.getDiscreteKeys()) + + # erase all existing grids first? + self.__dbinv = self._wec.keys() try: self.__splitGridsOnProcBoundary(self.__modProcTime) except: - self.logProblem('Failure to splitGridsOnProcBoundary ', - ' Parm=', parmName, ' Time=', - self.__printTR(self.__modProcTime), traceback.format_exc()) - - # process each incoming grid - inTimes = self.__getIncomingValidTimes(vars[0]) - inTimesProc = [] - numFailed = 0 - self.logEvent("Processing ", parmName, " #Grids=", - len(inTimes), " Site=", self.__siteID) - - # process incoming grids - for i in xrange(len(inTimes)): - # Put in a delay so we don't hammer the server so hard. 
- if self.__gridDelay > 0.0: - time.sleep(self.__gridDelay) - tr = iscTime.intersection(inTimes[i], self.__modProcTime) - if tr is not None: - inTimesProc.append(tr) - try: - self.logDebug("Processing Grid: ", parmName, \ - " TR=", self.__printTR(tr)) - - # get the grid and remap it - grid = self.__getGridFromNetCDF(gridType, vars, i) - - # if WEATHER or DISCRETE, then validate and adjust keys - if self.__adjustTranslate: - if gridType == "DISCRETE": - grid = self.__validateAdjustDiscreteKeys(grid, - self.__parmName, tr) - elif gridType == "WEATHER": - grid = self.__validateAdjustWeatherKeys(grid, - self.__parmName, tr) - - grid = self.__remap(self.__dbwe.getParmid(), grid, inGeoDict, inFillV) - - # if rate parm, then may need to adjust the values - if self.__rateParm and inTimes[i] != tr: - grid = self.__adjustForTime(inTimes[i], tr, grid, - inFillV) - - # get inventory original inventory - # self.__dbinv = self.__dbwe.getKeys() - # print self.__dbinv - - # merge the grids - self.__processIncomingGrid(parmName, grid, history[i], - mGrid, tr, inFillV) - - except: - - self.logProblem('Failure to process grid in file [', - filename, '] Parm=', parmName, ' Time=', - self.__printTR(tr), traceback.format_exc()) - numFailed = numFailed + 1 - - else: - self.logDebug("Skipping Grid: ", parmName, " TR=", self.__printTR(tr), "outside start/end range") - - - # blank out any gaps - if self.__blankOtherPeriods == 1: - blankTimes = self.__calcBlankingTimes(inTimesProc) - # get updated inventory - - for i in xrange(len(blankTimes)): - tr = iscTime.intersection(blankTimes[i], self.__modProcTime) + logger.exception('Failure to splitGridsOnProcBoundary Parm=%s Time=%s', + parmName, printTR(self.__modProcTime)) + + if self.__eraseFirst: + self.__eraseAllGrids(self.__modProcTime) + + # process each incoming grid + inTimesProc = [] + numFailed = 0 + + # process incoming grids + for i in xrange(len(inTimes)): + # Put in a delay so we don't hammer the server so hard. 
+                        if self.__gridDelay > 0.0:
+                            time.sleep(self.__gridDelay)
+                        tr = iscTime.intersection(inTimes[i], self.__modProcTime)
+                        if tr is not None:
+                            inTimesProc.append(tr)
+                            try:
+                                logger.debug("Processing Grid: %s TR=%s", parmName, printTR(tr))
+
+                                # get the grid and remap it
+                                grid = self.__getGridFromNetCDF(gridType, vars, i)
+
+                                # if WEATHER or DISCRETE, then validate and adjust keys
+                                if self.__adjustTranslate:
+                                    if gridType == "DISCRETE":
+                                        grid = self.__validateAdjustDiscreteKeys(grid,
+                                          self.__parmName, tr)
+                                    elif gridType == "WEATHER":
+                                        grid = self.__validateAdjustWeatherKeys(grid,
+                                          self.__parmName, tr)
+
+                                grid = self.__remap(self.__dbwe.getParmid(), grid, inGeoDict, inFillV)
+
+                                # if rate parm, then may need to adjust the values
+                                if self.__rateParm and inTimes[i] != tr:
+                                    grid = self.__adjustForTime(inTimes[i], tr, grid,
+                                      inFillV)
+
+                                # merge the grids
+                                self.__processIncomingGrid(parmName, grid, history[i],
+                                  mGrid, tr, inFillV)
+
+                            except:
+                                logger.exception('Failure to process grid in file [%s] Parm=%s Time=%s',
+                                                 filename, parmName, printTR(tr))
+                                numFailed = numFailed + 1
+
+                        else:
+                            logger.debug("Skipping Grid: %s TR=%s outside start/end range",
+                                         parmName, printTR(tr))
+
+
+                    # blank out any gaps
+                    if self.__blankOtherPeriods == 1:
+                        blankTimes = self.__calcBlankingTimes(inTimesProc)
+                        # get updated inventory
+
+                        for i in xrange(len(blankTimes)):
+                            tr = iscTime.intersection(blankTimes[i], self.__modProcTime)
+                            if tr is not None:
+                                try:
+                                    logger.debug("Processing Blank: %s TR=%s",
+                                                 parmName, printTR(tr))
+                                    self.__processBlankTime(mGrid, tr)
+                                except:
+                                    logger.exception('Failure to process grid blanking Parm=%s Time=%s',
+                                                     parmName, printTR(tr))
+
+
+
+                    # Returns tuple of (parmName, TR, #grids, #fails)
+                    if len(inTimesProc):
+                        totalTimeRange = (inTimesProc[-1][-1] - 3600, inTimesProc[0][0]) if False else (inTimesProc[0][0], inTimesProc[-1][-1] - 3600)
+                    self._wec.flush()
+
+                    retryAttempt = retries
+                except:
+                    retryAttempt = retryAttempt + 1
+                    logger.exception("Error saving ISC data. Retrying ( %d / %d )", retryAttempt, retries)
+                    time.sleep(1)
+                finally:
+                    logger.debug("iscMosaic: Attempting to release cluster lock for: %s", parmName)
+                    ClusterLockUtils.unlock(clusterLock, False)
+                    logger.debug("iscMosaic: Successfully released cluster lock for: %s", parmName)
+            elif str(clusterLock.getLockState()) == "OLD":
+                retryAttempt = retryAttempt + 1
+                # Clear old lock to retry
+                logger.debug("Old lock retrieved for ISC write. Attempting to renew lock")
+                ClusterLockUtils.unlock(clusterLock, False)
-            elif str(clusterLock.getLockState()) == "FAILED":
-                retryAttempt = retryAttempt + 1
-                if retryAttempt == retries:
-                    self.logProblem("Cluster lock could not be established for ",self._we.getParmid(),"at time range",TimeRange(tr[0],tr[1]),"Data was not saved.")
-                else:
-                    self.logProblem("Cluster lock request failed for ISC write.", retries, "Retrying (", retryAttempt, "/", retries, ")")
-                    time.sleep(1)
+            elif str(clusterLock.getLockState()) == "FAILED":
+                retryAttempt = retryAttempt + 1
+                if retryAttempt == retries:
+                    logger.error("Cluster lock could not be established for %s at time range %s. Data was not saved.",
+                                 parmName, printTR(self.__modProcTime))
+                else:
+                    logger.error("Cluster lock request failed for ISC write. Retrying ( %d / %d )",
+                                 retryAttempt, retries)
+                    time.sleep(1)
 
         return (pName, totalTimeRange, len(inTimesProc), numFailed)
 
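
__processParm wraps the whole save in a cluster-lock retry loop: SUCCESSFUL runs the
work under try/except/finally (always unlocking), OLD clears the stale lock and
retries, FAILED backs off and retries until attempts run out. The same control flow in
a compact Java sketch (the lock interface is a stand-in for ClusterLockUtils):

    public class LockRetrySketch {
        enum LockState { SUCCESSFUL, OLD, FAILED }
        interface ClusterLock {
            LockState tryLock(String task);
            void unlock(String task);
        }

        /** Run work under a cluster lock, retrying a bounded number of times. */
        static boolean runLocked(ClusterLock lock, String task, Runnable work,
                int retries) throws InterruptedException {
            int attempt = 0;
            while (attempt != retries) {
                switch (lock.tryLock(task)) {
                case SUCCESSFUL:
                    try {
                        work.run();
                        return true;            // done; stop retrying
                    } catch (RuntimeException e) {
                        attempt++;              // failed save: retry
                        Thread.sleep(1000);
                    } finally {
                        lock.unlock(task);      // never leak the lock
                    }
                    break;
                case OLD:
                    attempt++;
                    lock.unlock(task);          // clear the stale lock, then retry
                    break;
                case FAILED:
                    attempt++;
                    Thread.sleep(1000);         // back off before retrying
                    break;
                }
            }
            return false;                       // data was not saved
        }
    }
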
     def __processIncomingGrid(self, parmName, remappedGrid, remappedHistory,
             mGrid, tr, inFillV):
         # calculate merge
         merge = iscTime.mergeTR(tr, self.__dbinv)
 
         # get the associated db grids, merge, and store
         for m in merge:
-
-            self.logDebug("Merge: ", self.__printTR(m[0]),
-                self.__printTR(m[1]), m[2])
-            gotGrid = self.__getDbGrid(m[0])
+
+            logger.debug("Merge: %s %s %s", printTR(m[0]),
+                printTR(m[1]), m[2])
+            gotGrid = self._wec[m[0]]
 
             if gotGrid is not None:
                 destGrid = gotGrid[0]
@@ -704,48 +883,15 @@
 
     def __storeGrid(self, tr, grid):
         if grid is not None and grid[1] is not None and grid[0] is not None:
-            #try:
-            #self.__dbwe.histSave(tr, grid[0], grid[1])
+            logger.debug("Store: %s", printTR(tr))
             self._wec[tr] = grid
-            #except Exception, e:
-            #    self.logProblem("StoreFailed: ", tr, `grid[1]`)
-            #    raise e
 
             if tr not in self.__dbinv:
                 self.__dbinv = self._wec.keys()
-                #self.__dbinv = self.__dbwe.keys()
-            self.logDebug("Store:", self.__printTR(tr))
         else:
+            logger.debug("Erase: %s", printTR(tr))
             self._wec[tr] = None
             self.__dbinv = self._wec.keys()
-            #self.__dbwe[tr] = None
-            #self.__dbinv = self.__dbwe.keys()
-            self.logDebug("Erase:", self.__printTR(tr))
-
-    #---------------------------------------------------------------------
-    # get db grid
-    # Gets the needed database grid
-    #   tr = desired grid, identified by time range
-    # Returns tuple of (grid, history) (or None if unknown)
-    #---------------------------------------------------------------------
-    def __getDbGrid(self, tr):
-        if tr is None:
-            return None
-
-        if self.__dbGrid is None or tr != self.__dbGrid[2]:
-            self.__dbGrid = None
-            #grid = self.__dbwe.getGridAndHist(tr)
-            grid = self._wec[tr]
-            if grid is not None:
-                destGrid, history = grid
-                self.__dbGrid = (destGrid, history, tr)
-            else:
-                self.logProblem("Unable to access grid for ",
-                    self.__printTR(tr), "for ", self.__parmName)
-                return None
-
-        return (self.__dbGrid[0], self.__dbGrid[1])
 
     #---------------------------------------------------------------------
     # calculate file start/end processing times
     # Returns (startTime, endTime) or None for processing
     #   file= netcdf file descriptor object
     #---------------------------------------------------------------------
     def __getFileProcessingTimes(self, file):
 
        # try:
         startTime = self.__decodeTimeString(getattr(file, 'startProcTime'))
         endTime = self.__decodeTimeString(getattr(file, 'endProcTime'))
         modProcTime = iscTime.intersection((startTime, endTime),
             self.__processTimePeriod)
 
         if modProcTime is None:
-            self.logProblem("Skipping file due to non overlapping periods")
+            logger.error("Skipping file due to non overlapping periods")
 
         return
modProcTime - - #------------------------------------------------------------------------- - # Get printable time range (yymmdd_hhmm,yymmdd_hhmm) - # Works with list or tuple - #------------------------------------------------------------------------- - def __printTR(self, tr): - if tr is not None: - format = "%Y%m%d_%H%M" - s = '(' + time.strftime(format, time.gmtime(tr[0])) + ',' + \ - time.strftime(format, time.gmtime(tr[1])) + ')' - return s - else: - return "None" - + def __decodeTimeString(self, timeStr): "Create an Integer time from a string: YYYYMMDD_HHMM" @@ -788,12 +921,11 @@ class IscMosaic: except ImportError: importError = True except: - self.logProblem(timeStr, \ - "is not a valid time string. Use YYYYMMDD_HHMM", traceback.format_exc()) + logger.exception("%s is not a valid time string. Use YYYYMMDD_HHMM", timeStr) raise Exception, "Bad date format YYYYMMDD_HHMM" return iscTime.timeFromComponents(intTime) - + #--------------------------------------------------------------------- # get netcdf input variables # Gets the input variables from the netCDF file based on the parm name. @@ -858,7 +990,7 @@ class IscMosaic: else: return None - + #------------------------------------------------------------------------- # Get Geographical Input Information # var is the netCDF variable @@ -916,7 +1048,7 @@ class IscMosaic: ((float(origin[0]), float(origin[1])), (float(extent[0]), float(extent[1]))) return inProjData - + #--------------------------------------------------------------------- # determine fill value for input # vars = netCDF variables @@ -927,10 +1059,10 @@ class IscMosaic: def __determineFillValue(self, var): gridType = getattr(var, "gridType") if gridType == 'SCALAR' or gridType == 'VECTOR': - return - 30000.0 + return -30000.0 else: - return - 127 - + return -127 + #--------------------------------------------------------------------- # compute the area mask # Returns areaMask to use based on the siteID. 
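[Annotation] __decodeTimeString ultimately produces epoch seconds for a YYYYMMDD_HHMM string via iscTime.timeFromComponents. An equivalent standalone sketch using only the standard library:

import calendar
import time

def decode_time_string(time_str):
    "Return UTC epoch seconds for a YYYYMMDD_HHMM string."
    try:
        tm = time.strptime(time_str, "%Y%m%d_%H%M")
    except ValueError:
        raise Exception("Bad date format YYYYMMDD_HHMM")
    return calendar.timegm(tm)

# decode_time_string("20131114_0600") -> 1384408800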
for ISC data, @@ -942,8 +1074,8 @@ class IscMosaic: #maskDims = (domain.getNy().intValue(), domain.getNx().intValue()) #areaMask = numpy.ones(maskDims) #areaMask.setGloc(domain) - - + + areaMask = ReferenceData(domain, ReferenceID("full"), None, CoordinateType.GRID); areaMask.getGrid(); areaMask.invert(); @@ -953,21 +1085,20 @@ class IscMosaic: areaMask = iscUtil.getEditArea(self.__altMask, self.__mysite) areaMask.setGloc(self.__dbwe.getGpi().getGridLoc()) except: - self.logProblem("Unable to access edit mask [", - self.__altMask, "]", traceback.format_exc()) - raise Exception, "Unknown edit area mask [" + self.__altMask + ']' + logger.exception("Unable to access edit mask [%s]", + self.__altMask) + raise Exception("Unknown edit area mask [%s]" % self.__altMask) else: maskName = "ISC_" + self.__siteID try: areaMask = iscUtil.getEditArea(maskName, self.__mysite) areaMask.setGloc(self.__dbwe.getGpi().getGridLoc()) except: - self.logProblem("Unable to access edit mask [", - maskName, "]", traceback.format_exc()) - raise Exception, "Unknown edit area mask [" + maskName + ']' + logger.exception("Unable to access edit mask [%s]", maskName) + raise Exception("Unknown edit area mask [%s]" % maskName) return areaMask - + #--------------------------------------------------------------------- # split grids on processing time, so to preserve all grids that # overlap partially the processing time @@ -981,7 +1112,7 @@ class IscMosaic: for m in mergeInfo: if m[0] != m[1]: #split grid needed if m[0] != oldTR: - oldGrid = self.__getDbGrid(m[0]) + oldGrid = self._wec[m[0]] oldTR = m[0] if oldGrid is not None: if self.__rateParm: @@ -990,8 +1121,7 @@ class IscMosaic: self.__storeGrid(m[1], (adjGrid, oldGrid[1])) else: self.__storeGrid(m[1], oldGrid) - self.__dbGrid = None - + #------------------------------------------------------------------------- # Get Incoming netCDF file grid valid times # netCDFfile, var is the netCDF variable @@ -1003,7 +1133,7 @@ class IscMosaic: for t in xrange(ntimes): times.append((inTimesA[t * 2], inTimesA[t * 2 + 1])) return times - + #--------------------------------------------------------------------- # get grid from netCDF file. 
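[Annotation] __getIncomingValidTimes reads a variable whose start/end times are flattened into a single array; the loop rebuilds (start, end) tuples by striding in twos. A compact equivalent:

def pair_valid_times(flat_times):
    # flat_times = [s0, e0, s1, e1, ...] as stored in the netCDF variable
    return [(flat_times[i], flat_times[i + 1])
            for i in range(0, len(flat_times), 2)]

# pair_valid_times([0, 3600, 3600, 7200]) -> [(0, 3600), (3600, 7200)]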
# gridType = type of grid: scalar, vector, weather @@ -1031,13 +1161,13 @@ class IscMosaic: elif gridType == 'WEATHER': compKey = self.__compressKey(vars[1][index, :, :]) grid = (numpy.flipud(vars[0][index]), compKey) - + elif gridType == 'DISCRETE': compKey = self.__compressKey(vars[1][index, :, :]) grid = (numpy.flipud(vars[0][index]), compKey) - + return grid - + #--------------------------------------------------------------------- # scaling changes for incoming grids # var = netCDF variable @@ -1072,50 +1202,50 @@ class IscMosaic: grid = numpy.where(goodDataMask, grid, outFillV) return grid.astype(numpy.float32) - + def __remap(self, pid, grid, inGeoDict, inFillV): gpi = GridParmManager.getGridParmInfo(pid).getPayload() gridType = gpi.getGridType().toString() - + gs = self.__decodeGridSlice(pid, grid, TimeRange()) - + pd = self.__decodeProj(inGeoDict) fill = inFillV ifill = int(inFillV) origin = Coordinate(float(str(inGeoDict['gridDomain'][0][0])), float(str(inGeoDict['gridDomain'][0][1]))) extent = Coordinate(float(str(inGeoDict['gridDomain'][1][0])), float(str(inGeoDict['gridDomain'][1][1]))) - + gl = GridLocation("iscMosaic", pd, self.__getSize(gs), origin, extent, "GMT") mapper = RemapGrid(gl, gpi.getGridLoc()) - - if gridType == 'SCALAR': + + if gridType == 'SCALAR': newGrid = mapper.remap(gs.getScalarGrid(), fill, gpi.getMaxValue(), gpi.getMinValue(), fill) return newGrid.__numpy__[0] - + elif gridType == 'VECTOR': magGrid = Grid2DFloat(gs.getGridParmInfo().getGridLoc().getNx().intValue(), gs.getGridParmInfo().getGridLoc().getNy().intValue()) dirGrid = Grid2DFloat(gs.getGridParmInfo().getGridLoc().getNx().intValue(), gs.getGridParmInfo().getGridLoc().getNy().intValue()) mapper.remap(gs.getMagGrid(), gs.getDirGrid(), fill, gpi.getMaxValue(), gpi.getMinValue(), fill, magGrid, dirGrid) return (magGrid.__numpy__[0], dirGrid.__numpy__[0]) - + elif gridType == 'WEATHER': newGrid = mapper.remap(gs.getWeatherGrid(), fill, fill) return (newGrid.__numpy__[0], grid[1]) - + elif gridType == 'DISCRETE': newGrid = mapper.remap(gs.getDiscreteGrid(), fill, fill) return (newGrid.__numpy__[0], grid[1]) - - def __decodeGridSlice(self, pid, value, tr, history=None): - + + def __decodeGridSlice(self, pid, value, tr, history=None): + gpi = GridParmManager.getGridParmInfo(pid).getPayload() gridType = gpi.getGridType().toString() - + hist = ArrayList() - + if history is None: hist.add(GridDataHistory(OriginType.INITIALIZED, pid, tr)) else: @@ -1126,15 +1256,15 @@ class IscMosaic: if gridType == 'SCALAR': data = Grid2DFloat.createGrid(value.shape[1], value.shape[0], value) slice = ScalarGridSlice(tr, gpi, hist, data) - elif gridType == 'VECTOR': - + elif gridType == 'VECTOR': + magVal = value[0] dirVal = value[1] - + magGrid = Grid2DFloat.createGrid(magVal.shape[1], magVal.shape[0], magVal) dirGrid = Grid2DFloat.createGrid(dirVal.shape[1], dirVal.shape[0], dirVal) slice = VectorGridSlice(tr, gpi, hist, magGrid, dirGrid) - elif gridType == 'WEATHER': + elif gridType == 'WEATHER': data = Grid2DByte.createGrid(value[0].shape[1], value[0].shape[0], value[0]) keyList = ArrayList() for key in value[1]: @@ -1145,7 +1275,7 @@ class IscMosaic: slice.setGridDataHistory(hist) slice.setWeatherGrid(data) slice.setKey(keyList) - elif gridType == 'DISCRETE': + elif gridType == 'DISCRETE': data = Grid2DByte.createGrid(value[0].shape[1], value[0].shape[0], value[0]) keyList = ArrayList() for key in value[1]: @@ -1157,9 +1287,9 @@ class IscMosaic: slice.setDiscreteGrid(data) slice.setKey(keyList) return slice - + def 
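[Annotation] Two normalizations recur in the netCDF handling above: rows are flipped with numpy.flipud to match the destination row order, and points failing the good-data test are reset to the fill value before the grid is remapped. A minimal sketch, assuming the -30000.0 scalar fill chosen by __determineFillValue:

import numpy

def normalize_scalar_grid(raw, in_fill=-30000.0, out_fill=-30000.0):
    grid = numpy.flipud(raw)                 # netCDF row order -> GFE row order
    good = numpy.not_equal(grid, in_fill)    # mask of valid points
    return numpy.where(good, grid, out_fill).astype(numpy.float32)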
__decodeProj(self, pdata): - + pid = "GrandUnifiedRemappingProj" type = ProjectionType.valueOf(pdata["projectionType"]) llLL = Coordinate(pdata["latLonLL"][0], pdata["latLonLL"][1]) @@ -1172,12 +1302,12 @@ class IscMosaic: lati = pdata["latIntersect"] lonc = pdata["lonCenter"] lono = pdata["lonOrigin"] - + return ProjectionData(pid, type, llLL, llUR, llo, sp1, sp2, gpll, gpur, lati, lonc, lono) - + def __getSize(self, gs): gridType = gs.getGridParmInfo().getGridType().toString() - + if gridType == "SCALAR" or gridType == "VECTOR": return Point(gs.getScalarGrid().getXdim(), gs.getScalarGrid().getYdim()) elif gridType == "WEATHER": @@ -1205,16 +1335,7 @@ class IscMosaic: if len(s) > 0: outKeys.append(s) return outKeys - - def __printShortTR(self, tr): - if tr is not None: - format = "%d/%H" - s = '(' + time.strftime(format, time.gmtime(tr[0])) + '->' + \ - time.strftime(format, time.gmtime(tr[1])) + ')' - return s - else: - return "None" - + #--------------------------------------------------------------------- # adjust for time # Adjusts a rate dependent grid based on time durations. No processing @@ -1244,7 +1365,7 @@ class IscMosaic: return (grid[0], grid[1]) else: return grid - + def __calcBlankingTimes(self, inTimes): out = [] for t in range(len(inTimes)): @@ -1268,7 +1389,7 @@ class IscMosaic: outLimit.append(inter) return outLimit - + def __processBlankTime(self, mGrid, tr): # calculate the merge @@ -1279,7 +1400,7 @@ class IscMosaic: if m[0] != None and m[2] == 1: if self.__siteInDbGrid(m[0]): try: - (destGrid, oldHist) = self.__getDbGrid(m[0]) + (destGrid, oldHist) = self._wec[m[0]] except: destGrid = None oldHist = None @@ -1297,20 +1418,20 @@ class IscMosaic: if tr is None: return None history = self.__dbwe.history(iscUtil.toJavaTimeRange(tr)) - + itr = history.iterator() while itr.hasNext(): h = str(itr.next()) if self.__siteID + "_GRID" in h: return True return False - + #--------------------------------------------------------------------- # validateAdjustDiscreteKeys() # grid = incoming grid (grid, key) # parmName = parm name # tr = time range of grid - # + # # returns 'adjusted grid' with a potentially modified key. The key # is guaranteed to be compatible with the current ifpServer definition. 
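[Annotation] __calcBlankingTimes produces the spans not covered by the incoming grids so __processBlankTime can blank them; each resulting span is then intersected with the processing window. A sketch of the gap computation over sorted, non-overlapping (start, end) tuples:

def calc_blanking_times(in_times, period_start, period_end):
    gaps = []
    cursor = period_start
    for start, end in in_times:
        if start > cursor:
            gaps.append((cursor, start))    # uncovered span before this grid
        cursor = max(cursor, end)
    if cursor < period_end:
        gaps.append((cursor, period_end))   # trailing uncovered span
    return gaps

# calc_blanking_times([(3600, 7200)], 0, 10800) -> [(0, 3600), (7200, 10800)]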
#--------------------------------------------------------------------- @@ -1323,13 +1444,13 @@ class IscMosaic: # set up error message smsg = "Adjusting DiscreteKey for Compatibility: " + parmName + \ - ' tr=' + self.__printTR(tr) + ' tr=' + printTR(tr) # get the list of discrete keys for this parameter that are allowed dd = self.__disDef.keys(parmName) if dd.size() == 0: - self.logProblem("Unable to validate keys for ", - parmName, " - no def in DiscreteDefinition") + logger.error("Unable to validate keys for %s - no def in DiscreteDefinition", + parmName) return grid #now go through the incoming grid's keys and validate each one @@ -1374,31 +1495,29 @@ class IscMosaic: ke = dd.get(0).getSymbol() #use 1st one changedReasons.append("Unknown Key") - + eachKey[idx1] = ke #store back into list keyentry = "^".join(eachKey) #join back to string if len(changedReasons): - self.logProblem(smsg, - "from [" + oldEntry + "] to [" + keyentry + "]", - "(" + ",".join(changedReasons) + ")") - msg = self.__siteID + " " + parmName + " " + \ - self.__printShortTR(tr) + \ - " [" + oldEntry + "] -> [" + keyentry + "] (" + \ - ",".join(changedReasons) + ")" + logger.error("%s from [%s] to [%s] (%s)", + smsg, oldEntry, keyentry, ",".join(changedReasons)) + + msg = "%s %s %s [%s] -> [%s] (%s)" % \ + self.__siteID, parmName, printShortTR(tr), oldEntry, keyentry, ",".join(changedReasons) self.__adjDataMsg.append(msg) key[idx] = keyentry #store back into list - + return (g, key) - + #--------------------------------------------------------------------- # validateAdjustWeatherKeys() # grid = incoming grid (grid, key) # parmName = parm name # tr = time range of grid - # + # # returns 'adjusted grid' #--------------------------------------------------------------------- def __validateAdjustWeatherKeys(self, grid, parmName, tr): @@ -1409,7 +1528,7 @@ class IscMosaic: # set up error message smsg = "Adjusting WeatherKey for Compatibility: " + parmName + \ - ' tr=' + self.__printTR(tr) + ' tr=' + printTR(tr) #now go through the incoming grid's keys and validate each one for idx in xrange(len(key)): #each index of the weather key @@ -1451,7 +1570,7 @@ class IscMosaic: # type is known, validate other components else: de = self.__wxDef.typeIndex(typ) - + # validate coverage if self.__wxDef.coverageIndex(typ, cov) == -1: cov = self.__wxDef.coverageSymbol(de, 0) @@ -1461,7 +1580,7 @@ class IscMosaic: if self.__wxDef.intensityIndex(typ, inten) == -1: inten = self.__wxDef.intensitySymbol(de, 0) changedReasons.append("Unknown Intensity") - + # validate attributes if len(attrs): atts = attrs.split(",") #get individual attributes @@ -1481,42 +1600,22 @@ class IscMosaic: # report any changes if len(changedReasons): - self.logProblem(smsg, - " from [" + oldEntry + "] to [" + keyentry + "]", - "(" + ",".join(changedReasons) + ")") - msg = self.__siteID + " " + parmName + " " + \ - self.__printShortTR(tr) + \ - " [" + oldEntry + "] -> [" + keyentry + "] (" + \ - ",".join(changedReasons) + ")" + logger.error("%s from [%s] to [%s] (%s)", + smsg, oldEntry, keyentry, ",".join(changedReasons)) + + msg = "%s %s %s [%s] -> [%s] (%s)" % \ + self.__siteID, parmName, printShortTR(tr), oldEntry, keyentry, ",".join(changedReasons) + self.__adjDataMsg.append(msg) return (g, key) - + #--------------------------------------------------------------------- # erase all grids from the given weather element over the # processTimePeriod procesTimePeriod = time range to remove grids #--------------------------------------------------------------------- def 
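[Annotation] One caution on the two msg assignments in the hunks above: the % operator binds tighter than the comma, so without parentheses the right-hand side is a tuple whose first element is the format string applied to self.__siteID alone, and six placeholders with one argument raises TypeError at run time. A parenthesized argument tuple gives the intended string:

msg = "%s %s %s [%s] -> [%s] (%s)" % (
    self.__siteID, parmName, printShortTR(tr),
    oldEntry, keyentry, ",".join(changedReasons))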
__eraseAllGrids(self, processTimePeriod): - dbinv = self.__dbinv - mergeInfo = iscTime.mergeTR(processTimePeriod, dbinv) - oldGrid = None - oldTR = None - for m in mergeInfo: - if m[2] == 0: #split grid, don't erase - if m[0] != oldTR: - oldGrid = self.__getDbGrid(m[0]) - oldTR = m[0] - if oldGrid is not None: - if self.__rateParm: - adjGrid = self.__adjustForTime(m[0], m[1], oldGrid[0], - 0.0) #old db grids don'thave missing value flags - self.__storeGrid(m[1], (adjGrid, oldGrid[1])) - else: - self.__storeGrid(m[1], oldGrid) - elif m[2] == 1: #overlaps mean erase - self.__storeGrid(m[1], None) - - self.__dbGrid = None + self.__storeGrid(processTimePeriod, None) def convertList(unknownList): @@ -1527,33 +1626,33 @@ def convertList(unknownList): retVal = JUtil.javaObjToPyVal(unknownList) return retVal -def main(siteID, userID, databaseID, parmsToProcess, blankOtherPeriods, - startTime, endTime, altMask, replaceOnly, eraseFirst, - announce, renameWE, iscSends, inFiles, ignoreMask, - adjustTranslate, deleteInput, parmsToIgnore, gridDelay, logFileName): +def main(siteID, userID, databaseID, parmsToProcess, blankOtherPeriods, + startTime, endTime, altMask, replaceOnly, eraseFirst, + announce, renameWE, iscSends, inFiles, ignoreMask, + adjustTranslate, deleteInput, parmsToIgnore, gridDelay, logFileName): # convert Java types to python and send to IscMosaic for execution parmsToProcess = convertList(parmsToProcess) inFiles = convertList(inFiles) parmsToIgnore = convertList(parmsToIgnore) - argv = {"siteID": siteID, - "userID": userID, + argv = {"siteID": siteID, + "userID": userID, "databaseID": databaseID, - "parmsToProcess": parmsToProcess, - "blankOtherPeriods": bool(blankOtherPeriods), + "parmsToProcess": parmsToProcess, + "blankOtherPeriods": bool(blankOtherPeriods), "startTime": startTime, - "endTime": endTime, + "endTime": endTime, "altMask": altMask, - "replaceOnly": bool(replaceOnly), + "replaceOnly": bool(replaceOnly), "eraseFirst": bool(eraseFirst), - "announce": announce, + "announce": announce, "renameWE": bool(renameWE), - "iscSends": bool(iscSends), + "iscSends": bool(iscSends), "inFiles": inFiles, - "ignoreMask": bool(ignoreMask), + "ignoreMask": bool(ignoreMask), "adjustTranslate": bool(adjustTranslate), - "deleteInput": bool(deleteInput), + "deleteInput": bool(deleteInput), "parmsToIgnore": parmsToIgnore, - "gridDelay": float(gridDelay), + "gridDelay": float(gridDelay), "logFileName": logFileName} mosaic = IscMosaic(argv) mosaic.execute() diff --git a/edexOsgi/com.raytheon.edex.plugin.gfe/utility/edex_static/base/gfe/isc/iscUtil.py b/edexOsgi/com.raytheon.edex.plugin.gfe/utility/edex_static/base/gfe/isc/iscUtil.py index 7ada937cfa..e1284f4de3 100644 --- a/edexOsgi/com.raytheon.edex.plugin.gfe/utility/edex_static/base/gfe/isc/iscUtil.py +++ b/edexOsgi/com.raytheon.edex.plugin.gfe/utility/edex_static/base/gfe/isc/iscUtil.py @@ -1,30 +1,30 @@ ## # This software was developed and / or modified by Raytheon Company, # pursuant to Contract DG133W-05-CQ-1067 with the US Government. -# +# # U.S. EXPORT CONTROLLED TECHNICAL DATA # This software product contains export-restricted data whose # export/transfer/disclosure is restricted by U.S. law. Dissemination # to non-U.S. persons whether in the United States or abroad requires # an export license or other authorization. 
-# +# # Contractor Name: Raytheon Company # Contractor Address: 6825 Pine Street, Suite 340 # Mail Stop B8 # Omaha, NE 68106 # 402.291.0100 -# +# # See the AWIPS II Master Rights File ("Master Rights File.pdf") for # further licensing information. ## -import string, IrtAccess, JUtil +import string, IrtAccess, JUtil, logging import xml, pickle, tempfile, os from xml.etree import ElementTree from xml.etree.ElementTree import Element, SubElement import LogStream from datetime import datetime -from time import gmtime,strftime +from time import gmtime, strftime from java.io import File from com.raytheon.uf.common.time import TimeRange from com.raytheon.uf.common.dataplugin.gfe.db.objects import GridLocation @@ -34,68 +34,69 @@ from com.raytheon.edex.plugin.gfe.config import IFPServerConfigManager from com.raytheon.uf.common.serialization import SerializationUtil from com.raytheon.uf.common.localization import LocalizationFile from com.raytheon.uf.common.localization import PathManagerFactory -from com.raytheon.uf.common.localization import LocalizationContext +from com.raytheon.uf.common.localization import LocalizationContext from com.raytheon.uf.common.localization import LocalizationContext_LocalizationType as LocalizationType from com.raytheon.uf.common.localization import LocalizationContext_LocalizationLevel as LocalizationLevel # # Utility module of isc functions -# +# # SOFTWARE HISTORY -# +# # Date Ticket# Engineer Description # ------------ ---------- ----------- -------------------------- # 07/06/09 1995 bphillip Initial Creation. # 02/19/13 1637 randerso Removed unused import # 03/11/13 1759 dgilling Move siteConfig import into # methods where it's needed. -# -# +# 11/07/13 2517 randerso Allow getLogger to override logLevel +# +# # -def getEditArea(name, siteID): - +def getEditArea(name, siteID): + pathMgr = PathManagerFactory.getPathManager(); - + commonStaticConfig = pathMgr.getContext(LocalizationType.COMMON_STATIC, LocalizationLevel.SITE) commonStaticConfig.setContextName(siteID) - file = pathMgr.getFile(commonStaticConfig,"gfe/editAreas"+ File.separator + name + ".xml") - + file = pathMgr.getFile(commonStaticConfig, "gfe/editAreas" + File.separator + name + ".xml") + if not os.path.exists(file.getPath()): commonStaticConfig = pathMgr.getContext(LocalizationType.COMMON_STATIC, LocalizationLevel.CONFIGURED) commonStaticConfig.setContextName(siteID) - file = pathMgr.getFile(commonStaticConfig,"gfe/editAreas"+ File.separator + name + ".xml") - - refData = None - + file = pathMgr.getFile(commonStaticConfig, "gfe/editAreas" + File.separator + name + ".xml") + + refData = None + try: if os.path.exists(file.getPath()): refData = SerializationUtil.jaxbUnmarshalFromXmlFile(file.getPath()); else: - LogStream.logProblem("EDIT AREA NOT FOUND: ",name," for site ",siteID) + LogStream.logProblem("EDIT AREA NOT FOUND: ", name, " for site ", siteID) except: LogStream.logProblem("Unable to unmarshal " + name + " in iscExtract") - + return refData def saveEditAreaGrid(maskName, iscMask, siteID): iscMask.getPolygons(CoordinateType.LATLON); - + pathMgr = PathManagerFactory.getPathManager(); commonStaticConfig = pathMgr.getContext(LocalizationType.COMMON_STATIC, LocalizationLevel.CONFIGURED) commonStaticConfig.setContextName(siteID) - sitePath = pathMgr.getFile(commonStaticConfig,"gfe/editAreas").getPath() + sitePath = pathMgr.getFile(commonStaticConfig, "gfe/editAreas").getPath() editAreaPath = str(sitePath) + "/" + maskName + ".xml" SerializationUtil.jaxbMarshalToXmlFile(iscMask, editAreaPath) -def 
deleteEditArea(name, siteID): +def deleteEditArea(name, siteID): pathMgr = PathManagerFactory.getPathManager() commonStaticConfig = pathMgr.getContext(LocalizationType.COMMON_STATIC, LocalizationLevel.CONFIGURED) commonStaticConfig.setContextName(siteID) - file = pathMgr.getFile(commonStaticConfig,"gfe/editAreas"+ File.separator + name + ".xml") + file = pathMgr.getFile(commonStaticConfig, "gfe/editAreas" + File.separator + name + ".xml") file.delete() - -def transformTime(tr): + +def transformTime(tr): return (int(tr.getStart().getTime() / 1000), int(tr.getEnd().getTime() / 1000)) def toJavaTimeRange(tr): @@ -106,7 +107,7 @@ def swapCoord(coord): coord.y = coord.x coord.x = temp return coord - + def serverBoxText(server): #returns text based on the server dictionary that should be placed #into the dialog @@ -117,13 +118,13 @@ def serverBoxText(server): hostport = server['host'] + "-primary" elif server['port'] == "98000001": hostport = server['host'] + "-svcbu" - + if hostport is None: hostport = server['host'] + "/" + server['port'] return server['site'] + "-> " + hostport + "@" + \ server['mhsid'].lower() - + def sortServers(a, b): # sort function for the list of servers. Sorts in priority order for # most likely to have the data. Order is: @@ -135,15 +136,15 @@ def sortServers(a, b): sameSiteA = (a['mhsid'] == a['site']) sameSiteB = (b['mhsid'] == b['site']) if sameSiteA and not sameSiteB: - return - 1 + return -1 elif not sameSiteA and sameSiteB: - return 1 + return 1 #both are same sites, check for host next else: regHostA = (a['host'][0:3] in ['dx4', 'px3']) regHostB = (b['host'][0:3] in ['dx4', 'px3']) if regHostA and not regHostB: - return - 1 + return -1 elif not regHostA and regHostB: return 1 # same host, but not preferred host @@ -151,11 +152,11 @@ def sortServers(a, b): regPortA = (a['port'] == "98000000") regPortB = (b['port'] == "98000000") if regPortA and not regPortB: - return - 1 + return -1 elif not regPortA and regPortB: return 1 return 1 #must be non-standard, put at end of list - + def createDomainDict(xml): irt = IrtAccess.IrtAccess("") #decodes the packet of information from the ISC_REQUEST_QUERY call @@ -171,7 +172,7 @@ def createDomainDict(xml): return None if serversE.tag != "servers": LogStream.logEvent('servers tag not found in createDomainDict') - return None #invalid xml + return None #invalid xml #decode XML and create dictionary and parms list domains = {} @@ -185,7 +186,7 @@ def createDomainDict(xml): if name == "site": site = value break - if site is None: + if site is None: LogStream.logProblem('Malformed domain site XML') continue for addressE in domainE.getchildren(): @@ -196,62 +197,62 @@ def createDomainDict(xml): list.append(info) guiText = serverBoxText(info) serverDictT2S[guiText] = info - serverDictS2T[str(info)] = guiText + serverDictS2T[str(info)] = guiText list.sort(sortServers) domains[site] = list - elif domainE.tag == "welist": + elif domainE.tag == "welist": for parmE in domainE.getchildren(): - welist.append(parmE.text) + welist.append(parmE.text) welist.sort() - + retVal = {} retVal['serverDictS2T'] = serverDictS2T retVal['serverDictT2S'] = serverDictT2S retVal['domains'] = domains - - tempfile.tempdir = "/tmp/" + + tempfile.tempdir = "/tmp/" fname = tempfile.mktemp(".bin") FILE = open(fname, "w") pickle.dump(retVal, FILE) FILE.close() - + FILE = open(fname, "r") lines = FILE.readlines() FILE.close() os.remove(fname) - + pickledFile = "" for line in lines: pickledFile += line - + return pickledFile - + def unPickle(str): - import 
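[Annotation] createDomainDict pickles its result to a mktemp file only to read the bytes straight back, and unPickle reverses the trip through another temp file. pickle.dumps and pickle.loads achieve the same without touching /tmp; a sketch, noting that the file-based form may exist only to suit the Java/Jython bridge:

import pickle

def pickle_to_string(obj):
    # equivalent to the mktemp/dump/readlines round trip above
    return pickle.dumps(obj)

def unpickle_from_string(data):
    return pickle.loads(data)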
pickle,tempfile,os,JUtil - tempfile.tempdir = "/tmp/" + import pickle, tempfile, os, JUtil + tempfile.tempdir = "/tmp/" fname = tempfile.mktemp(".bin") - FILE = open(fname,"w") + FILE = open(fname, "w") FILE.write(str) FILE.close() - - FILE = open(fname,"r") + + FILE = open(fname, "r") retVal = pickle.load(FILE) FILE.close() return retVal - -def getRequestXML(xml,selectedServers, selectedWEList): + +def getRequestXML(xml, selectedServers, selectedWEList): irt = IrtAccess.IrtAccess("") selectedServers = JUtil.javaStringListToPylist(selectedServers) selectedWElist = JUtil.javaStringListToPylist(selectedWEList) - - response = unPickle(createDomainDict(xml)) + + response = unPickle(createDomainDict(xml)) serverDictT2S = response['serverDictT2S'] domainDict = response['domains'] - + iscReqE = Element('iscrequest') - servers = [] + servers = [] for serverT in selectedServers: server = serverDictT2S[serverT] servers.append(server) @@ -275,46 +276,44 @@ def getRequestXML(xml,selectedServers, selectedWEList): # send to ifpServer xmlreq = ElementTree.tostring(iscReqE) - + return xmlreq; -def getLogger(scriptName, logName=None): - import logging - # be relocating this import here we allow +def getLogger(scriptName, logName=None, logLevel=logging.INFO): + # be relocating this import here we allow # com.raytheon.edex.plugin.gfe.isc.IscScript to dynamically # modify its include path with the proper siteConfig just before # execution time import siteConfig - + if logName is None: - logPath=siteConfig.GFESUITE_LOGDIR+"/"+strftime("%Y%m%d", gmtime()) - logName=scriptName+".log" + logPath = siteConfig.GFESUITE_LOGDIR + "/" + strftime("%Y%m%d", gmtime()) + logName = scriptName + ".log" else: - logPath=os.path.dirname(logName) - if len(logPath)==0: - logPath=siteConfig.GFESUITE_LOGDIR+"/"+strftime("%Y%m%d", gmtime()) - logName=os.path.basename(logName) - - logFile=logPath+"/"+logName + logPath = os.path.dirname(logName) + if len(logPath) == 0: + logPath = siteConfig.GFESUITE_LOGDIR + "/" + strftime("%Y%m%d", gmtime()) + logName = os.path.basename(logName) + + logFile = logPath + "/" + logName if not os.path.exists(logPath): os.makedirs(logPath) - + theLog = logging.getLogger(scriptName) - theLog.setLevel(logging.INFO) + theLog.setLevel(logLevel) ch = logging.FileHandler(logFile) - - ch.setLevel(logging.INFO) + + ch.setLevel(logLevel) formatter = logging.Formatter("%(levelname)s %(asctime)s [%(process)d:%(thread)d] %(filename)s: %(message)s") ch.setFormatter(formatter) for h in theLog.handlers: theLog.removeHandler(h) theLog.addHandler(ch) return theLog - + def tupleToString(*msg): - concatMsg="" + concatMsg = "" for m in msg: - concatMsg=concatMsg+" "+str(m) + concatMsg = concatMsg + " " + str(m) return concatMsg - \ No newline at end of file diff --git a/edexOsgi/com.raytheon.edex.plugin.gfe/utility/edex_static/base/gfe/isc/mergeGrid.py b/edexOsgi/com.raytheon.edex.plugin.gfe/utility/edex_static/base/gfe/isc/mergeGrid.py index b8f7b946d3..709dca3594 100644 --- a/edexOsgi/com.raytheon.edex.plugin.gfe/utility/edex_static/base/gfe/isc/mergeGrid.py +++ b/edexOsgi/com.raytheon.edex.plugin.gfe/utility/edex_static/base/gfe/isc/mergeGrid.py @@ -1,19 +1,19 @@ ## # This software was developed and / or modified by Raytheon Company, # pursuant to Contract DG133W-05-CQ-1067 with the US Government. -# +# # U.S. EXPORT CONTROLLED TECHNICAL DATA # This software product contains export-restricted data whose # export/transfer/disclosure is restricted by U.S. law. Dissemination # to non-U.S. 
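[Annotation] With the new logLevel parameter on getLogger (ticket 2517), callers can raise or lower verbosity for both the logger and its daily file handler without editing the function. Typical usage:

import logging
import iscUtil

# default INFO behavior is unchanged
log = iscUtil.getLogger("iscMosaic")

# opt in to verbose output for one script run
dbgLog = iscUtil.getLogger("iscMosaic", logLevel=logging.DEBUG)
dbgLog.debug("debug records now reach the daily log file")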
persons whether in the United States or abroad requires # an export license or other authorization. -# +# # Contractor Name: Raytheon Company # Contractor Address: 6825 Pine Street, Suite 340 # Mail Stop B8 # Omaha, NE 68106 # 402.291.0100 -# +# # See the AWIPS II Master Rights File ("Master Rights File.pdf") for # further licensing information. ## @@ -30,14 +30,15 @@ import LogStream, fcntl # Vector: ((magGrid, dirGrid), history) # Weather: ((byteGrid, key), history) # Discrete: ((byteGrid, key), history) -# +# # SOFTWARE HISTORY -# +# # Date Ticket# Engineer Description # ------------ ---------- ----------- -------------------------- # 07/06/09 1995 bphillip Initial Creation. -# -# +# 11/05/13 2517 randerso Improve memory utilization +# +# # @@ -54,7 +55,7 @@ class MergeGrid: # gridType = 'SCALAR', 'VECTOR', 'WEATHER', 'DISCRETE' #--------------------------------------------------------------------- def __init__(self, creationTime, siteID, inFillValue, outFillValue, - areaMask, gridType, discreteKeys = None): + areaMask, gridType, discreteKeys=None): self.__creationTime = creationTime self.__siteID = siteID self.__inFillV = inFillValue @@ -91,13 +92,13 @@ class MergeGrid: gridB = wxB[0] key = wxA[1] newGrid = numpy.zeros_like(gridB) - + for k in range(len(wxB[1])): index = self.__findKey(wxB[1][k], key) - newGrid = numpy.where(gridB == k, index, newGrid) - + newGrid[gridB == k] = index + return (key, wxA[0], newGrid) - + #--------------------------------------------------------------------- # update history strings @@ -107,17 +108,17 @@ class MergeGrid: # returns None if no history is present. #--------------------------------------------------------------------- def __updateHistoryStrings(self, historyA, historyB): - + out = [] - - # removal any old entry + + # removal any old entry if historyB is not None: for h in historyB: - index = string.find(h, ":"+ self.__siteID + "_GRID") + index = string.find(h, ":" + self.__siteID + "_GRID") if index == -1: - out.append(h) - - # if add mode, add in new entries + out.append(h) + + # if add mode, add in new entries if historyA is not None: for h in historyA: out.append(h) @@ -125,33 +126,33 @@ class MergeGrid: if len(out) > 0: return out else: - return None + return None #--------------------------------------------------------------------- # merge scalar grid # Note: gridA can be None, which indicates that the data # is to be blanked out, i.e., made invalid. gridB can also be # none, which indicates that there is no destination grid and one must - # be created. + # be created. 
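[Annotation] The memory change in __commonizeKey replaces a chain of numpy.where calls, each allocating a fresh full-size grid, with in-place boolean-index assignment into one preallocated array. The two forms agree:

import numpy

gridB = numpy.array([[0, 1], [2, 1]], dtype=numpy.int8)
index_of = [5, 7, 9]                  # illustrative remapped key indices

# old form: one temporary grid per key from numpy.where
old = numpy.zeros_like(gridB)
for k in range(len(index_of)):
    old = numpy.where(gridB == k, index_of[k], old)

# new form: writes directly into the preallocated grid
new = numpy.zeros_like(gridB)
for k in range(len(index_of)):
    new[gridB == k] = index_of[k]

assert (old == new).all()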
#--------------------------------------------------------------------- def __mergeScalarGrid(self, gridA, gridB): - if gridA is None and gridB is None: + if gridA is None and gridB is None: return None - + # merge the grids if gridA is not None: - inMask = numpy.not_equal(gridA, self.__inFillV) - mask = numpy.logical_and(inMask, self.__areaMask) - + mask = numpy.not_equal(gridA, self.__inFillV) + numpy.logical_and(mask, self.__areaMask, mask) + if gridB is None: - gridB = numpy.zeros(gridA.shape) + self.__outFillV - return numpy.where(mask, gridA, gridB) - + return numpy.where(mask, gridA, self.__outFillV) + else: + return numpy.where(mask, gridA, gridB) + # blank out the data else: - blankGrid = numpy.zeros(gridB.shape) + self.__outFillV - return numpy.where(self.__areaMask, blankGrid, gridB) - + return numpy.where(self.__areaMask, self.__outFillV, gridB) + #--------------------------------------------------------------------- # merge vector grid # Note: gridA can be None, which indicates that the data @@ -159,50 +160,47 @@ class MergeGrid: # none, which indicates that there is no destination grid and one must # be created. #--------------------------------------------------------------------- - def __mergeVectorGrid(self, gridA, gridB): + def __mergeVectorGrid(self, gridA, gridB): if gridA is None and gridB is None: return None - + # merge the grids if gridA is not None: - inMask = numpy.not_equal(gridA[0], self.__inFillV) - mask = numpy.logical_and(inMask, self.__areaMask) - + mask = numpy.not_equal(gridA[0], self.__inFillV) + numpy.logical_and(mask, self.__areaMask, mask) + if gridB is None: - gridSize = gridA[0].shape - gridB = (numpy.zeros(gridSize) + self.__outFillV, - numpy.zeros(gridSize) + 0.0) - - magGrid = numpy.where(mask, gridA[0], gridB[0]) - dirGrid = numpy.where(mask, gridA[1], gridB[1]) + magGrid = numpy.where(mask, gridA[0], self.__outFillV) + dirGrid = numpy.where(mask, gridA[1], 0.0) + else: + magGrid = numpy.where(mask, gridA[0], gridB[0]) + dirGrid = numpy.where(mask, gridA[1], gridB[1]) return (magGrid, dirGrid) - + # blank out the data else: - blankGrid = numpy.zeros(gridB[0].shape) + self.__outFillV - blankDirGrid = numpy.zeros_like(gridB[1]) - magGrid = numpy.where(self.__areaMask, blankGrid, gridB[0]) - dirGrid = numpy.where(self.__areaMask, blankDirGrid, gridB[1]) + magGrid = numpy.where(self.__areaMask, self.__outFillV, gridB[0]) + dirGrid = numpy.where(self.__areaMask, 0.0, gridB[1]) return (magGrid, dirGrid) - - + + #--------------------------------------------------------------------- # merge weather grid # # Note the outFillV is ignored for now, all out-of-bounds points will # get the value. 
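[Annotation] The scalar and vector merges now pass the mask itself as numpy.logical_and's output argument, so the AND is computed in place instead of allocating another full-domain boolean array:

import numpy

grid = numpy.array([1.0, -30000.0, 3.0])
area = numpy.array([True, True, False])

mask = numpy.not_equal(grid, -30000.0)   # [True, False, True]
numpy.logical_and(mask, area, mask)      # ANDed in place -> [True, False, False]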
#--------------------------------------------------------------------- - def __mergeWeatherGrid(self, gridA, gridB): - - if gridA is None and gridB is None: + def __mergeWeatherGrid(self, gridA, gridB): + + if gridA is None and gridB is None: return None - + noWx = "::::" # merge the grids if gridA is not None: - inMask = numpy.not_equal(gridA[0], self.__inFillV) - mask = numpy.logical_and(inMask, self.__areaMask) - + mask = numpy.not_equal(gridA[0], self.__inFillV) + numpy.logical_and(mask, self.__areaMask, mask) + if gridB is None: #make an empty grid noWxKeys = [] noWxGrid = numpy.empty_like(gridA[0]) @@ -211,15 +209,15 @@ class MergeGrid: (commonkey, remapG, dbG) = self.__commonizeKey(gridA, gridB) mergedGrid = numpy.where(mask, remapG, dbG) return (mergedGrid, commonkey) - + # blank out the data - else: + else: blankGrid = numpy.empty_like(gridB[0]) blankGrid.fill(self.__findKey(noWx, gridB[1])) key = gridB[1] grid = numpy.where(self.__areaMask, blankGrid, gridB[0]) return (grid, key) - + #--------------------------------------------------------------------- # merge discrete grid # @@ -231,23 +229,23 @@ class MergeGrid: return None noKey = self.__discreteKeys[0] - + # merge the grids if gridA is not None: - inMask = numpy.not_equal(gridA[0], self.__inFillV) - mask = numpy.logical_and(inMask, self.__areaMask) - + mask = numpy.not_equal(gridA[0], self.__inFillV) + numpy.logical_and(mask, self.__areaMask) + if gridB is None: #make an empty grid noKeys = [] noGrid = numpy.empty_like(gridA[0]) noGrid.fill(self.__findKey(noKey, noKeys)) gridB = (noGrid, noKeys) - + (commonkey, remapG, dbG) = \ self.__commonizeKey(gridA, gridB) mergedGrid = numpy.where(mask, remapG, dbG) return (mergedGrid, commonkey) - + # blank out the data else: blankGrid = numpy.empty_like(gridB[0]) @@ -255,7 +253,7 @@ class MergeGrid: key = gridB[1] grid = numpy.where(self.__areaMask, blankGrid, gridB[0]) return (grid, key) - + #--------------------------------------------------------------------- # mergeGrid # Merges the grid @@ -270,8 +268,8 @@ class MergeGrid: # none, which indicates that there is no destination grid and one must # be created. 
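[Annotation] Note one asymmetry in the discrete merge above: unlike the scalar, vector, and weather versions, its numpy.logical_and(mask, self.__areaMask) omits the output argument, so the ANDed result is discarded and mask stays the raw fill-value test. If the intent matches the other three merge methods, the in-place form would be:

numpy.logical_and(mask, self.__areaMask, mask)   # keep only in-area valid points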
#--------------------------------------------------------------------- - def mergeGrid(self, gridAIn, gridBIn): - # merge the grids + def mergeGrid(self, gridAIn, gridBIn): + # merge the grids if gridAIn is not None: gridA = gridAIn[0] historyA = gridAIn[1] @@ -279,28 +277,28 @@ class MergeGrid: gridA = None historyA = None if gridBIn is not None: - gridB = gridBIn[0] + gridB = gridBIn[0] historyB = gridBIn[1] else: gridB = None historyB = None - if self.__gridType == 'SCALAR': - mergedGrid = self.__mergeScalarGrid(gridA, gridB) - + if self.__gridType == 'SCALAR': + mergedGrid = self.__mergeScalarGrid(gridA, gridB) + elif self.__gridType == 'VECTOR': mergedGrid = self.__mergeVectorGrid(gridA, gridB) - + elif self.__gridType == 'WEATHER': mergedGrid = self.__mergeWeatherGrid(gridA, gridB) - + elif self.__gridType == 'DISCRETE': mergedGrid = self.__mergeDiscreteGrid(gridA, gridB) - + else: mergedGrid = None # merge History history = self.__updateHistoryStrings(historyA, historyB) - + return (mergedGrid, history) diff --git a/edexOsgi/com.raytheon.edex.plugin.obs/res/spring/obs-ingest.xml b/edexOsgi/com.raytheon.edex.plugin.obs/res/spring/obs-ingest.xml index 5bec4f52ae..f797a17d03 100644 --- a/edexOsgi/com.raytheon.edex.plugin.obs/res/spring/obs-ingest.xml +++ b/edexOsgi/com.raytheon.edex.plugin.obs/res/spring/obs-ingest.xml @@ -1,67 +1,53 @@ - - - - - - + - - - - + - + + + + + + + - - + + - - - - - - - - obs - - - - - - - - - - - - - - java.lang.Throwable - - - - - + + + + + + + obs + + + + + + + + + + + + + + + java.lang.Throwable + + + + + \ No newline at end of file diff --git a/edexOsgi/com.raytheon.edex.plugin.sfcobs/res/spring/sfcobs-ingest.xml b/edexOsgi/com.raytheon.edex.plugin.sfcobs/res/spring/sfcobs-ingest.xml index a8d9d8c731..f27a884f78 100644 --- a/edexOsgi/com.raytheon.edex.plugin.sfcobs/res/spring/sfcobs-ingest.xml +++ b/edexOsgi/com.raytheon.edex.plugin.sfcobs/res/spring/sfcobs-ingest.xml @@ -1,68 +1,52 @@ - - - + + - - - - - - + + + + + - - + + - - - - - - - - sfcobs - - - - - + + + + + sfcobs + + + + + + - - + + - - - java.lang.Throwable - - - - - - + + + java.lang.Throwable + + + +
+
\ No newline at end of file diff --git a/edexOsgi/com.raytheon.uf.common.dataplugin/src/com/raytheon/uf/common/dataplugin/annotations/DataURIUtil.java b/edexOsgi/com.raytheon.uf.common.dataplugin/src/com/raytheon/uf/common/dataplugin/annotations/DataURIUtil.java index 7bd40cbdcf..195770cba4 100644 --- a/edexOsgi/com.raytheon.uf.common.dataplugin/src/com/raytheon/uf/common/dataplugin/annotations/DataURIUtil.java +++ b/edexOsgi/com.raytheon.uf.common.dataplugin/src/com/raytheon/uf/common/dataplugin/annotations/DataURIUtil.java @@ -63,7 +63,7 @@ import com.raytheon.uf.common.util.ConvertUtil; */ public class DataURIUtil { - private static final String PLUGIN_NAME_KEY = "pluginName"; + public static final String PLUGIN_NAME_KEY = "pluginName"; private static final String FIELD_SEPARATOR = "."; diff --git a/edexOsgi/com.raytheon.uf.edex.archive/resources/com.raytheon.uf.edex.archive.cron.properties b/edexOsgi/com.raytheon.uf.edex.archive/resources/com.raytheon.uf.edex.archive.cron.properties index 5cdaa2a6c6..3a69251ba6 100644 --- a/edexOsgi/com.raytheon.uf.edex.archive/resources/com.raytheon.uf.edex.archive.cron.properties +++ b/edexOsgi/com.raytheon.uf.edex.archive/resources/com.raytheon.uf.edex.archive.cron.properties @@ -6,6 +6,8 @@ archive.cron=0+40+*+*+*+? archive.purge.enable=true # purge archives archive.purge.cron=0+5+0/3+*+*+? +# compress database records +archive.compression.enable=true # to disable a specific archive, use property archive.disable=pluginName,pluginName... #archive.disable=grid,text,acars \ No newline at end of file diff --git a/edexOsgi/com.raytheon.uf.edex.archive/src/com/raytheon/uf/edex/archive/DatabaseArchiver.java b/edexOsgi/com.raytheon.uf.edex.archive/src/com/raytheon/uf/edex/archive/DatabaseArchiver.java index 056bc09950..74d256d8cd 100644 --- a/edexOsgi/com.raytheon.uf.edex.archive/src/com/raytheon/uf/edex/archive/DatabaseArchiver.java +++ b/edexOsgi/com.raytheon.uf.edex.archive/src/com/raytheon/uf/edex/archive/DatabaseArchiver.java @@ -19,6 +19,8 @@ **/ package com.raytheon.uf.edex.archive; +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; import java.io.BufferedWriter; import java.io.File; import java.io.FileInputStream; @@ -80,7 +82,8 @@ import com.raytheon.uf.edex.database.plugin.PluginFactory; * Jan 18, 2013 1469 bkowal Removed the hdf5 data directory. * Oct 23, 2013 2478 rferrel Make date format thread safe. * Add debug information. - * Nov 05, 2013 2499 rjpeter Repackaged, removed config files, always compresses. + * Nov 05, 2013 2499 rjpeter Repackaged, removed config files, always compresses hdf5. + * Nov 11, 2013 2478 rjpeter Updated data store copy to always copy hdf5. * * * @author rjpeter @@ -114,12 +117,17 @@ public class DatabaseArchiver implements IPluginArchiver { /** Cluster time out on lock. */ private static final int CLUSTER_LOCK_TIMEOUT = 60000; + /** Chunk size for I/O Buffering and Compression */ + private static final int CHUNK_SIZE = 8192; + /** Mapping for plug-in formatters. */ private final Map pluginArchiveFormatters; /** When true dump the pdos. */ private final boolean debugArchiver; + private final boolean compressDatabaseFiles; + /** * The constructor. 
*/ @@ -128,6 +136,8 @@ public class DatabaseArchiver implements IPluginArchiver { pluginArchiveFormatters.put("default", new DefaultPluginArchiveFileNameFormatter()); debugArchiver = Boolean.getBoolean("archive.debug.enable"); + compressDatabaseFiles = Boolean + .getBoolean("archive.compression.enable"); } @Override @@ -259,12 +269,9 @@ public class DatabaseArchiver implements IPluginArchiver { .join(archivePath, pluginName, dataStoreFile)); try { - // data must be older than 30 minutes, and no older than - // hours to keep hours need to lookup plugin and see if - // compression matches, or embed in configuration the - // compression level on archive, but would still need to - // lookup plugin - ds.copy(outputDir, compRequired, "lastArchived", 0, 0); + // copy the changed hdf5 file, does repack if + // compRequired, otherwise pure file copy + ds.copy(outputDir, compRequired, null, 0, 0); } catch (StorageException e) { statusHandler.handle(Priority.PROBLEM, e.getLocalizedMessage()); @@ -325,7 +332,11 @@ public class DatabaseArchiver implements IPluginArchiver { path.setLength(path.length() - 3); } int pathDebugLength = path.length(); - path.append(".bin.gz"); + if (compressDatabaseFiles) { + path.append(".bin.gz"); + } else { + path.append(".bin"); + } File file = new File(path.toString()); List pdosToSerialize = entry.getValue(); @@ -338,7 +349,13 @@ public class DatabaseArchiver implements IPluginArchiver { try { // created gzip'd stream - is = new GZIPInputStream(new FileInputStream(file), 8192); + if (compressDatabaseFiles) { + is = new GZIPInputStream(new FileInputStream(file), + CHUNK_SIZE); + } else { + is = new BufferedInputStream(new FileInputStream(file), + CHUNK_SIZE); + } // transform back for list append @SuppressWarnings("unchecked") @@ -400,7 +417,12 @@ public class DatabaseArchiver implements IPluginArchiver { } // created gzip'd stream - os = new GZIPOutputStream(new FileOutputStream(file), 8192); + if (compressDatabaseFiles) { + os = new GZIPOutputStream(new FileOutputStream(file), CHUNK_SIZE); + } else { + os = new BufferedOutputStream(new FileOutputStream(file), + CHUNK_SIZE); + } // Thrift serialize pdo list SerializationUtil.transformToThriftUsingStream(pdosToSerialize, diff --git a/edexOsgi/com.raytheon.uf.edex.archive/utility/common_static/base/archiver/purger/PROCESSED_DATA.xml b/edexOsgi/com.raytheon.uf.edex.archive/utility/common_static/base/archiver/purger/PROCESSED_DATA.xml index 8a6fc621ee..8cc6e745f7 100644 --- a/edexOsgi/com.raytheon.uf.edex.archive/utility/common_static/base/archiver/purger/PROCESSED_DATA.xml +++ b/edexOsgi/com.raytheon.uf.edex.archive/utility/common_static/base/archiver/purger/PROCESSED_DATA.xml @@ -27,6 +27,7 @@ * Aug 05, 2013 2224 rferrel Changes to add dataSet tags. * Oct 01, 2013 2147 rferrel Date time stamp no longer requires an hour field. * Nov 05, 2013 2497 rferrel Change root directory. + * Nov 13, 2013 2549 rferrel Changes to GFE and modelsounding. 
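[Annotation] DatabaseArchiver now keys both the file suffix (.bin.gz vs .bin) and the stream type (GZIP vs plain buffered) off the archive.compression.enable property. The same toggle expressed in Python, purely as an illustration of the pattern:

import gzip
import io

CHUNK_SIZE = 8192  # matches the I/O buffering/compression chunk size above

def open_archive_stream(path_base, compress):
    if compress:
        return gzip.open(path_base + ".bin.gz", "wb")
    return io.open(path_base + ".bin", "wb", buffering=CHUNK_SIZE)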
* * @author rferrel * @version 1.0 @@ -151,7 +152,7 @@ 3,4,5,6 - gfe/(.*)/(Fcst|Official) + gfe/(.*)/(.*) .*_(\d{4})(\d{2})(\d{2})_.* {1} - {2} 3,4,5 @@ -177,11 +178,11 @@ .*-(\d{4})-(\d{2})-(\d{2})-(\d{2})-.* - (modelsounding)/(.*) + (modelsounding)/(.*)/.* (bufrmos)(.*) {1} - {2} 3,4,5,6 - .*(\d{4})-(\d{2})-(\d{2})[-_](\d{2}).* + .*(\d{4})-(\d{2})-(\d{2})-(\d{2}).* diff --git a/edexOsgi/com.raytheon.uf.edex.archive/utility/common_static/base/archiver/purger/RAW_DATA.xml b/edexOsgi/com.raytheon.uf.edex.archive/utility/common_static/base/archiver/purger/RAW_DATA.xml index 8d24cbb9f2..c9aff14e97 100644 --- a/edexOsgi/com.raytheon.uf.edex.archive/utility/common_static/base/archiver/purger/RAW_DATA.xml +++ b/edexOsgi/com.raytheon.uf.edex.archive/utility/common_static/base/archiver/purger/RAW_DATA.xml @@ -219,6 +219,20 @@ 1,2,3,4 + + Radar (Local) + 168 + + radar/([k|t|e|f]\w{3})/.* + radar/(k...|t...|e...|f...)/.*/.* + radar/(k...|t...|e...|f...)/.*/.*/.* + radar/(k...|t...|e...|f...)/.*/.*/.*/.* + radar/(k...|t...|e...|f...)/.*/.*/.*/.*/.* + (\w{4}).(\d*).(\d{4})(\d{2})(\d{2})_(\d{2})(\d{2})(.*) + {1} + 4,5,6,7 + + Satellite 168 diff --git a/edexOsgi/com.raytheon.uf.tools.gfesuite.servicebackup/svcBackup/ServiceBackup/scripts/receive_grids_from_backup_site b/edexOsgi/com.raytheon.uf.tools.gfesuite.servicebackup/svcBackup/ServiceBackup/scripts/receive_grids_from_backup_site index 52b09f1f3e..491b13da00 100644 --- a/edexOsgi/com.raytheon.uf.tools.gfesuite.servicebackup/svcBackup/ServiceBackup/scripts/receive_grids_from_backup_site +++ b/edexOsgi/com.raytheon.uf.tools.gfesuite.servicebackup/svcBackup/ServiceBackup/scripts/receive_grids_from_backup_site @@ -1,126 +1,126 @@ -#!/bin/bash -if [ ${#AWIPS_HOME} = 0 ] -then - path_to_script=`readlink -f $0` - export AWIPS_HOME=$(dirname $(dirname $(dirname $(dirname $path_to_script)))) -fi - -. ${AWIPS_HOME}/GFESuite/ServiceBackup/configuration/svcbu.env - -# Create the log file -logdir=${IFPS_LOG}/`date +%Y%m%d` -logfil=svcbu_receive_grids_from_bksite`date +%H%M` -logfile=${logdir}/${logfil} -[ ! -d ${logdir} ] && (umask 000;mkdir ${logdir}) -touch ${logdir}/${logfil} -exec 1>${logdir}/${logfil} 2>&1 - -# Check the status of the lock file to see if we are OK to proceed -if [ -f ${LOCK_DIR}/importBkSiteGrids ]; -then - log_msg Cannot process grids. - exit 1 -fi - -touch ${LOCK_DIR}/importBkSiteGrids - -log_msg 0 - -# Retrieve the name of the site from the tar file. -import_grd_file=${1} -if [ -a ${import_grd_file} ] -then - - log_msg "Import Grids file is ${import_grd_file}" - mv ${import_grd_file} ${GFESUITE_HOME}/Grd - cd ${GFESUITE_HOME} - - tar xf Grd - if [ $? -eq 0 ]; then - SITE=`cat siteID.txt | tr '[a-z]' '[A-Z]'` - site=`echo $SITE | tr '[A-Z]' '[a-z]'` - gunzip -f ${site}Grd.netcdf.gz - else - # move the file to appropriate directory. - mv -f Grd Grd.netcdf.gz - chmod 777 Grd.netcdf.gz - - log_msg "Gunzipping ${GFESUITE_HOME}/Grd.netcdf.gz" - gunzip -f Grd.netcdf.gz - if [ $? -ne 0 ]; - then - log_msg "ERROR: Could not gunzip ${GFESUITE_HOME}/Grd.netcdf.gz" - rm -f ${LOCK_DIR}/importBkSiteGrids - log_msg 100 - exit 1 - fi - log_msg "Done Gunzipping!" - - log_msg "Finding site-id using ncdump method." - hdr=`mktemp` - ncdump -h ${GFESUITE_HOME}/Grd.netcdf > $hdr - SITE=`grep siteID $hdr | head -1 | cut -d'"' -f2` - site=`echo $SITE | tr '[A-Z]' '[a-z]'` - if [ -z $site ] - then - log_msg "ERROR: Could not find out the site from ncdump method..." 
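[Annotation] Each purger entry pairs a directory pattern with a timestamp pattern whose capture groups supply the date fields. Taking the corrected modelsounding timestamp above, groups 1 through 4 are year, month, day, and hour; a sketch with an illustrative file name:

import re

# timestamp pattern from the modelsounding entry
TIME_RE = re.compile(r".*(\d{4})-(\d{2})-(\d{2})-(\d{2}).*")

def extract_date(name):
    m = TIME_RE.match(name)
    if m is None:
        return None
    return tuple(int(g) for g in m.groups())   # (year, month, day, hour)

# extract_date("modelsounding-2013-11-14-06") -> (2013, 11, 14, 6)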
- rm -f $hdr - rm -f ${LOCK_DIR}/importBkSiteGrids - log_msg 100 - exit 1 - fi - - mv -f ${GFESUITE_HOME}/Grd.netcdf ${GFESUITE_HOME}/${site}Grd.netcdf - rm -f $hdr - fi - log_msg "site is $site" - - -else - log_msg "Unable to locate the gridded data of the site, ${import_grd_file}" - log_msg "You will need to request your backup site to send grids again." - log_msg 100 - rm -f ${LOCK_DIR}/importBkSiteGrids - exit 1 -fi - - -log_msg 50 -# Check if import file was supplied correctly by msg_send. -import_file=${GFESUITE_HOME}/${site}Grd.netcdf -log_msg "import_file=${import_file}" -SITE=`echo ${SITE} | tr '[a-z]' '[A-Z]'` -if [ -a ${import_file} ] -then - #use iscMosaic to load grids into databases - log_msg "Running iscMosaic to unpack griddded data..." - ${GFESUITE_BIN}/iscMosaic -h $SVCBU_HOST -r $CDSPORT -d ${SITE}_GRID__Restore_00000000_0000 -f ${import_file} -n -x - if [ $? -ne 0 ]; - then - log_msg "ERROR: iscMosaic failed to run correctly. Please re-run iscMosaic manually." - log_msg 100 - rm -f ${LOCK_DIR}/importBkSiteGrids - exit 1 - fi - log_msg "Finished running iscMosaic..." - # Generate a GFE message saying new Grids have arrived in Restore database. - cd ${GFESUITE_BIN} - sendGfeMessage -h ${SVCBU_HOST} -p ${CDSPORT} -u -m "Restore database has been populated with new grids." -else - log_msg "Unable to locate the gridded data of the site,${import_file} You will need to request your backup site to send grids again." - log_msg 100 - rm -f ${LOCK_DIR}/importBkSiteGrids - exit 1 -fi - - -#clean-up. -rm -f ${GFESUITE_HOME}/Grd -rm -f ${GFESUITE_HOME}/${SITE}Grd* siteID.txt -log_msg 100 -rm -f ${LOCK_DIR}/importBkSiteGrids -#ALL well, send a msg and get out of here -log_msg "Importing Grids from backup site is completed. You may start your GFE now." -log_msg "Grids received from backup site are stored in ${SITE}_GRID__Restore_00000000_0000 database." -exit 0 - +#!/bin/bash +if [ ${#AWIPS_HOME} = 0 ] +then + path_to_script=`readlink -f $0` + export AWIPS_HOME=$(dirname $(dirname $(dirname $(dirname $path_to_script)))) +fi + +. ${AWIPS_HOME}/GFESuite/ServiceBackup/configuration/svcbu.env + +# Create the log file +logdir=${IFPS_LOG}/`date +%Y%m%d` +logfil=svcbu_receive_grids_from_bksite`date +%H%M` +logfile=${logdir}/${logfil} +[ ! -d ${logdir} ] && (umask 000;mkdir ${logdir}) +touch ${logdir}/${logfil} +exec 1>${logdir}/${logfil} 2>&1 + +# Check the status of the lock file to see if we are OK to proceed +if [ -f ${LOCK_DIR}/importBkSiteGrids ]; +then + log_msg Cannot process grids. + exit 1 +fi + +touch ${LOCK_DIR}/importBkSiteGrids + +log_msg 0 + +# Retrieve the name of the site from the tar file. +import_grd_file=${1} +if [ -a ${import_grd_file} ] +then + + log_msg "Import Grids file is ${import_grd_file}" + mv ${import_grd_file} ${GFESUITE_HOME}/Grd + cd ${GFESUITE_HOME} + + tar xf Grd + if [ $? -eq 0 ]; then + SITE=`cat siteID.txt | tr '[a-z]' '[A-Z]'` + site=`echo $SITE | tr '[A-Z]' '[a-z]'` + gunzip -f ${site}Grd.netcdf.gz + else + # move the file to appropriate directory. + mv -f Grd Grd.netcdf.gz + chmod 777 Grd.netcdf.gz + + log_msg "Gunzipping ${GFESUITE_HOME}/Grd.netcdf.gz" + gunzip -f Grd.netcdf.gz + if [ $? -ne 0 ]; + then + log_msg "ERROR: Could not gunzip ${GFESUITE_HOME}/Grd.netcdf.gz" + rm -f ${LOCK_DIR}/importBkSiteGrids + log_msg 100 + exit 1 + fi + log_msg "Done Gunzipping!" + + log_msg "Finding site-id using ncdump method." 
+ hdr=`mktemp` + ncdump -h ${GFESUITE_HOME}/Grd.netcdf > $hdr + SITE=`grep siteID $hdr | head -1 | cut -d'"' -f2` + site=`echo $SITE | tr '[A-Z]' '[a-z]'` + if [ -z $site ] + then + log_msg "ERROR: Could not find out the site from ncdump method..." + rm -f $hdr + rm -f ${LOCK_DIR}/importBkSiteGrids + log_msg 100 + exit 1 + fi + + mv -f ${GFESUITE_HOME}/Grd.netcdf ${GFESUITE_HOME}/${site}Grd.netcdf + rm -f $hdr + fi + log_msg "site is $site" + + +else + log_msg "Unable to locate the gridded data of the site, ${import_grd_file}" + log_msg "You will need to request your backup site to send grids again." + log_msg 100 + rm -f ${LOCK_DIR}/importBkSiteGrids + exit 1 +fi + + +log_msg 50 +# Check if import file was supplied correctly by msg_send. +import_file=${GFESUITE_HOME}/${site}Grd.netcdf +log_msg "import_file=${import_file}" +SITE=`echo ${SITE} | tr '[a-z]' '[A-Z]'` +if [ -a ${import_file} ] +then + #use iscMosaic to load grids into databases + log_msg "Running iscMosaic to unpack griddded data..." + ${GFESUITE_BIN}/iscMosaic -h $SVCBU_HOST -r $CDSPORT -d ${SITE}_GRID__Restore_00000000_0000 -f ${import_file} -n -x + if [ $? -ne 0 ]; + then + log_msg "ERROR: iscMosaic failed to run correctly. Please re-run iscMosaic manually." + log_msg 100 + rm -f ${LOCK_DIR}/importBkSiteGrids + exit 1 + fi + log_msg "Finished running iscMosaic..." + # Generate a GFE message saying new Grids have arrived in Restore database. + cd ${GFESUITE_BIN} + ./sendGfeMessage -h ${SVCBU_HOST} -p ${CDSPORT} -u -m "Restore database has been populated with new grids." +else + log_msg "Unable to locate the gridded data of the site,${import_file} You will need to request your backup site to send grids again." + log_msg 100 + rm -f ${LOCK_DIR}/importBkSiteGrids + exit 1 +fi + + +#clean-up. +rm -f ${GFESUITE_HOME}/Grd +rm -f ${GFESUITE_HOME}/${SITE}Grd* siteID.txt +log_msg 100 +rm -f ${LOCK_DIR}/importBkSiteGrids +#ALL well, send a msg and get out of here +log_msg "Importing Grids from backup site is completed. You may start your GFE now." +log_msg "Grids received from backup site are stored in ${SITE}_GRID__Restore_00000000_0000 database." +exit 0 + diff --git a/ncep/gov.noaa.nws.ncep.viz.rsc.ntrans/bin/gov/noaa/nws/ncep/viz/rsc/ntrans/jcgm/LICENSE.txt b/ncep/gov.noaa.nws.ncep.viz.rsc.ntrans/bin/gov/noaa/nws/ncep/viz/rsc/ntrans/jcgm/LICENSE.txt deleted file mode 100644 index 9de7145a20..0000000000 --- a/ncep/gov.noaa.nws.ncep.viz.rsc.ntrans/bin/gov/noaa/nws/ncep/viz/rsc/ntrans/jcgm/LICENSE.txt +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2009, Swiss AviationSoftware Ltd. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -- Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -- Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -- Neither the name of the Swiss AviationSoftware Ltd. nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. diff --git a/rpms/awips2.ade/Installer.eclipse/component.spec.3.6.1 b/rpms/awips2.ade/Installer.eclipse/component.spec.3.6.1 deleted file mode 100644 index 8cd44bd022..0000000000 --- a/rpms/awips2.ade/Installer.eclipse/component.spec.3.6.1 +++ /dev/null @@ -1,178 +0,0 @@ -# -# AWIPS II Eclipse Spec File -# - -# --define arguments: -# %{_uframe_eclipse} -# %{_build_root} -# %{_baseline_workspace} - -Name: awips2-eclipse -Summary: AWIPS II Eclipse Distribution -Version: 3.6.1 -Release: 1 -Group: AWIPSII -BuildRoot: %{_build_root} -URL: N/A -License: N/A -Distribution: N/A -Vendor: Raytheon -Packager: Bryan Kowal - -AutoReq: no -provides: awips2-eclipse - -%description -AWIPS II Eclipse Distribution - Contains the AWIPS II Eclipse Distribution. - -# Turn off the brp-python-bytecompile script -%global __os_install_post %(echo '%{__os_install_post}' | sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g') -%global __os_install_post %(echo '%{__os_install_post}' | sed -e 's!/usr/lib[^[:space:]]*/brp-java-repack-jars[[:space:]].*$!!g') - -%prep -# Verify That The User Has Specified A BuildRoot. -if [ "%{_build_root}" = "/tmp" ] -then - echo "An Actual BuildRoot Must Be Specified. Use The --buildroot Parameter." - echo "Unable To Continue ... Terminating" - exit 1 -fi - -if [ -d %{_build_root} ]; then - rm -rf %{_build_root} -fi -mkdir -p %{_build_root}/awips2/eclipse - -%build - -%install -mkdir -p %{_build_root}/awips2/eclipse -# The location of the awips2 eclipse source directory will be -# specified as a command line argument. Fail if the specified -# directory cannot be found. -if [ ! -d %{_uframe_eclipse} ]; then - echo "ERROR: Unable To Find The AWIPS II Eclipse Distribution." - echo "Unable To Continue ... Terminating" - exit 1 -fi - -# Copy the uframe eclipse distribution. -cp -r %{_uframe_eclipse}/* %{_build_root}/awips2/eclipse - -# Copy eclipse.sh to our build-directory. 
-cp %{_baseline_workspace}/rpms/awips2.ade/Installer.eclipse/scripts/* \ - %{_build_root}/awips2/eclipse - -# delete the basemaps and etc links -rm -f %{_build_root}/awips2/eclipse/basemaps -rm -f %{_build_root}/awips2/eclipse/etc - -%pre -JAVA_INSTALL="" -PYTHON_INSTALL="" -ANT_INSTALL="" - -INSTALL_PATH="/awips2/java" -if [ -d ${INSTALL_PATH} ]; then - JAVA_INSTALL=${INSTALL_PATH} -fi - -INSTALL_PATH="/awips2/python" -if [ -d ${INSTALL_PATH} ]; then - PYTHON_INSTALL=${INSTALL_PATH} -fi - -INSTALL_PATH="/awips2/ant" -if [ -d ${INSTALL_PATH} ]; then - ANT_INSTALL=${INSTALL_PATH} -fi - -echo -e "\e[1;34m--------------------------------------------------------------------------------\e[m" -echo -e "\e[1;34m\| Installing the AWIPS II Eclipse Distribution...\e[m" -echo -e "\e[1;34m--------------------------------------------------------------------------------\e[m" -echo -e "\e[1;34m Java Detected At: ${JAVA_INSTALL}\e[m" -echo -e "\e[1;34m Python Detected At: ${PYTHON_INSTALL}\e[m" -echo -e "\e[1;34m Ant Detected At: ${ANT_INSTALL}\e[m" - -%post -echo -e "\e[1;34m--------------------------------------------------------------------------------\e[m" -echo -e "\e[1;34m\| Creating ADE Eclipse Desktop Shortcut...\e[m" -echo -e "\e[1;34m--------------------------------------------------------------------------------\e[m" -ADE_ECLIPSE_SHORTCUT="ade-eclipse" -SHORTCUT_OWNER="${USER}" -CREATE_SHORTCUT="true" -if [ ! "${SUDO_USER}" = "" ]; then - SHORTCUT_OWNER="${SUDO_USER}" -fi -echo -e "\e[1;34m Creating Shortcut For User: ${SHORTCUT_OWNER}\e[m" - -USER_HOME_DIR="~${SHORTCUT_OWNER}" -if [ ! -d ${USER_HOME_DIR} ]; then - USER_HOME_DIR="/home/${SHORTCUT_OWNER}" - echo " (Assuming User Home Directory Is Under '/home')" -fi - -if [ ! -d ${USER_HOME_DIR}/Desktop ]; then - echo -e "\e[1;31m ERROR: Unable To Find The User's Desktop!!!" - CREATE_SHORTCUT="false" -fi - -if [ "${CREATE_SHORTCUT}" = "true" ]; then - SHORTCUT_TMP="${USER_HOME_DIR}/Desktop/${ADE_ECLIPSE_SHORTCUT}.tmp" - SHORTCUT="${USER_HOME_DIR}/Desktop/${ADE_ECLIPSE_SHORTCUT}.desktop" - - if [ -f ${SHORTCUT} ]; then - echo -n " Attempting To Remove The Existing Shortcut ... " - sudo -u ${SHORTCUT_OWNER} rm -f ${SHORTCUT} - if [ ! 
-f ${SHORTCUT} ]; then - echo -n "SUCCESS" - else - echo -n "FAILURE" - fi - echo "" - fi - sudo -u ${SHORTCUT_OWNER} touch ${SHORTCUT_TMP} - sudo -u ${SHORTCUT_OWNER} chmod 666 ${SHORTCUT_TMP} - - echo "[Desktop Entry]" >> ${SHORTCUT_TMP} - echo "Version=1.0" >> ${SHORTCUT_TMP} - echo "Encoding=UTF-8" >> ${SHORTCUT_TMP} - echo "Name=ADE Eclipse" >> ${SHORTCUT_TMP} - echo "GenericName=Eclipse" >> ${SHORTCUT_TMP} - echo "Comment=IDE" >> ${SHORTCUT_TMP} - echo "Exec=/bin/bash -i -c \"xterm -title 'AWIPS II ADE Eclipse' -e '/awips2/eclipse/eclipseShortcutWrap.sh'\"" >> ${SHORTCUT_TMP} - echo "Icon=/awips2/eclipse/icon.xpm" >> ${SHORTCUT_TMP} - echo "Terminal=false" >> ${SHORTCUT_TMP} - echo "Type=Application" >> ${SHORTCUT_TMP} - echo "Categories=Development;IDE;" >> ${SHORTCUT_TMP} - - sudo -u ${SHORTCUT_OWNER} mv ${SHORTCUT_TMP} ${SHORTCUT} - sudo -u ${SHORTCUT_OWNER} chmod 644 ${SHORTCUT} -fi - -echo -e "\e[1;32m--------------------------------------------------------------------------------\e[m" -echo -e "\e[1;32m\| AWIPS II Eclipse Distribution Installation - COMPLETE\e[m" -echo -e "\e[1;32m--------------------------------------------------------------------------------\e[m" - -%preun - -%postun - -%clean -rm -rf ${RPM_BUILD_ROOT} - -%files -%defattr(644,awips,fxalpha,755) -%dir /awips2/eclipse -/awips2/eclipse/* -%defattr(755,awips,fxalpha,755) -/awips2/eclipse/about.html -/awips2/eclipse/artifacts.xml -/awips2/eclipse/eclipse -/awips2/eclipse/eclipse.ini -/awips2/eclipse/eclipse.sh -/awips2/eclipse/eclipseShortcutWrap.sh -/awips2/eclipse/epl-v10.html -/awips2/eclipse/icon.xpm -/awips2/eclipse/libcairo-swt.so -/awips2/eclipse/notice.html diff --git a/rpms/awips2.qpid/0.18/SOURCES/qpid-wrapper b/rpms/awips2.qpid/0.18/SOURCES/qpid-wrapper index ff2c960ba1..5a909eb315 100644 --- a/rpms/awips2.qpid/0.18/SOURCES/qpid-wrapper +++ b/rpms/awips2.qpid/0.18/SOURCES/qpid-wrapper @@ -48,4 +48,4 @@ if [ ! -f ${JAVA} ]; then exit 1 fi -$JAVA -jar ${QPID_HOME}/bin/yajsw/wrapper.jar -c ${QPID_HOME}/conf/${CONF_FILE} +$JAVA -Xmx32m -XX:MaxPermSize=12m -XX:ReservedCodeCacheSize=4m -jar ${QPID_HOME}/bin/yajsw/wrapper.jar -c ${QPID_HOME}/conf/${CONF_FILE} diff --git a/rpms/awips2.qpid/0.18/SPECS/qpid-java.spec.patch0 b/rpms/awips2.qpid/0.18/SPECS/qpid-java.spec.patch0 index c36b7701ff..1c29240b03 100644 --- a/rpms/awips2.qpid/0.18/SPECS/qpid-java.spec.patch0 +++ b/rpms/awips2.qpid/0.18/SPECS/qpid-java.spec.patch0 @@ -14,7 +14,7 @@ diff -crB a/qpid-java.spec b/qpid-java.spec ! ! Name: awips2-qpid-java Version: 0.18 -! Release: 3%{?dist} +! Release: 4%{?dist} Summary: Java implementation of Apache Qpid License: Apache Software License Group: Development/Java diff --git a/rpms/build/x86_64/build.sh b/rpms/build/x86_64/build.sh index 61241b182d..5999951816 100644 --- a/rpms/build/x86_64/build.sh +++ b/rpms/build/x86_64/build.sh @@ -400,14 +400,16 @@ if [ "${1}" = "-viz" ]; then buildRPM "awips2" buildRPM "awips2-common-base" #buildRPM "awips2-python-dynamicserialize" - buildRPM "awips2-python" - buildRPM "awips2-adapt-native" + buildRPM "awips2-gfesuite-client" + buildRPM "awips2-gfesuite-server" + #buildRPM "awips2-python" + #buildRPM "awips2-adapt-native" #unpackHttpdPypies #if [ $? -ne 0 ]; then # exit 1 #fi #buildRPM "awips2-httpd-pypies" - buildRPM "awips2-hydroapps-shared" + #buildRPM "awips2-hydroapps-shared" #buildRPM "awips2-rcm" #buildRPM "awips2-tools" #buildRPM "awips2-cli"